1 /*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2016 Broadcom Corporation.
8 * Copyright (C) 2016-2017 Broadcom Limited.
9 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
10 * refers to Broadcom Inc. and/or its subsidiaries.
11 *
12 * Firmware is:
13 * Derived from proprietary unpublished source code,
14 * Copyright (C) 2000-2016 Broadcom Corporation.
15 * Copyright (C) 2016-2017 Broadcom Ltd.
16 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
17 * refers to Broadcom Inc. and/or its subsidiaries.
18 *
19 * Permission is hereby granted for the distribution of this firmware
20 * data in hexadecimal or equivalent format, provided this copyright
21 * notice is accompanying it.
22 */
23
24
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/stringify.h>
28 #include <linux/kernel.h>
29 #include <linux/sched/signal.h>
30 #include <linux/types.h>
31 #include <linux/compiler.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/in.h>
35 #include <linux/interrupt.h>
36 #include <linux/ioport.h>
37 #include <linux/pci.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/skbuff.h>
41 #include <linux/ethtool.h>
42 #include <linux/mdio.h>
43 #include <linux/mii.h>
44 #include <linux/phy.h>
45 #include <linux/brcmphy.h>
46 #include <linux/if.h>
47 #include <linux/if_vlan.h>
48 #include <linux/ip.h>
49 #include <linux/tcp.h>
50 #include <linux/workqueue.h>
51 #include <linux/prefetch.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/firmware.h>
54 #include <linux/ssb/ssb_driver_gige.h>
55 #include <linux/hwmon.h>
56 #include <linux/hwmon-sysfs.h>
57 #include <linux/crc32.h>
58 #include <linux/dmi.h>
59
60 #include <net/checksum.h>
61 #include <net/gso.h>
62 #include <net/ip.h>
63
64 #include <linux/io.h>
65 #include <asm/byteorder.h>
66 #include <linux/uaccess.h>
67
68 #include <uapi/linux/net_tstamp.h>
69 #include <linux/ptp_clock_kernel.h>
70
71 #define BAR_0 0
72 #define BAR_2 2
73
74 #include "tg3.h"
75
76 /* Functions & macros to verify TG3_FLAGS types */
77
static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
79 {
80 return test_bit(flag, bits);
81 }
82
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
84 {
85 set_bit(flag, bits);
86 }
87
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
89 {
90 clear_bit(flag, bits);
91 }
92
93 #define tg3_flag(tp, flag) \
94 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
95 #define tg3_flag_set(tp, flag) \
96 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
97 #define tg3_flag_clear(tp, flag) \
98 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
99
100 #define DRV_MODULE_NAME "tg3"
101 /* DO NOT UPDATE TG3_*_NUM defines */
102 #define TG3_MAJ_NUM 3
103 #define TG3_MIN_NUM 137
104
105 #define RESET_KIND_SHUTDOWN 0
106 #define RESET_KIND_INIT 1
107 #define RESET_KIND_SUSPEND 2
108
109 #define TG3_DEF_RX_MODE 0
110 #define TG3_DEF_TX_MODE 0
111 #define TG3_DEF_MSG_ENABLE \
112 (NETIF_MSG_DRV | \
113 NETIF_MSG_PROBE | \
114 NETIF_MSG_LINK | \
115 NETIF_MSG_TIMER | \
116 NETIF_MSG_IFDOWN | \
117 NETIF_MSG_IFUP | \
118 NETIF_MSG_RX_ERR | \
119 NETIF_MSG_TX_ERR)
120
121 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
122
123 /* length of time before we decide the hardware is borked,
124 * and dev->tx_timeout() should be called to fix the problem
125 */
126
127 #define TG3_TX_TIMEOUT (5 * HZ)
128
129 /* hardware minimum and maximum for a single frame's data payload */
130 #define TG3_MIN_MTU ETH_ZLEN
131 #define TG3_MAX_MTU(tp) \
132 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
133
134 /* These numbers seem to be hard coded in the NIC firmware somehow.
135 * You can't change the ring sizes, but you can change where you place
136 * them in the NIC onboard memory.
137 */
138 #define TG3_RX_STD_RING_SIZE(tp) \
139 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
140 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
141 #define TG3_DEF_RX_RING_PENDING 200
142 #define TG3_RX_JMB_RING_SIZE(tp) \
143 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
144 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
145 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
146
147 /* Do not place this n-ring entries value into the tp struct itself,
148 * we really want to expose these constants to GCC so that modulo et
149 * al. operations are done with shifts and masks instead of with
150 * hw multiply/modulo instructions. Another solution would be to
151 * replace things like '% foo' with '& (foo - 1)'.
152 */
153
154 #define TG3_TX_RING_SIZE 512
155 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
156
157 #define TG3_RX_STD_RING_BYTES(tp) \
158 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
159 #define TG3_RX_JMB_RING_BYTES(tp) \
160 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
161 #define TG3_RX_RCB_RING_BYTES(tp) \
162 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
163 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
164 TG3_TX_RING_SIZE)
165 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
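/* Worked example (added for clarity): TG3_TX_RING_SIZE is a power of
 * two, so the mask form above matches modulo arithmetic exactly, e.g.
 * NEXT_TX(511) == (512 & 511) == 0 -- the producer index wraps back to
 * the start of the ring without a divide instruction.
 */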
166
167 #define TG3_DMA_BYTE_ENAB 64
168
169 #define TG3_RX_STD_DMA_SZ 1536
170 #define TG3_RX_JMB_DMA_SZ 9046
171
172 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
173
174 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
175 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
176
177 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
178 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
179
180 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
181 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
182
183 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
184 * that are at least dword aligned when used in PCIX mode. The driver
185 * works around this bug by double copying the packet. This workaround
186 * is built into the normal double copy length check for efficiency.
187 *
188 * However, the double copy is only necessary on those architectures
189 * where unaligned memory accesses are inefficient. For those architectures
190 * where unaligned memory accesses incur little penalty, we can reintegrate
191 * the 5701 in the normal rx path. Doing so saves a device structure
192 * dereference by hardcoding the double copy threshold in place.
193 */
194 #define TG3_RX_COPY_THRESHOLD 256
195 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
196 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
197 #else
198 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
199 #endif
200
201 #if (NET_IP_ALIGN != 0)
202 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
203 #else
204 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
205 #endif
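/* Sketch of how TG3_RX_COPY_THRESH() is typically consumed on the rx
 * path (simplified; the helper names here are hypothetical, the real
 * logic lives in the rx handler):
 *
 *	if (len <= TG3_RX_COPY_THRESH(tp))
 *		skb = tg3_copy_small_frame(tnapi, data, len);
 *	else
 *		skb = tg3_pass_buffer_up(tnapi, data, len);
 *
 * Small frames are copied so the DMA buffer can be recycled in place,
 * which is also what folds the 5701 PCIX alignment workaround into the
 * normal length check described above.
 */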
206
207 /* minimum number of free TX descriptors required to wake up TX process */
208 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
209 #define TG3_TX_BD_DMA_MAX_2K 2048
210 #define TG3_TX_BD_DMA_MAX_4K 4096
211
212 #define TG3_RAW_IP_ALIGN 2
213
214 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
215 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
216
217 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
218 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
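/* Note on the arithmetic above: integer division makes
 * TG3_FW_UPDATE_FREQ_SEC == 2, so the driver refreshes firmware state
 * roughly twice within every 5 second timeout window.
 */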
219
220 #define FIRMWARE_TG3 "tigon/tg3.bin"
221 #define FIRMWARE_TG357766 "tigon/tg357766.bin"
222 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
223 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
224
225 MODULE_AUTHOR("David S. Miller <davem@redhat.com> and Jeff Garzik <jgarzik@pobox.com>");
226 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
227 MODULE_LICENSE("GPL");
228 MODULE_FIRMWARE(FIRMWARE_TG3);
229 MODULE_FIRMWARE(FIRMWARE_TG357766);
230 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
231 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
232
233 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
234 module_param(tg3_debug, int, 0);
235 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
236
237 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
238 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
239
240 static const struct pci_device_id tg3_pci_tbl[] = {
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
260 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261 TG3_DRV_DATA_FLAG_5705_10_100},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
263 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
264 TG3_DRV_DATA_FLAG_5705_10_100},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
267 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
268 TG3_DRV_DATA_FLAG_5705_10_100},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
275 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
281 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
289 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
290 PCI_VENDOR_ID_LENOVO,
291 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
292 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
295 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
308 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
309 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
310 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
311 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
312 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
313 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
314 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
316 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
318 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
319 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
320 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
323 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
330 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
332 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
333 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
334 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
335 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
336 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
337 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
338 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
339 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
340 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
341 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
342 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
343 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
344 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
345 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
346 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
347 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
348 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
349 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
350 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
351 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
352 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
353 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
354 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
355 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
356 {}
357 };
358
359 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
360
361 static const struct {
362 const char string[ETH_GSTRING_LEN];
363 } ethtool_stats_keys[] = {
364 { "rx_octets" },
365 { "rx_fragments" },
366 { "rx_ucast_packets" },
367 { "rx_mcast_packets" },
368 { "rx_bcast_packets" },
369 { "rx_fcs_errors" },
370 { "rx_align_errors" },
371 { "rx_xon_pause_rcvd" },
372 { "rx_xoff_pause_rcvd" },
373 { "rx_mac_ctrl_rcvd" },
374 { "rx_xoff_entered" },
375 { "rx_frame_too_long_errors" },
376 { "rx_jabbers" },
377 { "rx_undersize_packets" },
378 { "rx_in_length_errors" },
379 { "rx_out_length_errors" },
380 { "rx_64_or_less_octet_packets" },
381 { "rx_65_to_127_octet_packets" },
382 { "rx_128_to_255_octet_packets" },
383 { "rx_256_to_511_octet_packets" },
384 { "rx_512_to_1023_octet_packets" },
385 { "rx_1024_to_1522_octet_packets" },
386 { "rx_1523_to_2047_octet_packets" },
387 { "rx_2048_to_4095_octet_packets" },
388 { "rx_4096_to_8191_octet_packets" },
389 { "rx_8192_to_9022_octet_packets" },
390
391 { "tx_octets" },
392 { "tx_collisions" },
393
394 { "tx_xon_sent" },
395 { "tx_xoff_sent" },
396 { "tx_flow_control" },
397 { "tx_mac_errors" },
398 { "tx_single_collisions" },
399 { "tx_mult_collisions" },
400 { "tx_deferred" },
401 { "tx_excessive_collisions" },
402 { "tx_late_collisions" },
403 { "tx_collide_2times" },
404 { "tx_collide_3times" },
405 { "tx_collide_4times" },
406 { "tx_collide_5times" },
407 { "tx_collide_6times" },
408 { "tx_collide_7times" },
409 { "tx_collide_8times" },
410 { "tx_collide_9times" },
411 { "tx_collide_10times" },
412 { "tx_collide_11times" },
413 { "tx_collide_12times" },
414 { "tx_collide_13times" },
415 { "tx_collide_14times" },
416 { "tx_collide_15times" },
417 { "tx_ucast_packets" },
418 { "tx_mcast_packets" },
419 { "tx_bcast_packets" },
420 { "tx_carrier_sense_errors" },
421 { "tx_discards" },
422 { "tx_errors" },
423
424 { "dma_writeq_full" },
425 { "dma_write_prioq_full" },
426 { "rxbds_empty" },
427 { "rx_discards" },
428 { "rx_errors" },
429 { "rx_threshold_hit" },
430
431 { "dma_readq_full" },
432 { "dma_read_prioq_full" },
433 { "tx_comp_queue_full" },
434
435 { "ring_set_send_prod_index" },
436 { "ring_status_update" },
437 { "nic_irqs" },
438 { "nic_avoided_irqs" },
439 { "nic_tx_threshold_hit" },
440
441 { "mbuf_lwm_thresh_hit" },
442 };
443
444 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
445 #define TG3_NVRAM_TEST 0
446 #define TG3_LINK_TEST 1
447 #define TG3_REGISTER_TEST 2
448 #define TG3_MEMORY_TEST 3
449 #define TG3_MAC_LOOPB_TEST 4
450 #define TG3_PHY_LOOPB_TEST 5
451 #define TG3_EXT_LOOPB_TEST 6
452 #define TG3_INTERRUPT_TEST 7
453
454
455 static const struct {
456 const char string[ETH_GSTRING_LEN];
457 } ethtool_test_keys[] = {
458 [TG3_NVRAM_TEST] = { "nvram test (online) " },
459 [TG3_LINK_TEST] = { "link test (online) " },
460 [TG3_REGISTER_TEST] = { "register test (offline)" },
461 [TG3_MEMORY_TEST] = { "memory test (offline)" },
462 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
463 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
464 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
465 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
466 };
467
468 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
469
470
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
472 {
473 writel(val, tp->regs + off);
474 }
475
static u32 tg3_read32(struct tg3 *tp, u32 off)
477 {
478 return readl(tp->regs + off);
479 }
480
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
482 {
483 writel(val, tp->aperegs + off);
484 }
485
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
487 {
488 return readl(tp->aperegs + off);
489 }
490
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
492 {
493 unsigned long flags;
494
495 spin_lock_irqsave(&tp->indirect_lock, flags);
496 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
497 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
498 spin_unlock_irqrestore(&tp->indirect_lock, flags);
499 }
500
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
502 {
503 writel(val, tp->regs + off);
504 readl(tp->regs + off);
505 }
506
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
508 {
509 unsigned long flags;
510 u32 val;
511
512 spin_lock_irqsave(&tp->indirect_lock, flags);
513 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
514 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
515 spin_unlock_irqrestore(&tp->indirect_lock, flags);
516 return val;
517 }
518
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
520 {
521 unsigned long flags;
522
523 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
524 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
525 TG3_64BIT_REG_LOW, val);
526 return;
527 }
528 if (off == TG3_RX_STD_PROD_IDX_REG) {
529 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
530 TG3_64BIT_REG_LOW, val);
531 return;
532 }
533
534 spin_lock_irqsave(&tp->indirect_lock, flags);
535 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
536 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
537 spin_unlock_irqrestore(&tp->indirect_lock, flags);
538
539 /* In indirect mode when disabling interrupts, we also need
540 * to clear the interrupt bit in the GRC local ctrl register.
541 */
542 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
543 (val == 0x1)) {
544 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
545 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
546 }
547 }
548
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
550 {
551 unsigned long flags;
552 u32 val;
553
554 spin_lock_irqsave(&tp->indirect_lock, flags);
555 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
556 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
557 spin_unlock_irqrestore(&tp->indirect_lock, flags);
558 return val;
559 }
560
561 /* usec_wait specifies the wait time in usec when writing to certain registers
562 * where it is unsafe to read back the register without some delay.
563 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
564 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
565 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
567 {
568 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
569 /* Non-posted methods */
570 tp->write32(tp, off, val);
571 else {
572 /* Posted method */
573 tg3_write32(tp, off, val);
574 if (usec_wait)
575 udelay(usec_wait);
576 tp->read32(tp, off);
577 }
578 /* Wait again after the read for the posted method to guarantee that
579 * the wait time is met.
580 */
581 if (usec_wait)
582 udelay(usec_wait);
583 }
584
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
586 {
587 tp->write32_mbox(tp, off, val);
588 if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
589 (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
590 !tg3_flag(tp, ICH_WORKAROUND)))
591 tp->read32_mbox(tp, off);
592 }
593
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
595 {
596 void __iomem *mbox = tp->regs + off;
597 writel(val, mbox);
598 if (tg3_flag(tp, TXD_MBOX_HWBUG))
599 writel(val, mbox);
600 if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
601 tg3_flag(tp, FLUSH_POSTED_WRITES))
602 readl(mbox);
603 }
604
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
606 {
607 return readl(tp->regs + off + GRCMBOX_BASE);
608 }
609
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
611 {
612 writel(val, tp->regs + off + GRCMBOX_BASE);
613 }
614
615 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
616 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
617 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
618 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
619 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
620
621 #define tw32(reg, val) tp->write32(tp, reg, val)
622 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
623 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
624 #define tr32(reg) tp->read32(tp, reg)
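/* Illustrative use of the accessor macros above (register and value are
 * chosen arbitrarily, the pattern is what matters):
 *
 *	tw32(MAC_MODE, val);                       posted write
 *	tw32_f(MAC_MODE, val);                     write + readback flush
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, val, 40);   flush, then wait 40 usec
 *
 * The flushing forms exist for registers where a delayed posted write
 * is unsafe; see the usec_wait comment above _tw32_flush().
 */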
625
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
627 {
628 unsigned long flags;
629
630 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
631 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
632 return;
633
634 spin_lock_irqsave(&tp->indirect_lock, flags);
635 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
636 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
637 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
638
639 /* Always leave this as zero. */
640 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
641 } else {
642 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
643 tw32_f(TG3PCI_MEM_WIN_DATA, val);
644
645 /* Always leave this as zero. */
646 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
647 }
648 spin_unlock_irqrestore(&tp->indirect_lock, flags);
649 }
650
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
652 {
653 unsigned long flags;
654
655 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
656 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
657 *val = 0;
658 return;
659 }
660
661 spin_lock_irqsave(&tp->indirect_lock, flags);
662 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
663 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
664 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
665
666 /* Always leave this as zero. */
667 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
668 } else {
669 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
670 *val = tr32(TG3PCI_MEM_WIN_DATA);
671
672 /* Always leave this as zero. */
673 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
674 }
675 spin_unlock_irqrestore(&tp->indirect_lock, flags);
676 }
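/* Usage sketch for the SRAM window helpers above (the mailbox offsets
 * and command are real constants from tg3.h; the sequence itself is
 * illustrative):
 *
 *	u32 val;
 *
 *	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
 *	tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
 *
 * Both helpers serialize on tp->indirect_lock, so they can safely race
 * with the indirect register accessors earlier in this file.
 */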
677
static void tg3_ape_lock_init(struct tg3 *tp)
679 {
680 int i;
681 u32 regbase, bit;
682
683 if (tg3_asic_rev(tp) == ASIC_REV_5761)
684 regbase = TG3_APE_LOCK_GRANT;
685 else
686 regbase = TG3_APE_PER_LOCK_GRANT;
687
/* Make sure the driver doesn't hold any stale locks. */
689 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
690 switch (i) {
691 case TG3_APE_LOCK_PHY0:
692 case TG3_APE_LOCK_PHY1:
693 case TG3_APE_LOCK_PHY2:
694 case TG3_APE_LOCK_PHY3:
695 bit = APE_LOCK_GRANT_DRIVER;
696 break;
697 default:
698 if (!tp->pci_fn)
699 bit = APE_LOCK_GRANT_DRIVER;
700 else
701 bit = 1 << tp->pci_fn;
702 }
703 tg3_ape_write32(tp, regbase + 4 * i, bit);
704 }
705
706 }
707
static int tg3_ape_lock(struct tg3 *tp, int locknum)
709 {
710 int i, off;
711 int ret = 0;
712 u32 status, req, gnt, bit;
713
714 if (!tg3_flag(tp, ENABLE_APE))
715 return 0;
716
717 switch (locknum) {
718 case TG3_APE_LOCK_GPIO:
719 if (tg3_asic_rev(tp) == ASIC_REV_5761)
720 return 0;
721 fallthrough;
722 case TG3_APE_LOCK_GRC:
723 case TG3_APE_LOCK_MEM:
724 if (!tp->pci_fn)
725 bit = APE_LOCK_REQ_DRIVER;
726 else
727 bit = 1 << tp->pci_fn;
728 break;
729 case TG3_APE_LOCK_PHY0:
730 case TG3_APE_LOCK_PHY1:
731 case TG3_APE_LOCK_PHY2:
732 case TG3_APE_LOCK_PHY3:
733 bit = APE_LOCK_REQ_DRIVER;
734 break;
735 default:
736 return -EINVAL;
737 }
738
739 if (tg3_asic_rev(tp) == ASIC_REV_5761) {
740 req = TG3_APE_LOCK_REQ;
741 gnt = TG3_APE_LOCK_GRANT;
742 } else {
743 req = TG3_APE_PER_LOCK_REQ;
744 gnt = TG3_APE_PER_LOCK_GRANT;
745 }
746
747 off = 4 * locknum;
748
749 tg3_ape_write32(tp, req + off, bit);
750
/* Wait for up to 1 millisecond to acquire the lock. */
752 for (i = 0; i < 100; i++) {
753 status = tg3_ape_read32(tp, gnt + off);
754 if (status == bit)
755 break;
756 if (pci_channel_offline(tp->pdev))
757 break;
758
759 udelay(10);
760 }
761
762 if (status != bit) {
763 /* Revoke the lock request. */
764 tg3_ape_write32(tp, gnt + off, bit);
765 ret = -EBUSY;
766 }
767
768 return ret;
769 }
770
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
772 {
773 u32 gnt, bit;
774
775 if (!tg3_flag(tp, ENABLE_APE))
776 return;
777
778 switch (locknum) {
779 case TG3_APE_LOCK_GPIO:
780 if (tg3_asic_rev(tp) == ASIC_REV_5761)
781 return;
782 fallthrough;
783 case TG3_APE_LOCK_GRC:
784 case TG3_APE_LOCK_MEM:
785 if (!tp->pci_fn)
786 bit = APE_LOCK_GRANT_DRIVER;
787 else
788 bit = 1 << tp->pci_fn;
789 break;
790 case TG3_APE_LOCK_PHY0:
791 case TG3_APE_LOCK_PHY1:
792 case TG3_APE_LOCK_PHY2:
793 case TG3_APE_LOCK_PHY3:
794 bit = APE_LOCK_GRANT_DRIVER;
795 break;
796 default:
797 return;
798 }
799
800 if (tg3_asic_rev(tp) == ASIC_REV_5761)
801 gnt = TG3_APE_LOCK_GRANT;
802 else
803 gnt = TG3_APE_PER_LOCK_GRANT;
804
805 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
806 }
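/* Typical caller pattern for the APE lock pair above, mirroring what
 * tg3_ape_event_lock() does just below (sketch only):
 *
 *	err = tg3_ape_lock(tp, TG3_APE_LOCK_MEM);
 *	if (err)
 *		return err;
 *	... touch state shared with the APE firmware ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */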
807
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
809 {
810 u32 apedata;
811
812 while (timeout_us) {
813 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
814 return -EBUSY;
815
816 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
817 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
818 break;
819
820 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
821
822 udelay(10);
823 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
824 }
825
826 return timeout_us ? 0 : -EBUSY;
827 }
828
829 #ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
831 {
832 u32 i, apedata;
833
834 for (i = 0; i < timeout_us / 10; i++) {
835 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
836
837 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
838 break;
839
840 udelay(10);
841 }
842
843 return i == timeout_us / 10;
844 }
845
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
848 {
849 int err;
850 u32 i, bufoff, msgoff, maxlen, apedata;
851
852 if (!tg3_flag(tp, APE_HAS_NCSI))
853 return 0;
854
855 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
856 if (apedata != APE_SEG_SIG_MAGIC)
857 return -ENODEV;
858
859 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
860 if (!(apedata & APE_FW_STATUS_READY))
861 return -EAGAIN;
862
863 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
864 TG3_APE_SHMEM_BASE;
865 msgoff = bufoff + 2 * sizeof(u32);
866 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
867
868 while (len) {
869 u32 length;
870
871 /* Cap xfer sizes to scratchpad limits. */
872 length = (len > maxlen) ? maxlen : len;
873 len -= length;
874
875 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
876 if (!(apedata & APE_FW_STATUS_READY))
877 return -EAGAIN;
878
879 /* Wait for up to 1 msec for APE to service previous event. */
880 err = tg3_ape_event_lock(tp, 1000);
881 if (err)
882 return err;
883
884 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
885 APE_EVENT_STATUS_SCRTCHPD_READ |
886 APE_EVENT_STATUS_EVENT_PENDING;
887 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
888
889 tg3_ape_write32(tp, bufoff, base_off);
890 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
891
892 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
893 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
894
895 base_off += length;
896
897 if (tg3_ape_wait_for_event(tp, 30000))
898 return -EAGAIN;
899
900 for (i = 0; length; i += 4, length -= 4) {
901 u32 val = tg3_ape_read32(tp, msgoff + i);
902 memcpy(data, &val, sizeof(u32));
903 data++;
904 }
905 }
906
907 return 0;
908 }
909 #endif
910
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
912 {
913 int err;
914 u32 apedata;
915
916 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
917 if (apedata != APE_SEG_SIG_MAGIC)
918 return -EAGAIN;
919
920 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
921 if (!(apedata & APE_FW_STATUS_READY))
922 return -EAGAIN;
923
/* Wait for up to 20 milliseconds for the APE to service the previous event. */
925 err = tg3_ape_event_lock(tp, 20000);
926 if (err)
927 return err;
928
929 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
930 event | APE_EVENT_STATUS_EVENT_PENDING);
931
932 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
933 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
934
935 return 0;
936 }
937
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
939 {
940 u32 event;
941 u32 apedata;
942
943 if (!tg3_flag(tp, ENABLE_APE))
944 return;
945
946 switch (kind) {
947 case RESET_KIND_INIT:
948 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
949 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
950 APE_HOST_SEG_SIG_MAGIC);
951 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
952 APE_HOST_SEG_LEN_MAGIC);
953 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
954 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
955 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
956 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
957 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
958 APE_HOST_BEHAV_NO_PHYLOCK);
959 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
960 TG3_APE_HOST_DRVR_STATE_START);
961
962 event = APE_EVENT_STATUS_STATE_START;
963 break;
964 case RESET_KIND_SHUTDOWN:
965 if (device_may_wakeup(&tp->pdev->dev) &&
966 tg3_flag(tp, WOL_ENABLE)) {
967 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
968 TG3_APE_HOST_WOL_SPEED_AUTO);
969 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
970 } else
971 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
972
973 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
974
975 event = APE_EVENT_STATUS_STATE_UNLOAD;
976 break;
977 default:
978 return;
979 }
980
981 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
982
983 tg3_ape_send_event(tp, event);
984 }
985
static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
988 {
/* Send a heartbeat only once the interval has elapsed. */
990 if (!tg3_flag(tp, ENABLE_APE) ||
991 time_before(jiffies, tp->ape_hb_jiffies + interval))
992 return;
993
994 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
995 tp->ape_hb_jiffies = jiffies;
996 }
997
static void tg3_disable_ints(struct tg3 *tp)
999 {
1000 int i;
1001
1002 tw32(TG3PCI_MISC_HOST_CTRL,
1003 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
1004 for (i = 0; i < tp->irq_max; i++)
1005 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
1006 }
1007
static void tg3_enable_ints(struct tg3 *tp)
1009 {
1010 int i;
1011
1012 tp->irq_sync = 0;
1013 wmb();
1014
1015 tw32(TG3PCI_MISC_HOST_CTRL,
1016 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1017
1018 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1019 for (i = 0; i < tp->irq_cnt; i++) {
1020 struct tg3_napi *tnapi = &tp->napi[i];
1021
1022 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1023 if (tg3_flag(tp, 1SHOT_MSI))
1024 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1025
1026 tp->coal_now |= tnapi->coal_now;
1027 }
1028
1029 /* Force an initial interrupt */
1030 if (!tg3_flag(tp, TAGGED_STATUS) &&
1031 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1032 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1033 else
1034 tw32(HOSTCC_MODE, tp->coal_now);
1035
1036 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1037 }
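/* Mailbox convention, as inferred from the two functions above: writing
 * 0x00000001 to a vector's interrupt mailbox masks it
 * (tg3_disable_ints), while writing the last seen status tag in bits
 * 31:24 unmasks it and acknowledges all work up to that tag.
 */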
1038
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1040 {
1041 struct tg3 *tp = tnapi->tp;
1042 struct tg3_hw_status *sblk = tnapi->hw_status;
1043 unsigned int work_exists = 0;
1044
1045 /* check for phy events */
1046 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1047 if (sblk->status & SD_STATUS_LINK_CHG)
1048 work_exists = 1;
1049 }
1050
1051 /* check for TX work to do */
1052 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1053 work_exists = 1;
1054
1055 /* check for RX work to do */
1056 if (tnapi->rx_rcb_prod_idx &&
1057 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1058 work_exists = 1;
1059
1060 return work_exists;
1061 }
1062
1063 /* tg3_int_reenable
1064 * similar to tg3_enable_ints, but it accurately determines whether there
1065 * is new work pending and can return without flushing the PIO write
1066 * which reenables interrupts
1067 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
1069 {
1070 struct tg3 *tp = tnapi->tp;
1071
1072 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1073
1074 /* When doing tagged status, this work check is unnecessary.
1075 * The last_tag we write above tells the chip which piece of
1076 * work we've completed.
1077 */
1078 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1079 tw32(HOSTCC_MODE, tp->coalesce_mode |
1080 HOSTCC_MODE_ENABLE | tnapi->coal_now);
1081 }
1082
static void tg3_switch_clocks(struct tg3 *tp)
1084 {
1085 u32 clock_ctrl;
1086 u32 orig_clock_ctrl;
1087
1088 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1089 return;
1090
1091 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1092
1093 orig_clock_ctrl = clock_ctrl;
1094 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1095 CLOCK_CTRL_CLKRUN_OENABLE |
1096 0x1f);
1097 tp->pci_clock_ctrl = clock_ctrl;
1098
1099 if (tg3_flag(tp, 5705_PLUS)) {
1100 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1101 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1102 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1103 }
1104 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1105 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1106 clock_ctrl |
1107 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1108 40);
1109 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1110 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1111 40);
1112 }
1113 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1114 }
1115
1116 #define PHY_BUSY_LOOPS 5000
1117
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
1120 {
1121 u32 frame_val;
1122 unsigned int loops;
1123 int ret;
1124
1125 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1126 tw32_f(MAC_MI_MODE,
1127 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1128 udelay(80);
1129 }
1130
1131 tg3_ape_lock(tp, tp->phy_ape_lock);
1132
1133 *val = 0x0;
1134
1135 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1136 MI_COM_PHY_ADDR_MASK);
1137 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1138 MI_COM_REG_ADDR_MASK);
1139 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1140
1141 tw32_f(MAC_MI_COM, frame_val);
1142
1143 loops = PHY_BUSY_LOOPS;
1144 while (loops != 0) {
1145 udelay(10);
1146 frame_val = tr32(MAC_MI_COM);
1147
1148 if ((frame_val & MI_COM_BUSY) == 0) {
1149 udelay(5);
1150 frame_val = tr32(MAC_MI_COM);
1151 break;
1152 }
1153 loops -= 1;
1154 }
1155
1156 ret = -EBUSY;
1157 if (loops != 0) {
1158 *val = frame_val & MI_COM_DATA_MASK;
1159 ret = 0;
1160 }
1161
1162 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1163 tw32_f(MAC_MI_MODE, tp->mi_mode);
1164 udelay(80);
1165 }
1166
1167 tg3_ape_unlock(tp, tp->phy_ape_lock);
1168
1169 return ret;
1170 }
1171
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1173 {
1174 return __tg3_readphy(tp, tp->phy_addr, reg, val);
1175 }
1176
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
1179 {
1180 u32 frame_val;
1181 unsigned int loops;
1182 int ret;
1183
1184 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1185 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1186 return 0;
1187
1188 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1189 tw32_f(MAC_MI_MODE,
1190 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1191 udelay(80);
1192 }
1193
1194 tg3_ape_lock(tp, tp->phy_ape_lock);
1195
1196 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1197 MI_COM_PHY_ADDR_MASK);
1198 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1199 MI_COM_REG_ADDR_MASK);
1200 frame_val |= (val & MI_COM_DATA_MASK);
1201 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1202
1203 tw32_f(MAC_MI_COM, frame_val);
1204
1205 loops = PHY_BUSY_LOOPS;
1206 while (loops != 0) {
1207 udelay(10);
1208 frame_val = tr32(MAC_MI_COM);
1209 if ((frame_val & MI_COM_BUSY) == 0) {
1210 udelay(5);
1211 frame_val = tr32(MAC_MI_COM);
1212 break;
1213 }
1214 loops -= 1;
1215 }
1216
1217 ret = -EBUSY;
1218 if (loops != 0)
1219 ret = 0;
1220
1221 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1222 tw32_f(MAC_MI_MODE, tp->mi_mode);
1223 udelay(80);
1224 }
1225
1226 tg3_ape_unlock(tp, tp->phy_ape_lock);
1227
1228 return ret;
1229 }
1230
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1232 {
1233 return __tg3_writephy(tp, tp->phy_addr, reg, val);
1234 }
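/* Example composition of the PHY accessors above (BMCR_ANENABLE comes
 * from linux/mii.h; the sequence is illustrative, not driver code):
 *
 *	u32 val;
 *
 *	if (!tg3_readphy(tp, MII_BMCR, &val))
 *		tg3_writephy(tp, MII_BMCR, val | BMCR_ANENABLE);
 *
 * Both return 0 on success and -EBUSY when MI_COM stays busy for all
 * PHY_BUSY_LOOPS polls.
 */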
1235
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1237 {
1238 int err;
1239
1240 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1241 if (err)
1242 goto done;
1243
1244 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1245 if (err)
1246 goto done;
1247
1248 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1249 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1250 if (err)
1251 goto done;
1252
1253 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1254
1255 done:
1256 return err;
1257 }
1258
static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1260 {
1261 int err;
1262
1263 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1264 if (err)
1265 goto done;
1266
1267 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1268 if (err)
1269 goto done;
1270
1271 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1272 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1273 if (err)
1274 goto done;
1275
1276 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1277
1278 done:
1279 return err;
1280 }
1281
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1283 {
1284 int err;
1285
1286 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1287 if (!err)
1288 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1289
1290 return err;
1291 }
1292
static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1294 {
1295 int err;
1296
1297 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1298 if (!err)
1299 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1300
1301 return err;
1302 }
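/* The DSP helpers above implement a two-step indirect access: latch the
 * target register in MII_TG3_DSP_ADDRESS, then move the data through
 * MII_TG3_DSP_RW_PORT. A hedged example, assuming the usual constants
 * from tg3.h:
 *
 *	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, MII_TG3_DSP_TAP1_AGCTGT_DFLT);
 */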
1303
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1305 {
1306 int err;
1307
1308 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1309 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1310 MII_TG3_AUXCTL_SHDWSEL_MISC);
1311 if (!err)
1312 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1313
1314 return err;
1315 }
1316
static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1318 {
1319 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1320 set |= MII_TG3_AUXCTL_MISC_WREN;
1321
1322 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1323 }
1324
static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1326 {
1327 u32 val;
1328 int err;
1329
1330 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1331
1332 if (err)
1333 return err;
1334
1335 if (enable)
1336 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1337 else
1338 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1339
1340 err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1341 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1342
1343 return err;
1344 }
1345
static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1347 {
1348 return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1349 reg | val | MII_TG3_MISC_SHDW_WREN);
1350 }
1351
static int tg3_bmcr_reset(struct tg3 *tp)
1353 {
1354 u32 phy_control;
1355 int limit, err;
1356
1357 /* OK, reset it, and poll the BMCR_RESET bit until it
1358 * clears or we time out.
1359 */
1360 phy_control = BMCR_RESET;
1361 err = tg3_writephy(tp, MII_BMCR, phy_control);
1362 if (err != 0)
1363 return -EBUSY;
1364
1365 limit = 5000;
1366 while (limit--) {
1367 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1368 if (err != 0)
1369 return -EBUSY;
1370
1371 if ((phy_control & BMCR_RESET) == 0) {
1372 udelay(40);
1373 break;
1374 }
1375 udelay(10);
1376 }
1377 if (limit < 0)
1378 return -EBUSY;
1379
1380 return 0;
1381 }
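/* Timing note for the loop above: 5000 polls at udelay(10) give
 * BMCR_RESET roughly 50 ms to self-clear before -EBUSY is returned.
 */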
1382
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1384 {
1385 struct tg3 *tp = bp->priv;
1386 u32 val;
1387
1388 spin_lock_bh(&tp->lock);
1389
1390 if (__tg3_readphy(tp, mii_id, reg, &val))
1391 val = -EIO;
1392
1393 spin_unlock_bh(&tp->lock);
1394
1395 return val;
1396 }
1397
static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1399 {
1400 struct tg3 *tp = bp->priv;
1401 u32 ret = 0;
1402
1403 spin_lock_bh(&tp->lock);
1404
1405 if (__tg3_writephy(tp, mii_id, reg, val))
1406 ret = -EIO;
1407
1408 spin_unlock_bh(&tp->lock);
1409
1410 return ret;
1411 }
1412
static void tg3_mdio_config_5785(struct tg3 *tp)
1414 {
1415 u32 val;
1416 struct phy_device *phydev;
1417
1418 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1419 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1420 case PHY_ID_BCM50610:
1421 case PHY_ID_BCM50610M:
1422 val = MAC_PHYCFG2_50610_LED_MODES;
1423 break;
1424 case PHY_ID_BCMAC131:
1425 val = MAC_PHYCFG2_AC131_LED_MODES;
1426 break;
1427 case PHY_ID_RTL8211C:
1428 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1429 break;
1430 case PHY_ID_RTL8201E:
1431 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1432 break;
1433 default:
1434 return;
1435 }
1436
1437 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1438 tw32(MAC_PHYCFG2, val);
1439
1440 val = tr32(MAC_PHYCFG1);
1441 val &= ~(MAC_PHYCFG1_RGMII_INT |
1442 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1443 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1444 tw32(MAC_PHYCFG1, val);
1445
1446 return;
1447 }
1448
1449 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1450 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1451 MAC_PHYCFG2_FMODE_MASK_MASK |
1452 MAC_PHYCFG2_GMODE_MASK_MASK |
1453 MAC_PHYCFG2_ACT_MASK_MASK |
1454 MAC_PHYCFG2_QUAL_MASK_MASK |
1455 MAC_PHYCFG2_INBAND_ENABLE;
1456
1457 tw32(MAC_PHYCFG2, val);
1458
1459 val = tr32(MAC_PHYCFG1);
1460 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1461 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1462 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1463 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1464 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1465 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1466 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1467 }
1468 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1469 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1470 tw32(MAC_PHYCFG1, val);
1471
1472 val = tr32(MAC_EXT_RGMII_MODE);
1473 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1474 MAC_RGMII_MODE_RX_QUALITY |
1475 MAC_RGMII_MODE_RX_ACTIVITY |
1476 MAC_RGMII_MODE_RX_ENG_DET |
1477 MAC_RGMII_MODE_TX_ENABLE |
1478 MAC_RGMII_MODE_TX_LOWPWR |
1479 MAC_RGMII_MODE_TX_RESET);
1480 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1481 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1482 val |= MAC_RGMII_MODE_RX_INT_B |
1483 MAC_RGMII_MODE_RX_QUALITY |
1484 MAC_RGMII_MODE_RX_ACTIVITY |
1485 MAC_RGMII_MODE_RX_ENG_DET;
1486 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1487 val |= MAC_RGMII_MODE_TX_ENABLE |
1488 MAC_RGMII_MODE_TX_LOWPWR |
1489 MAC_RGMII_MODE_TX_RESET;
1490 }
1491 tw32(MAC_EXT_RGMII_MODE, val);
1492 }
1493
static void tg3_mdio_start(struct tg3 *tp)
1495 {
1496 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1497 tw32_f(MAC_MI_MODE, tp->mi_mode);
1498 udelay(80);
1499
1500 if (tg3_flag(tp, MDIOBUS_INITED) &&
1501 tg3_asic_rev(tp) == ASIC_REV_5785)
1502 tg3_mdio_config_5785(tp);
1503 }
1504
static int tg3_mdio_init(struct tg3 *tp)
1506 {
1507 int i;
1508 u32 reg;
1509 struct phy_device *phydev;
1510
1511 if (tg3_flag(tp, 5717_PLUS)) {
1512 u32 is_serdes;
1513
1514 tp->phy_addr = tp->pci_fn + 1;
1515
1516 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1517 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1518 else
1519 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1520 TG3_CPMU_PHY_STRAP_IS_SERDES;
1521 if (is_serdes)
1522 tp->phy_addr += 7;
1523 } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1524 int addr;
1525
1526 addr = ssb_gige_get_phyaddr(tp->pdev);
1527 if (addr < 0)
1528 return addr;
1529 tp->phy_addr = addr;
1530 } else
1531 tp->phy_addr = TG3_PHY_MII_ADDR;
1532
1533 tg3_mdio_start(tp);
1534
1535 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1536 return 0;
1537
1538 tp->mdio_bus = mdiobus_alloc();
1539 if (tp->mdio_bus == NULL)
1540 return -ENOMEM;
1541
1542 tp->mdio_bus->name = "tg3 mdio bus";
1543 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev));
1544 tp->mdio_bus->priv = tp;
1545 tp->mdio_bus->parent = &tp->pdev->dev;
1546 tp->mdio_bus->read = &tg3_mdio_read;
1547 tp->mdio_bus->write = &tg3_mdio_write;
1548 tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1549
1550 /* The bus registration will look for all the PHYs on the mdio bus.
1551 * Unfortunately, it does not ensure the PHY is powered up before
1552 * accessing the PHY ID registers. A chip reset is the
* quickest way to bring the device back to an operational state.
1554 */
if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1556 tg3_bmcr_reset(tp);
1557
1558 i = mdiobus_register(tp->mdio_bus);
1559 if (i) {
1560 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1561 mdiobus_free(tp->mdio_bus);
1562 return i;
1563 }
1564
1565 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1566
1567 if (!phydev || !phydev->drv) {
1568 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1569 mdiobus_unregister(tp->mdio_bus);
1570 mdiobus_free(tp->mdio_bus);
1571 return -ENODEV;
1572 }
1573
1574 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1575 case PHY_ID_BCM57780:
1576 phydev->interface = PHY_INTERFACE_MODE_GMII;
1577 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1578 break;
1579 case PHY_ID_BCM50610:
1580 case PHY_ID_BCM50610M:
1581 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1582 PHY_BRCM_RX_REFCLK_UNUSED |
1583 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1584 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1585 fallthrough;
1586 case PHY_ID_RTL8211C:
1587 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1588 break;
1589 case PHY_ID_RTL8201E:
1590 case PHY_ID_BCMAC131:
1591 phydev->interface = PHY_INTERFACE_MODE_MII;
1592 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1593 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1594 break;
1595 }
1596
1597 tg3_flag_set(tp, MDIOBUS_INITED);
1598
1599 if (tg3_asic_rev(tp) == ASIC_REV_5785)
1600 tg3_mdio_config_5785(tp);
1601
1602 return 0;
1603 }
1604
static void tg3_mdio_fini(struct tg3 *tp)
1606 {
1607 if (tg3_flag(tp, MDIOBUS_INITED)) {
1608 tg3_flag_clear(tp, MDIOBUS_INITED);
1609 mdiobus_unregister(tp->mdio_bus);
1610 mdiobus_free(tp->mdio_bus);
1611 }
1612 }
1613
1614 /* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
1616 {
1617 u32 val;
1618
1619 val = tr32(GRC_RX_CPU_EVENT);
1620 val |= GRC_RX_CPU_DRIVER_EVENT;
1621 tw32_f(GRC_RX_CPU_EVENT, val);
1622
1623 tp->last_event_jiffies = jiffies;
1624 }
1625
1626 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1627
1628 /* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
1630 {
1631 int i;
1632 unsigned int delay_cnt;
1633 long time_remain;
1634
1635 /* If enough time has passed, no wait is necessary. */
1636 time_remain = (long)(tp->last_event_jiffies + 1 +
1637 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1638 (long)jiffies;
1639 if (time_remain < 0)
1640 return;
1641
1642 /* Check if we can shorten the wait time. */
1643 delay_cnt = jiffies_to_usecs(time_remain);
1644 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1645 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1646 delay_cnt = (delay_cnt >> 3) + 1;
1647
1648 for (i = 0; i < delay_cnt; i++) {
1649 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1650 break;
1651 if (pci_channel_offline(tp->pdev))
1652 break;
1653
1654 udelay(8);
1655 }
1656 }
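/* Worked example for the loop sizing above: with the full 2500 usec of
 * TG3_FW_EVENT_TIMEOUT_USEC remaining, delay_cnt is (2500 >> 3) + 1 ==
 * 313 iterations of udelay(8), i.e. about 2504 usec of polling --
 * just past the nominal timeout.
 */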
1657
1658 /* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1660 {
1661 u32 reg, val;
1662
1663 val = 0;
if (!tg3_readphy(tp, MII_BMCR, &reg))
1665 val = reg << 16;
if (!tg3_readphy(tp, MII_BMSR, &reg))
1667 val |= (reg & 0xffff);
1668 *data++ = val;
1669
1670 val = 0;
if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1672 val = reg << 16;
if (!tg3_readphy(tp, MII_LPA, &reg))
1674 val |= (reg & 0xffff);
1675 *data++ = val;
1676
1677 val = 0;
1678 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1680 val = reg << 16;
if (!tg3_readphy(tp, MII_STAT1000, &reg))
1682 val |= (reg & 0xffff);
1683 }
1684 *data++ = val;
1685
if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1687 val = reg << 16;
1688 else
1689 val = 0;
1690 *data++ = val;
1691 }
1692
1693 /* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
1695 {
1696 u32 data[4];
1697
1698 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1699 return;
1700
1701 tg3_phy_gather_ump_data(tp, data);
1702
1703 tg3_wait_for_event_ack(tp);
1704
1705 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1706 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1707 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1708 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1709 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1710 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1711
1712 tg3_generate_fw_event(tp);
1713 }
1714
1715 /* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
1717 {
1718 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1719 /* Wait for RX cpu to ACK the previous event. */
1720 tg3_wait_for_event_ack(tp);
1721
1722 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1723
1724 tg3_generate_fw_event(tp);
1725
1726 /* Wait for RX cpu to ACK this event. */
1727 tg3_wait_for_event_ack(tp);
1728 }
1729 }
1730
1731 /* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1733 {
1734 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1735 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1736
1737 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1738 switch (kind) {
1739 case RESET_KIND_INIT:
1740 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1741 DRV_STATE_START);
1742 break;
1743
1744 case RESET_KIND_SHUTDOWN:
1745 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1746 DRV_STATE_UNLOAD);
1747 break;
1748
1749 case RESET_KIND_SUSPEND:
1750 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1751 DRV_STATE_SUSPEND);
1752 break;
1753
1754 default:
1755 break;
1756 }
1757 }
1758 }
1759
1760 /* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1762 {
1763 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1764 switch (kind) {
1765 case RESET_KIND_INIT:
1766 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1767 DRV_STATE_START_DONE);
1768 break;
1769
1770 case RESET_KIND_SHUTDOWN:
1771 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1772 DRV_STATE_UNLOAD_DONE);
1773 break;
1774
1775 default:
1776 break;
1777 }
1778 }
1779 }
1780
1781 /* tp->lock is held. */
1782 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1783 {
1784 if (tg3_flag(tp, ENABLE_ASF)) {
1785 switch (kind) {
1786 case RESET_KIND_INIT:
1787 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1788 DRV_STATE_START);
1789 break;
1790
1791 case RESET_KIND_SHUTDOWN:
1792 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1793 DRV_STATE_UNLOAD);
1794 break;
1795
1796 case RESET_KIND_SUSPEND:
1797 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1798 DRV_STATE_SUSPEND);
1799 break;
1800
1801 default:
1802 break;
1803 }
1804 }
1805 }
1806
1807 static int tg3_poll_fw(struct tg3 *tp)
1808 {
1809 int i;
1810 u32 val;
1811
1812 if (tg3_flag(tp, NO_FWARE_REPORTED))
1813 return 0;
1814
1815 if (tg3_flag(tp, IS_SSB_CORE)) {
1816 /* We don't use firmware. */
1817 return 0;
1818 }
1819
1820 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1821 /* Wait up to 20ms for init done. */
1822 for (i = 0; i < 200; i++) {
1823 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1824 return 0;
1825 if (pci_channel_offline(tp->pdev))
1826 return -ENODEV;
1827
1828 udelay(100);
1829 }
1830 return -ENODEV;
1831 }
1832
1833 /* Wait for firmware initialization to complete. */
1834 for (i = 0; i < 100000; i++) {
1835 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1836 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1837 break;
1838 if (pci_channel_offline(tp->pdev)) {
1839 if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1840 tg3_flag_set(tp, NO_FWARE_REPORTED);
1841 netdev_info(tp->dev, "No firmware running\n");
1842 }
1843
1844 break;
1845 }
1846
1847 udelay(10);
1848 }
1849
1850 /* Chip might not be fitted with firmware. Some Sun onboard
1851 * parts are configured like that. So don't signal the timeout
1852 * of the above loop as an error, but do report the lack of
1853 * running firmware once.
1854 */
1855 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1856 tg3_flag_set(tp, NO_FWARE_REPORTED);
1857
1858 netdev_info(tp->dev, "No firmware running\n");
1859 }
1860
1861 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1862 /* The 57765 A0 needs a little more
1863 * time to do some important work.
1864 */
1865 mdelay(10);
1866 }
1867
1868 return 0;
1869 }
1870
1871 static void tg3_link_report(struct tg3 *tp)
1872 {
1873 if (!netif_carrier_ok(tp->dev)) {
1874 netif_info(tp, link, tp->dev, "Link is down\n");
1875 tg3_ump_link_report(tp);
1876 } else if (netif_msg_link(tp)) {
1877 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1878 (tp->link_config.active_speed == SPEED_1000 ?
1879 1000 :
1880 (tp->link_config.active_speed == SPEED_100 ?
1881 100 : 10)),
1882 (tp->link_config.active_duplex == DUPLEX_FULL ?
1883 "full" : "half"));
1884
1885 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1886 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1887 "on" : "off",
1888 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1889 "on" : "off");
1890
1891 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1892 netdev_info(tp->dev, "EEE is %s\n",
1893 tp->setlpicnt ? "enabled" : "disabled");
1894
1895 tg3_ump_link_report(tp);
1896 }
1897
1898 tp->link_up = netif_carrier_ok(tp->dev);
1899 }
1900
1901 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1902 {
1903 u32 flowctrl = 0;
1904
1905 if (adv & ADVERTISE_PAUSE_CAP) {
1906 flowctrl |= FLOW_CTRL_RX;
1907 if (!(adv & ADVERTISE_PAUSE_ASYM))
1908 flowctrl |= FLOW_CTRL_TX;
1909 } else if (adv & ADVERTISE_PAUSE_ASYM)
1910 flowctrl |= FLOW_CTRL_TX;
1911
1912 return flowctrl;
1913 }
1914
1915 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1916 {
1917 u16 miireg;
1918
1919 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1920 miireg = ADVERTISE_1000XPAUSE;
1921 else if (flow_ctrl & FLOW_CTRL_TX)
1922 miireg = ADVERTISE_1000XPSE_ASYM;
1923 else if (flow_ctrl & FLOW_CTRL_RX)
1924 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1925 else
1926 miireg = 0;
1927
1928 return miireg;
1929 }
1930
1931 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1932 {
1933 u32 flowctrl = 0;
1934
1935 if (adv & ADVERTISE_1000XPAUSE) {
1936 flowctrl |= FLOW_CTRL_RX;
1937 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1938 flowctrl |= FLOW_CTRL_TX;
1939 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1940 flowctrl |= FLOW_CTRL_TX;
1941
1942 return flowctrl;
1943 }
1944
1945 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1946 {
1947 u8 cap = 0;
1948
1949 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1950 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1951 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1952 if (lcladv & ADVERTISE_1000XPAUSE)
1953 cap = FLOW_CTRL_RX;
1954 if (rmtadv & ADVERTISE_1000XPAUSE)
1955 cap = FLOW_CTRL_TX;
1956 }
1957
1958 return cap;
1959 }
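
/* Illustrative example, not driver logic: pause resolution (IEEE 802.3
 * Annex 28B) needs agreement on the shared advertisement bits, e.g.
 *
 *	u8 cap = tg3_resolve_flowctrl_1000X(ADVERTISE_1000XPSE_ASYM,
 *					    ADVERTISE_1000XPAUSE);
 *
 * yields cap == 0: the local side offers only asymmetric pause and the
 * remote only symmetric, so no common mode exists. An asymmetric
 * result requires both ends to advertise ADVERTISE_1000XPSE_ASYM with
 * exactly one of them also setting ADVERTISE_1000XPAUSE.
 */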
1960
1961 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1962 {
1963 u8 autoneg;
1964 u8 flowctrl = 0;
1965 u32 old_rx_mode = tp->rx_mode;
1966 u32 old_tx_mode = tp->tx_mode;
1967
1968 if (tg3_flag(tp, USE_PHYLIB))
1969 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1970 else
1971 autoneg = tp->link_config.autoneg;
1972
1973 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1974 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1975 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1976 else
1977 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1978 } else
1979 flowctrl = tp->link_config.flowctrl;
1980
1981 tp->link_config.active_flowctrl = flowctrl;
1982
1983 if (flowctrl & FLOW_CTRL_RX)
1984 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1985 else
1986 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1987
1988 if (old_rx_mode != tp->rx_mode)
1989 tw32_f(MAC_RX_MODE, tp->rx_mode);
1990
1991 if (flowctrl & FLOW_CTRL_TX)
1992 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1993 else
1994 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1995
1996 if (old_tx_mode != tp->tx_mode)
1997 tw32_f(MAC_TX_MODE, tp->tx_mode);
1998 }
1999
2000 static void tg3_adjust_link(struct net_device *dev)
2001 {
2002 u8 oldflowctrl, linkmesg = 0;
2003 u32 mac_mode, lcl_adv, rmt_adv;
2004 struct tg3 *tp = netdev_priv(dev);
2005 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2006
2007 spin_lock_bh(&tp->lock);
2008
2009 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2010 MAC_MODE_HALF_DUPLEX);
2011
2012 oldflowctrl = tp->link_config.active_flowctrl;
2013
2014 if (phydev->link) {
2015 lcl_adv = 0;
2016 rmt_adv = 0;
2017
2018 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2019 mac_mode |= MAC_MODE_PORT_MODE_MII;
2020 else if (phydev->speed == SPEED_1000 ||
2021 tg3_asic_rev(tp) != ASIC_REV_5785)
2022 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2023 else
2024 mac_mode |= MAC_MODE_PORT_MODE_MII;
2025
2026 if (phydev->duplex == DUPLEX_HALF)
2027 mac_mode |= MAC_MODE_HALF_DUPLEX;
2028 else {
2029 lcl_adv = mii_advertise_flowctrl(
2030 tp->link_config.flowctrl);
2031
2032 if (phydev->pause)
2033 rmt_adv = LPA_PAUSE_CAP;
2034 if (phydev->asym_pause)
2035 rmt_adv |= LPA_PAUSE_ASYM;
2036 }
2037
2038 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2039 } else
2040 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2041
2042 if (mac_mode != tp->mac_mode) {
2043 tp->mac_mode = mac_mode;
2044 tw32_f(MAC_MODE, tp->mac_mode);
2045 udelay(40);
2046 }
2047
2048 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2049 if (phydev->speed == SPEED_10)
2050 tw32(MAC_MI_STAT,
2051 MAC_MI_STAT_10MBPS_MODE |
2052 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2053 else
2054 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2055 }
2056
2057 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2058 tw32(MAC_TX_LENGTHS,
2059 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2060 (6 << TX_LENGTHS_IPG_SHIFT) |
2061 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2062 else
2063 tw32(MAC_TX_LENGTHS,
2064 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2065 (6 << TX_LENGTHS_IPG_SHIFT) |
2066 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2067
2068 if (phydev->link != tp->old_link ||
2069 phydev->speed != tp->link_config.active_speed ||
2070 phydev->duplex != tp->link_config.active_duplex ||
2071 oldflowctrl != tp->link_config.active_flowctrl)
2072 linkmesg = 1;
2073
2074 tp->old_link = phydev->link;
2075 tp->link_config.active_speed = phydev->speed;
2076 tp->link_config.active_duplex = phydev->duplex;
2077
2078 spin_unlock_bh(&tp->lock);
2079
2080 if (linkmesg)
2081 tg3_link_report(tp);
2082 }
2083
2084 static int tg3_phy_init(struct tg3 *tp)
2085 {
2086 struct phy_device *phydev;
2087
2088 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2089 return 0;
2090
2091 /* Bring the PHY back to a known state. */
2092 tg3_bmcr_reset(tp);
2093
2094 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2095
2096 /* Attach the MAC to the PHY. */
2097 phydev = phy_connect(tp->dev, phydev_name(phydev),
2098 tg3_adjust_link, phydev->interface);
2099 if (IS_ERR(phydev)) {
2100 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2101 return PTR_ERR(phydev);
2102 }
2103
2104 /* Mask with MAC supported features. */
2105 switch (phydev->interface) {
2106 case PHY_INTERFACE_MODE_GMII:
2107 case PHY_INTERFACE_MODE_RGMII:
2108 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2109 phy_set_max_speed(phydev, SPEED_1000);
2110 phy_support_asym_pause(phydev);
2111 break;
2112 }
2113 fallthrough;
2114 case PHY_INTERFACE_MODE_MII:
2115 phy_set_max_speed(phydev, SPEED_100);
2116 phy_support_asym_pause(phydev);
2117 break;
2118 default:
2119 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2120 return -EINVAL;
2121 }
2122
2123 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2124
2125 phy_attached_info(phydev);
2126
2127 return 0;
2128 }
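
/* Lifecycle note (descriptive): the phylib attachment made in
 * tg3_phy_init() is paired with tg3_phy_start()/tg3_phy_stop() around
 * interface up/down and is torn down by tg3_phy_fini() via
 * phy_disconnect().
 */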
2129
2130 static void tg3_phy_start(struct tg3 *tp)
2131 {
2132 struct phy_device *phydev;
2133
2134 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2135 return;
2136
2137 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2138
2139 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2140 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2141 phydev->speed = tp->link_config.speed;
2142 phydev->duplex = tp->link_config.duplex;
2143 phydev->autoneg = tp->link_config.autoneg;
2144 ethtool_convert_legacy_u32_to_link_mode(
2145 phydev->advertising, tp->link_config.advertising);
2146 }
2147
2148 phy_start(phydev);
2149
2150 phy_start_aneg(phydev);
2151 }
2152
2153 static void tg3_phy_stop(struct tg3 *tp)
2154 {
2155 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2156 return;
2157
2158 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2159 }
2160
2161 static void tg3_phy_fini(struct tg3 *tp)
2162 {
2163 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2164 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2165 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2166 }
2167 }
2168
2169 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2170 {
2171 int err;
2172 u32 val;
2173
2174 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2175 return 0;
2176
2177 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2178 /* Cannot do read-modify-write on 5401 */
2179 err = tg3_phy_auxctl_write(tp,
2180 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2181 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2182 0x4c20);
2183 goto done;
2184 }
2185
2186 err = tg3_phy_auxctl_read(tp,
2187 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2188 if (err)
2189 return err;
2190
2191 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2192 err = tg3_phy_auxctl_write(tp,
2193 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2194
2195 done:
2196 return err;
2197 }
2198
2199 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2200 {
2201 u32 phytest;
2202
2203 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2204 u32 phy;
2205
2206 tg3_writephy(tp, MII_TG3_FET_TEST,
2207 phytest | MII_TG3_FET_SHADOW_EN);
2208 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2209 if (enable)
2210 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2211 else
2212 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2213 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2214 }
2215 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2216 }
2217 }
2218
2219 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2220 {
2221 u32 reg;
2222
2223 if (!tg3_flag(tp, 5705_PLUS) ||
2224 (tg3_flag(tp, 5717_PLUS) &&
2225 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2226 return;
2227
2228 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2229 tg3_phy_fet_toggle_apd(tp, enable);
2230 return;
2231 }
2232
2233 reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2234 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2235 MII_TG3_MISC_SHDW_SCR5_SDTL |
2236 MII_TG3_MISC_SHDW_SCR5_C125OE;
2237 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2238 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2239
2240 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2241
2242
2243 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2244 if (enable)
2245 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2246
2247 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2248 }
2249
2250 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2251 {
2252 u32 phy;
2253
2254 if (!tg3_flag(tp, 5705_PLUS) ||
2255 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2256 return;
2257
2258 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2259 u32 ephy;
2260
2261 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2262 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2263
2264 tg3_writephy(tp, MII_TG3_FET_TEST,
2265 ephy | MII_TG3_FET_SHADOW_EN);
2266 if (!tg3_readphy(tp, reg, &phy)) {
2267 if (enable)
2268 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2269 else
2270 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2271 tg3_writephy(tp, reg, phy);
2272 }
2273 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2274 }
2275 } else {
2276 int ret;
2277
2278 ret = tg3_phy_auxctl_read(tp,
2279 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2280 if (!ret) {
2281 if (enable)
2282 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2283 else
2284 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2285 tg3_phy_auxctl_write(tp,
2286 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2287 }
2288 }
2289 }
2290
2291 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2292 {
2293 int ret;
2294 u32 val;
2295
2296 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2297 return;
2298
2299 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2300 if (!ret)
2301 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2302 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2303 }
2304
2305 static void tg3_phy_apply_otp(struct tg3 *tp)
2306 {
2307 u32 otp, phy;
2308
2309 if (!tp->phy_otp)
2310 return;
2311
2312 otp = tp->phy_otp;
2313
2314 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2315 return;
2316
2317 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2318 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2319 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2320
2321 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2322 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2323 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2324
2325 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2326 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2327 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2328
2329 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2330 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2331
2332 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2333 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2334
2335 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2336 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2337 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2338
2339 tg3_phy_toggle_auxctl_smdsp(tp, false);
2340 }
2341
2342 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_keee *eee)
2343 {
2344 u32 val;
2345 struct ethtool_keee *dest = &tp->eee;
2346
2347 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2348 return;
2349
2350 if (eee)
2351 dest = eee;
2352
2353 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2354 return;
2355
2356 /* Pull eee_active */
2357 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2358 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2359 dest->eee_active = 1;
2360 } else
2361 dest->eee_active = 0;
2362
2363 /* Pull lp advertised settings */
2364 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2365 return;
2366 mii_eee_cap1_mod_linkmode_t(dest->lp_advertised, val);
2367
2368 /* Pull advertised and eee_enabled settings */
2369 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2370 return;
2371 dest->eee_enabled = !!val;
2372 mii_eee_cap1_mod_linkmode_t(dest->advertised, val);
2373
2374 /* Pull tx_lpi_enabled */
2375 val = tr32(TG3_CPMU_EEE_MODE);
2376 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2377
2378 /* Pull lpi timer value */
2379 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2380 }
2381
2382 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2383 {
2384 u32 val;
2385
2386 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2387 return;
2388
2389 tp->setlpicnt = 0;
2390
2391 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2392 current_link_up &&
2393 tp->link_config.active_duplex == DUPLEX_FULL &&
2394 (tp->link_config.active_speed == SPEED_100 ||
2395 tp->link_config.active_speed == SPEED_1000)) {
2396 u32 eeectl;
2397
2398 if (tp->link_config.active_speed == SPEED_1000)
2399 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2400 else
2401 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2402
2403 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2404
2405 tg3_eee_pull_config(tp, NULL);
2406 if (tp->eee.eee_active)
2407 tp->setlpicnt = 2;
2408 }
2409
2410 if (!tp->setlpicnt) {
2411 if (current_link_up &&
2412 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2413 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2414 tg3_phy_toggle_auxctl_smdsp(tp, false);
2415 }
2416
2417 val = tr32(TG3_CPMU_EEE_MODE);
2418 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2419 }
2420 }
2421
2422 static void tg3_phy_eee_enable(struct tg3 *tp)
2423 {
2424 u32 val;
2425
2426 if (tp->link_config.active_speed == SPEED_1000 &&
2427 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2428 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2429 tg3_flag(tp, 57765_CLASS)) &&
2430 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2431 val = MII_TG3_DSP_TAP26_ALNOKO |
2432 MII_TG3_DSP_TAP26_RMRXSTO;
2433 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2434 tg3_phy_toggle_auxctl_smdsp(tp, false);
2435 }
2436
2437 val = tr32(TG3_CPMU_EEE_MODE);
2438 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2439 }
2440
2441 static int tg3_wait_macro_done(struct tg3 *tp)
2442 {
2443 int limit = 100;
2444
2445 while (limit--) {
2446 u32 tmp32;
2447
2448 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2449 if ((tmp32 & 0x1000) == 0)
2450 break;
2451 }
2452 }
2453 if (limit < 0)
2454 return -EBUSY;
2455
2456 return 0;
2457 }
2458
2459 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2460 {
2461 static const u32 test_pat[4][6] = {
2462 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2463 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2464 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2465 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2466 };
2467 int chan;
2468
2469 for (chan = 0; chan < 4; chan++) {
2470 int i;
2471
2472 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2473 (chan * 0x2000) | 0x0200);
2474 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2475
2476 for (i = 0; i < 6; i++)
2477 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2478 test_pat[chan][i]);
2479
2480 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2481 if (tg3_wait_macro_done(tp)) {
2482 *resetp = 1;
2483 return -EBUSY;
2484 }
2485
2486 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2487 (chan * 0x2000) | 0x0200);
2488 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2489 if (tg3_wait_macro_done(tp)) {
2490 *resetp = 1;
2491 return -EBUSY;
2492 }
2493
2494 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2495 if (tg3_wait_macro_done(tp)) {
2496 *resetp = 1;
2497 return -EBUSY;
2498 }
2499
2500 for (i = 0; i < 6; i += 2) {
2501 u32 low, high;
2502
2503 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2504 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2505 tg3_wait_macro_done(tp)) {
2506 *resetp = 1;
2507 return -EBUSY;
2508 }
2509 low &= 0x7fff;
2510 high &= 0x000f;
2511 if (low != test_pat[chan][i] ||
2512 high != test_pat[chan][i+1]) {
2513 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2514 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2515 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2516
2517 return -EBUSY;
2518 }
2519 }
2520 }
2521
2522 return 0;
2523 }
2524
2525 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2526 {
2527 int chan;
2528
2529 for (chan = 0; chan < 4; chan++) {
2530 int i;
2531
2532 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2533 (chan * 0x2000) | 0x0200);
2534 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2535 for (i = 0; i < 6; i++)
2536 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2537 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2538 if (tg3_wait_macro_done(tp))
2539 return -EBUSY;
2540 }
2541
2542 return 0;
2543 }
2544
2545 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2546 {
2547 u32 reg32, phy9_orig;
2548 int retries, do_phy_reset, err;
2549
2550 retries = 10;
2551 do_phy_reset = 1;
2552 do {
2553 if (do_phy_reset) {
2554 err = tg3_bmcr_reset(tp);
2555 if (err)
2556 return err;
2557 do_phy_reset = 0;
2558 }
2559
2560 /* Disable transmitter and interrupt. */
2561 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2562 continue;
2563
2564 reg32 |= 0x3000;
2565 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2566
2567 /* Set full-duplex, 1000 mbps. */
2568 tg3_writephy(tp, MII_BMCR,
2569 BMCR_FULLDPLX | BMCR_SPEED1000);
2570
2571 /* Set to master mode. */
2572 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2573 continue;
2574
2575 tg3_writephy(tp, MII_CTRL1000,
2576 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2577
2578 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2579 if (err)
2580 return err;
2581
2582 /* Block the PHY control access. */
2583 tg3_phydsp_write(tp, 0x8005, 0x0800);
2584
2585 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2586 if (!err)
2587 break;
2588 } while (--retries);
2589
2590 err = tg3_phy_reset_chanpat(tp);
2591 if (err)
2592 return err;
2593
2594 tg3_phydsp_write(tp, 0x8005, 0x0000);
2595
2596 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2597 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2598
2599 tg3_phy_toggle_auxctl_smdsp(tp, false);
2600
2601 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2602
2603 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2604 if (err)
2605 return err;
2606
2607 reg32 &= ~0x3000;
2608 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2609
2610 return 0;
2611 }
2612
2613 static void tg3_carrier_off(struct tg3 *tp)
2614 {
2615 netif_carrier_off(tp->dev);
2616 tp->link_up = false;
2617 }
2618
2619 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2620 {
2621 if (tg3_flag(tp, ENABLE_ASF))
2622 netdev_warn(tp->dev,
2623 "Management side-band traffic will be interrupted during phy settings change\n");
2624 }
2625
2626 /* This will reset the tigon3 PHY. */
2629 static int tg3_phy_reset(struct tg3 *tp)
2630 {
2631 u32 val, cpmuctrl;
2632 int err;
2633
2634 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2635 val = tr32(GRC_MISC_CFG);
2636 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2637 udelay(40);
2638 }
2639 err = tg3_readphy(tp, MII_BMSR, &val);
2640 err |= tg3_readphy(tp, MII_BMSR, &val);
2641 if (err != 0)
2642 return -EBUSY;
2643
2644 if (netif_running(tp->dev) && tp->link_up) {
2645 netif_carrier_off(tp->dev);
2646 tg3_link_report(tp);
2647 }
2648
2649 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2650 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2651 tg3_asic_rev(tp) == ASIC_REV_5705) {
2652 err = tg3_phy_reset_5703_4_5(tp);
2653 if (err)
2654 return err;
2655 goto out;
2656 }
2657
2658 cpmuctrl = 0;
2659 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2660 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2661 cpmuctrl = tr32(TG3_CPMU_CTRL);
2662 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2663 tw32(TG3_CPMU_CTRL,
2664 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2665 }
2666
2667 err = tg3_bmcr_reset(tp);
2668 if (err)
2669 return err;
2670
2671 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2672 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2673 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2674
2675 tw32(TG3_CPMU_CTRL, cpmuctrl);
2676 }
2677
2678 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2679 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2680 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2681 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2682 CPMU_LSPD_1000MB_MACCLK_12_5) {
2683 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2684 udelay(40);
2685 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2686 }
2687 }
2688
2689 if (tg3_flag(tp, 5717_PLUS) &&
2690 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2691 return 0;
2692
2693 tg3_phy_apply_otp(tp);
2694
2695 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2696 tg3_phy_toggle_apd(tp, true);
2697 else
2698 tg3_phy_toggle_apd(tp, false);
2699
2700 out:
2701 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2702 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2703 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2704 tg3_phydsp_write(tp, 0x000a, 0x0323);
2705 tg3_phy_toggle_auxctl_smdsp(tp, false);
2706 }
2707
2708 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2709 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2710 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2711 }
2712
2713 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2714 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2715 tg3_phydsp_write(tp, 0x000a, 0x310b);
2716 tg3_phydsp_write(tp, 0x201f, 0x9506);
2717 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2718 tg3_phy_toggle_auxctl_smdsp(tp, false);
2719 }
2720 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2721 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2722 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2723 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2724 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2725 tg3_writephy(tp, MII_TG3_TEST1,
2726 MII_TG3_TEST1_TRIM_EN | 0x4);
2727 } else
2728 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2729
2730 tg3_phy_toggle_auxctl_smdsp(tp, false);
2731 }
2732 }
2733
2734 /* Set Extended packet length bit (bit 14) on all chips
2735 * that support jumbo frames. */
2736 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2737 /* Cannot do read-modify-write on 5401 */
2738 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2739 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2740 /* Set bit 14 with read-modify-write to preserve other bits */
2741 err = tg3_phy_auxctl_read(tp,
2742 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2743 if (!err)
2744 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2745 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2746 }
2747
2748 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2749 * jumbo frame transmission.
2750 */
2751 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2752 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2753 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2754 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2755 }
2756
2757 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2758 /* adjust output voltage */
2759 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2760 }
2761
2762 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2763 tg3_phydsp_write(tp, 0xffb, 0x4000);
2764
2765 tg3_phy_toggle_automdix(tp, true);
2766 tg3_phy_set_wirespeed(tp);
2767 return 0;
2768 }
2769
2770 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2771 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2772 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2773 TG3_GPIO_MSG_NEED_VAUX)
2774 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2775 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2776 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2777 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2778 (TG3_GPIO_MSG_DRVR_PRES << 12))
2779
2780 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2781 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2782 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2783 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2784 (TG3_GPIO_MSG_NEED_VAUX << 12))
2785
2786 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2787 {
2788 u32 status, shift;
2789
2790 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2791 tg3_asic_rev(tp) == ASIC_REV_5719)
2792 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2793 else
2794 status = tr32(TG3_CPMU_DRV_STATUS);
2795
2796 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2797 status &= ~(TG3_GPIO_MSG_MASK << shift);
2798 status |= (newstat << shift);
2799
2800 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2801 tg3_asic_rev(tp) == ASIC_REV_5719)
2802 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2803 else
2804 tw32(TG3_CPMU_DRV_STATUS, status);
2805
2806 return status >> TG3_APE_GPIO_MSG_SHIFT;
2807 }
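
/* Worked example (illustrative): each PCI function owns one 4-bit
 * nibble of the shared status word. Assuming TG3_APE_GPIO_MSG_SHIFT
 * is 4, a device at pci_fn == 2 posting TG3_GPIO_MSG_NEED_VAUX (0x2)
 * uses shift = 4 + 4 * 2 = 12, clearing bits 15:12 and OR-ing in
 * 0x2000. The result is shifted back down so callers can test it
 * against the TG3_GPIO_MSG_ALL_*_MASK masks directly.
 */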
2808
2809 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2810 {
2811 if (!tg3_flag(tp, IS_NIC))
2812 return 0;
2813
2814 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2815 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2816 tg3_asic_rev(tp) == ASIC_REV_5720) {
2817 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2818 return -EIO;
2819
2820 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2821
2822 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2823 TG3_GRC_LCLCTL_PWRSW_DELAY);
2824
2825 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2826 } else {
2827 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2828 TG3_GRC_LCLCTL_PWRSW_DELAY);
2829 }
2830
2831 return 0;
2832 }
2833
2834 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2835 {
2836 u32 grc_local_ctrl;
2837
2838 if (!tg3_flag(tp, IS_NIC) ||
2839 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2840 tg3_asic_rev(tp) == ASIC_REV_5701)
2841 return;
2842
2843 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2844
2845 tw32_wait_f(GRC_LOCAL_CTRL,
2846 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2847 TG3_GRC_LCLCTL_PWRSW_DELAY);
2848
2849 tw32_wait_f(GRC_LOCAL_CTRL,
2850 grc_local_ctrl,
2851 TG3_GRC_LCLCTL_PWRSW_DELAY);
2852
2853 tw32_wait_f(GRC_LOCAL_CTRL,
2854 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2855 TG3_GRC_LCLCTL_PWRSW_DELAY);
2856 }
2857
2858 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2859 {
2860 if (!tg3_flag(tp, IS_NIC))
2861 return;
2862
2863 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2864 tg3_asic_rev(tp) == ASIC_REV_5701) {
2865 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2866 (GRC_LCLCTRL_GPIO_OE0 |
2867 GRC_LCLCTRL_GPIO_OE1 |
2868 GRC_LCLCTRL_GPIO_OE2 |
2869 GRC_LCLCTRL_GPIO_OUTPUT0 |
2870 GRC_LCLCTRL_GPIO_OUTPUT1),
2871 TG3_GRC_LCLCTL_PWRSW_DELAY);
2872 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2873 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2874 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2875 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2876 GRC_LCLCTRL_GPIO_OE1 |
2877 GRC_LCLCTRL_GPIO_OE2 |
2878 GRC_LCLCTRL_GPIO_OUTPUT0 |
2879 GRC_LCLCTRL_GPIO_OUTPUT1 |
2880 tp->grc_local_ctrl;
2881 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2882 TG3_GRC_LCLCTL_PWRSW_DELAY);
2883
2884 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2885 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2886 TG3_GRC_LCLCTL_PWRSW_DELAY);
2887
2888 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2889 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2890 TG3_GRC_LCLCTL_PWRSW_DELAY);
2891 } else {
2892 u32 no_gpio2;
2893 u32 grc_local_ctrl = 0;
2894
2895 /* Workaround to prevent overdrawing Amps. */
2896 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2897 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2898 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2899 grc_local_ctrl,
2900 TG3_GRC_LCLCTL_PWRSW_DELAY);
2901 }
2902
2903 /* On 5753 and variants, GPIO2 cannot be used. */
2904 no_gpio2 = tp->nic_sram_data_cfg &
2905 NIC_SRAM_DATA_CFG_NO_GPIO2;
2906
2907 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2908 GRC_LCLCTRL_GPIO_OE1 |
2909 GRC_LCLCTRL_GPIO_OE2 |
2910 GRC_LCLCTRL_GPIO_OUTPUT1 |
2911 GRC_LCLCTRL_GPIO_OUTPUT2;
2912 if (no_gpio2) {
2913 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2914 GRC_LCLCTRL_GPIO_OUTPUT2);
2915 }
2916 tw32_wait_f(GRC_LOCAL_CTRL,
2917 tp->grc_local_ctrl | grc_local_ctrl,
2918 TG3_GRC_LCLCTL_PWRSW_DELAY);
2919
2920 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2921
2922 tw32_wait_f(GRC_LOCAL_CTRL,
2923 tp->grc_local_ctrl | grc_local_ctrl,
2924 TG3_GRC_LCLCTL_PWRSW_DELAY);
2925
2926 if (!no_gpio2) {
2927 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2928 tw32_wait_f(GRC_LOCAL_CTRL,
2929 tp->grc_local_ctrl | grc_local_ctrl,
2930 TG3_GRC_LCLCTL_PWRSW_DELAY);
2931 }
2932 }
2933 }
2934
2935 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2936 {
2937 u32 msg = 0;
2938
2939 /* Serialize power state transitions */
2940 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2941 return;
2942
2943 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2944 msg = TG3_GPIO_MSG_NEED_VAUX;
2945
2946 msg = tg3_set_function_status(tp, msg);
2947
2948 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2949 goto done;
2950
2951 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2952 tg3_pwrsrc_switch_to_vaux(tp);
2953 else
2954 tg3_pwrsrc_die_with_vmain(tp);
2955
2956 done:
2957 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2958 }
2959
2960 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2961 {
2962 bool need_vaux = false;
2963
2964 /* The GPIOs do something completely different on 57765. */
2965 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2966 return;
2967
2968 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2969 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2970 tg3_asic_rev(tp) == ASIC_REV_5720) {
2971 tg3_frob_aux_power_5717(tp, include_wol ?
2972 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2973 return;
2974 }
2975
2976 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2977 struct net_device *dev_peer;
2978
2979 dev_peer = pci_get_drvdata(tp->pdev_peer);
2980
2981 /* remove_one() may have been run on the peer. */
2982 if (dev_peer) {
2983 struct tg3 *tp_peer = netdev_priv(dev_peer);
2984
2985 if (tg3_flag(tp_peer, INIT_COMPLETE))
2986 return;
2987
2988 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2989 tg3_flag(tp_peer, ENABLE_ASF))
2990 need_vaux = true;
2991 }
2992 }
2993
2994 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2995 tg3_flag(tp, ENABLE_ASF))
2996 need_vaux = true;
2997
2998 if (need_vaux)
2999 tg3_pwrsrc_switch_to_vaux(tp);
3000 else
3001 tg3_pwrsrc_die_with_vmain(tp);
3002 }
3003
3004 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3005 {
3006 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3007 return 1;
3008 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3009 if (speed != SPEED_10)
3010 return 1;
3011 } else if (speed == SPEED_10)
3012 return 1;
3013
3014 return 0;
3015 }
3016
3017 static bool tg3_phy_power_bug(struct tg3 *tp)
3018 {
3019 switch (tg3_asic_rev(tp)) {
3020 case ASIC_REV_5700:
3021 case ASIC_REV_5704:
3022 return true;
3023 case ASIC_REV_5780:
3024 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3025 return true;
3026 return false;
3027 case ASIC_REV_5717:
3028 if (!tp->pci_fn)
3029 return true;
3030 return false;
3031 case ASIC_REV_5719:
3032 case ASIC_REV_5720:
3033 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3034 !tp->pci_fn)
3035 return true;
3036 return false;
3037 }
3038
3039 return false;
3040 }
3041
3042 static bool tg3_phy_led_bug(struct tg3 *tp)
3043 {
3044 switch (tg3_asic_rev(tp)) {
3045 case ASIC_REV_5719:
3046 case ASIC_REV_5720:
3047 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3048 !tp->pci_fn)
3049 return true;
3050 return false;
3051 }
3052
3053 return false;
3054 }
3055
3056 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3057 {
3058 u32 val;
3059
3060 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3061 return;
3062
3063 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3064 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3065 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3066 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3067
3068 sg_dig_ctrl |=
3069 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3070 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3071 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3072 }
3073 return;
3074 }
3075
3076 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3077 tg3_bmcr_reset(tp);
3078 val = tr32(GRC_MISC_CFG);
3079 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3080 udelay(40);
3081 return;
3082 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3083 u32 phytest;
3084 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3085 u32 phy;
3086
3087 tg3_writephy(tp, MII_ADVERTISE, 0);
3088 tg3_writephy(tp, MII_BMCR,
3089 BMCR_ANENABLE | BMCR_ANRESTART);
3090
3091 tg3_writephy(tp, MII_TG3_FET_TEST,
3092 phytest | MII_TG3_FET_SHADOW_EN);
3093 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3094 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3095 tg3_writephy(tp,
3096 MII_TG3_FET_SHDW_AUXMODE4,
3097 phy);
3098 }
3099 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3100 }
3101 return;
3102 } else if (do_low_power) {
3103 if (!tg3_phy_led_bug(tp))
3104 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3105 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3106
3107 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3108 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3109 MII_TG3_AUXCTL_PCTL_VREG_11V;
3110 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3111 }
3112
3113 /* The PHY should not be powered down on some chips because
3114 * of bugs.
3115 */
3116 if (tg3_phy_power_bug(tp))
3117 return;
3118
3119 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3120 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3121 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3122 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3123 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3124 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3125 }
3126
3127 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3128 }
3129
3130 /* tp->lock is held. */
3131 static int tg3_nvram_lock(struct tg3 *tp)
3132 {
3133 if (tg3_flag(tp, NVRAM)) {
3134 int i;
3135
3136 if (tp->nvram_lock_cnt == 0) {
3137 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3138 for (i = 0; i < 8000; i++) {
3139 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3140 break;
3141 udelay(20);
3142 }
3143 if (i == 8000) {
3144 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3145 return -ENODEV;
3146 }
3147 }
3148 tp->nvram_lock_cnt++;
3149 }
3150 return 0;
3151 }
3152
3153 /* tp->lock is held. */
3154 static void tg3_nvram_unlock(struct tg3 *tp)
3155 {
3156 if (tg3_flag(tp, NVRAM)) {
3157 if (tp->nvram_lock_cnt > 0)
3158 tp->nvram_lock_cnt--;
3159 if (tp->nvram_lock_cnt == 0)
3160 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3161 }
3162 }
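
/* Usage sketch (illustrative only): the lock nests via nvram_lock_cnt,
 * so balanced pairs stay safe even when a callee locks again:
 *
 *	if (!tg3_nvram_lock(tp)) {
 *		...			(helpers may lock/unlock too)
 *		tg3_nvram_unlock(tp);
 *	}
 *
 * Only the outermost unlock releases the hardware arbiter (SWARB).
 */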
3163
3164 /* tp->lock is held. */
3165 static void tg3_enable_nvram_access(struct tg3 *tp)
3166 {
3167 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3168 u32 nvaccess = tr32(NVRAM_ACCESS);
3169
3170 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3171 }
3172 }
3173
3174 /* tp->lock is held. */
3175 static void tg3_disable_nvram_access(struct tg3 *tp)
3176 {
3177 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3178 u32 nvaccess = tr32(NVRAM_ACCESS);
3179
3180 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3181 }
3182 }
3183
3184 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3185 u32 offset, u32 *val)
3186 {
3187 u32 tmp;
3188 int i;
3189
3190 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3191 return -EINVAL;
3192
3193 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3194 EEPROM_ADDR_DEVID_MASK |
3195 EEPROM_ADDR_READ);
3196 tw32(GRC_EEPROM_ADDR,
3197 tmp |
3198 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3199 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3200 EEPROM_ADDR_ADDR_MASK) |
3201 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3202
3203 for (i = 0; i < 1000; i++) {
3204 tmp = tr32(GRC_EEPROM_ADDR);
3205
3206 if (tmp & EEPROM_ADDR_COMPLETE)
3207 break;
3208 msleep(1);
3209 }
3210 if (!(tmp & EEPROM_ADDR_COMPLETE))
3211 return -EBUSY;
3212
3213 tmp = tr32(GRC_EEPROM_DATA);
3214
3215 /*
3216 * The data will always be opposite the native endian
3217 * format. Perform a blind byteswap to compensate.
3218 */
3219 *val = swab32(tmp);
3220
3221 return 0;
3222 }
3223
3224 #define NVRAM_CMD_TIMEOUT 10000
3225
3226 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3227 {
3228 int i;
3229
3230 tw32(NVRAM_CMD, nvram_cmd);
3231 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3232 usleep_range(10, 40);
3233 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3234 udelay(10);
3235 break;
3236 }
3237 }
3238
3239 if (i == NVRAM_CMD_TIMEOUT)
3240 return -EBUSY;
3241
3242 return 0;
3243 }
3244
3245 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3246 {
3247 if (tg3_flag(tp, NVRAM) &&
3248 tg3_flag(tp, NVRAM_BUFFERED) &&
3249 tg3_flag(tp, FLASH) &&
3250 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3251 (tp->nvram_jedecnum == JEDEC_ATMEL))
3252
3253 addr = ((addr / tp->nvram_pagesize) <<
3254 ATMEL_AT45DB0X1B_PAGE_POS) +
3255 (addr % tp->nvram_pagesize);
3256
3257 return addr;
3258 }
3259
3260 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3261 {
3262 if (tg3_flag(tp, NVRAM) &&
3263 tg3_flag(tp, NVRAM_BUFFERED) &&
3264 tg3_flag(tp, FLASH) &&
3265 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3266 (tp->nvram_jedecnum == JEDEC_ATMEL))
3267
3268 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3269 tp->nvram_pagesize) +
3270 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3271
3272 return addr;
3273 }
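
/* Worked example (illustrative, assuming an Atmel AT45DB011B-style
 * part with 264-byte pages and ATMEL_AT45DB0X1B_PAGE_POS == 9):
 * linear address 1000 maps to page 1000 / 264 = 3, byte 1000 % 264 =
 * 208, so tg3_nvram_phys_addr() returns (3 << 9) + 208 = 0x6d0, and
 * tg3_nvram_logical_addr() performs the exact inverse mapping.
 */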
3274
3275 /* NOTE: Data read in from NVRAM is byteswapped according to
3276 * the byteswapping settings for all other register accesses.
3277 * tg3 devices are BE devices, so on a BE machine, the data
3278 * returned will be exactly as it is seen in NVRAM. On a LE
3279 * machine, the 32-bit value will be byteswapped.
3280 */
3281 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3282 {
3283 int ret;
3284
3285 if (!tg3_flag(tp, NVRAM))
3286 return tg3_nvram_read_using_eeprom(tp, offset, val);
3287
3288 offset = tg3_nvram_phys_addr(tp, offset);
3289
3290 if (offset > NVRAM_ADDR_MSK)
3291 return -EINVAL;
3292
3293 ret = tg3_nvram_lock(tp);
3294 if (ret)
3295 return ret;
3296
3297 tg3_enable_nvram_access(tp);
3298
3299 tw32(NVRAM_ADDR, offset);
3300 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3301 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3302
3303 if (ret == 0)
3304 *val = tr32(NVRAM_RDDATA);
3305
3306 tg3_disable_nvram_access(tp);
3307
3308 tg3_nvram_unlock(tp);
3309
3310 return ret;
3311 }
3312
3313 /* Ensures NVRAM data is in bytestream format. */
3314 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3315 {
3316 u32 v;
3317 int res = tg3_nvram_read(tp, offset, &v);
3318 if (!res)
3319 *val = cpu_to_be32(v);
3320 return res;
3321 }
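
/* Usage sketch (illustrative; TG3_NVM_MAGIC is defined in tg3.h):
 * because tg3_nvram_read_be32() returns bytestream (big-endian) data,
 * a caller can check a signature word without caring about host
 * endianness:
 *
 *	__be32 magic;
 *
 *	if (!tg3_nvram_read_be32(tp, 0, &magic) &&
 *	    magic == cpu_to_be32(TG3_NVM_MAGIC))
 *		... image looks sane ...
 */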
3322
3323 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3324 u32 offset, u32 len, u8 *buf)
3325 {
3326 int i, j, rc = 0;
3327 u32 val;
3328
3329 for (i = 0; i < len; i += 4) {
3330 u32 addr;
3331 __be32 data;
3332
3333 addr = offset + i;
3334
3335 memcpy(&data, buf + i, 4);
3336
3337 /*
3338 * The SEEPROM interface expects the data to always be opposite
3339 * the native endian format. We accomplish this by reversing
3340 * all the operations that would have been performed on the
3341 * data from a call to tg3_nvram_read_be32().
3342 */
3343 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3344
3345 val = tr32(GRC_EEPROM_ADDR);
3346 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3347
3348 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3349 EEPROM_ADDR_READ);
3350 tw32(GRC_EEPROM_ADDR, val |
3351 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3352 (addr & EEPROM_ADDR_ADDR_MASK) |
3353 EEPROM_ADDR_START |
3354 EEPROM_ADDR_WRITE);
3355
3356 for (j = 0; j < 1000; j++) {
3357 val = tr32(GRC_EEPROM_ADDR);
3358
3359 if (val & EEPROM_ADDR_COMPLETE)
3360 break;
3361 msleep(1);
3362 }
3363 if (!(val & EEPROM_ADDR_COMPLETE)) {
3364 rc = -EBUSY;
3365 break;
3366 }
3367 }
3368
3369 return rc;
3370 }
3371
3372 /* offset and length are dword aligned */
3373 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3374 u8 *buf)
3375 {
3376 int ret = 0;
3377 u32 pagesize = tp->nvram_pagesize;
3378 u32 pagemask = pagesize - 1;
3379 u32 nvram_cmd;
3380 u8 *tmp;
3381
3382 tmp = kmalloc(pagesize, GFP_KERNEL);
3383 if (tmp == NULL)
3384 return -ENOMEM;
3385
3386 while (len) {
3387 int j;
3388 u32 phy_addr, page_off, size;
3389
3390 phy_addr = offset & ~pagemask;
3391
3392 for (j = 0; j < pagesize; j += 4) {
3393 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3394 (__be32 *) (tmp + j));
3395 if (ret)
3396 break;
3397 }
3398 if (ret)
3399 break;
3400
3401 page_off = offset & pagemask;
3402 size = pagesize;
3403 if (len < size)
3404 size = len;
3405
3406 len -= size;
3407
3408 memcpy(tmp + page_off, buf, size);
3409
3410 offset = offset + (pagesize - page_off);
3411
3412 tg3_enable_nvram_access(tp);
3413
3414 /*
3415 * Before we can erase the flash page, we need
3416 * to issue a special "write enable" command.
3417 */
3418 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3419
3420 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3421 break;
3422
3423 /* Erase the target page */
3424 tw32(NVRAM_ADDR, phy_addr);
3425
3426 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3427 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3428
3429 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3430 break;
3431
3432 /* Issue another write enable to start the write. */
3433 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3434
3435 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3436 break;
3437
3438 for (j = 0; j < pagesize; j += 4) {
3439 __be32 data;
3440
3441 data = *((__be32 *) (tmp + j));
3442
3443 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3444
3445 tw32(NVRAM_ADDR, phy_addr + j);
3446
3447 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3448 NVRAM_CMD_WR;
3449
3450 if (j == 0)
3451 nvram_cmd |= NVRAM_CMD_FIRST;
3452 else if (j == (pagesize - 4))
3453 nvram_cmd |= NVRAM_CMD_LAST;
3454
3455 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3456 if (ret)
3457 break;
3458 }
3459 if (ret)
3460 break;
3461 }
3462
3463 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3464 tg3_nvram_exec_cmd(tp, nvram_cmd);
3465
3466 kfree(tmp);
3467
3468 return ret;
3469 }
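
/* Descriptive summary (no new behavior): an unbuffered flash write is
 * a page read-modify-write: read the whole page into tmp[], overlay
 * the caller's bytes, issue WREN plus a page ERASE, then WREN again
 * and stream the page back one dword at a time with FIRST/LAST
 * framing.
 */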
3470
3471 /* offset and length are dword aligned */
3472 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3473 u8 *buf)
3474 {
3475 int i, ret = 0;
3476
3477 for (i = 0; i < len; i += 4, offset += 4) {
3478 u32 page_off, phy_addr, nvram_cmd;
3479 __be32 data;
3480
3481 memcpy(&data, buf + i, 4);
3482 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3483
3484 page_off = offset % tp->nvram_pagesize;
3485
3486 phy_addr = tg3_nvram_phys_addr(tp, offset);
3487
3488 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3489
3490 if (page_off == 0 || i == 0)
3491 nvram_cmd |= NVRAM_CMD_FIRST;
3492 if (page_off == (tp->nvram_pagesize - 4))
3493 nvram_cmd |= NVRAM_CMD_LAST;
3494
3495 if (i == (len - 4))
3496 nvram_cmd |= NVRAM_CMD_LAST;
3497
3498 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3499 !tg3_flag(tp, FLASH) ||
3500 !tg3_flag(tp, 57765_PLUS))
3501 tw32(NVRAM_ADDR, phy_addr);
3502
3503 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3504 !tg3_flag(tp, 5755_PLUS) &&
3505 (tp->nvram_jedecnum == JEDEC_ST) &&
3506 (nvram_cmd & NVRAM_CMD_FIRST)) {
3507 u32 cmd;
3508
3509 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3510 ret = tg3_nvram_exec_cmd(tp, cmd);
3511 if (ret)
3512 break;
3513 }
3514 if (!tg3_flag(tp, FLASH)) {
3515 /* We always do complete word writes to eeprom. */
3516 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3517 }
3518
3519 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3520 if (ret)
3521 break;
3522 }
3523 return ret;
3524 }
3525
3526 /* offset and length are dword aligned */
3527 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3528 {
3529 int ret;
3530
3531 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3532 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3533 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3534 udelay(40);
3535 }
3536
3537 if (!tg3_flag(tp, NVRAM)) {
3538 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3539 } else {
3540 u32 grc_mode;
3541
3542 ret = tg3_nvram_lock(tp);
3543 if (ret)
3544 return ret;
3545
3546 tg3_enable_nvram_access(tp);
3547 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3548 tw32(NVRAM_WRITE1, 0x406);
3549
3550 grc_mode = tr32(GRC_MODE);
3551 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3552
3553 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3554 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3555 buf);
3556 } else {
3557 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3558 buf);
3559 }
3560
3561 grc_mode = tr32(GRC_MODE);
3562 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3563
3564 tg3_disable_nvram_access(tp);
3565 tg3_nvram_unlock(tp);
3566 }
3567
3568 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3569 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3570 udelay(40);
3571 }
3572
3573 return ret;
3574 }
3575
3576 #define RX_CPU_SCRATCH_BASE 0x30000
3577 #define RX_CPU_SCRATCH_SIZE 0x04000
3578 #define TX_CPU_SCRATCH_BASE 0x34000
3579 #define TX_CPU_SCRATCH_SIZE 0x04000
3580
3581 /* tp->lock is held. */
3582 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3583 {
3584 int i;
3585 const int iters = 10000;
3586
3587 for (i = 0; i < iters; i++) {
3588 tw32(cpu_base + CPU_STATE, 0xffffffff);
3589 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3590 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3591 break;
3592 if (pci_channel_offline(tp->pdev))
3593 return -EBUSY;
3594 }
3595
3596 return (i == iters) ? -EBUSY : 0;
3597 }
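
/* Note (descriptive): the halt request is re-issued on every pass of
 * the loop above, rather than written once and polled, until the CPU
 * latches CPU_MODE_HALT or the PCI channel goes offline.
 */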
3598
3599 /* tp->lock is held. */
3600 static int tg3_rxcpu_pause(struct tg3 *tp)
3601 {
3602 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3603
3604 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3605 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3606 udelay(10);
3607
3608 return rc;
3609 }
3610
3611 /* tp->lock is held. */
3612 static int tg3_txcpu_pause(struct tg3 *tp)
3613 {
3614 return tg3_pause_cpu(tp, TX_CPU_BASE);
3615 }
3616
3617 /* tp->lock is held. */
3618 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3619 {
3620 tw32(cpu_base + CPU_STATE, 0xffffffff);
3621 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3622 }
3623
3624 /* tp->lock is held. */
3625 static void tg3_rxcpu_resume(struct tg3 *tp)
3626 {
3627 tg3_resume_cpu(tp, RX_CPU_BASE);
3628 }
3629
3630 /* tp->lock is held. */
3631 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3632 {
3633 int rc;
3634
3635 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3636
3637 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3638 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3639
3640 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3641 return 0;
3642 }
3643 if (cpu_base == RX_CPU_BASE) {
3644 rc = tg3_rxcpu_pause(tp);
3645 } else {
3646 /*
3647 * There is only an Rx CPU for the 5750 derivative in the
3648 * BCM4785.
3649 */
3650 if (tg3_flag(tp, IS_SSB_CORE))
3651 return 0;
3652
3653 rc = tg3_txcpu_pause(tp);
3654 }
3655
3656 if (rc) {
3657 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3658 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3659 return -ENODEV;
3660 }
3661
3662 /* Clear firmware's nvram arbitration. */
3663 if (tg3_flag(tp, NVRAM))
3664 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3665 return 0;
3666 }
3667
3668 static int tg3_fw_data_len(struct tg3 *tp,
3669 const struct tg3_firmware_hdr *fw_hdr)
3670 {
3671 int fw_len;
3672
3673 /* Non-fragmented firmware has one firmware header followed by a
3674 * contiguous chunk of data to be written. The length field in that
3675 * header is not the length of data to be written but the complete
3676 * length of the bss. The data length is determined based on
3677 * tp->fw->size minus headers.
3678 *
3679 * Fragmented firmware has a main header followed by multiple
3680 * fragments. Each fragment is identical to non-fragmented firmware
3681 * with a firmware header followed by a contiguous chunk of data. In
3682 * the main header, the length field is unused and set to 0xffffffff.
3683 * In each fragment header, the length is the entire size of that
3684 * fragment, i.e. fragment data + header length. The data length is
3685 * therefore the length field in the header minus TG3_FW_HDR_LEN.
3686 */
3687 if (tp->fw_len == 0xffffffff)
3688 fw_len = be32_to_cpu(fw_hdr->len);
3689 else
3690 fw_len = tp->fw->size;
3691
3692 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3693 }
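
/* Illustrative layout sketch (fragment sizes n0, n1 are hypothetical):
 * a fragmented image as described above is
 *
 *	[ main hdr,  len = 0xffffffff                          ]
 *	[ frag hdr,  len = TG3_FW_HDR_LEN + n0 ][ n0 data bytes ]
 *	[ frag hdr,  len = TG3_FW_HDR_LEN + n1 ][ n1 data bytes ]
 *	...
 *
 * so each fragment contributes (len - TG3_FW_HDR_LEN) / sizeof(u32)
 * dwords, which is what tg3_fw_data_len() returns for it.
 */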
3694
3695 /* tp->lock is held. */
3696 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3697 u32 cpu_scratch_base, int cpu_scratch_size,
3698 const struct tg3_firmware_hdr *fw_hdr)
3699 {
3700 int err, i;
3701 void (*write_op)(struct tg3 *, u32, u32);
3702 int total_len = tp->fw->size;
3703
3704 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3705 netdev_err(tp->dev,
3706 "%s: Trying to load TX cpu firmware which is 5705\n",
3707 __func__);
3708 return -EINVAL;
3709 }
3710
3711 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3712 write_op = tg3_write_mem;
3713 else
3714 write_op = tg3_write_indirect_reg32;
3715
3716 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3717 /* It is possible that bootcode is still loading at this point.
3718 * Get the nvram lock first before halting the cpu.
3719 */
3720 int lock_err = tg3_nvram_lock(tp);
3721 err = tg3_halt_cpu(tp, cpu_base);
3722 if (!lock_err)
3723 tg3_nvram_unlock(tp);
3724 if (err)
3725 goto out;
3726
3727 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3728 write_op(tp, cpu_scratch_base + i, 0);
3729 tw32(cpu_base + CPU_STATE, 0xffffffff);
3730 tw32(cpu_base + CPU_MODE,
3731 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3732 } else {
3733 /* Subtract additional main header for fragmented firmware and
3734 * advance to the first fragment
3735 */
3736 total_len -= TG3_FW_HDR_LEN;
3737 fw_hdr++;
3738 }
3739
3740 do {
3741 __be32 *fw_data = (__be32 *)(fw_hdr + 1);
3742 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3743 write_op(tp, cpu_scratch_base +
3744 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3745 (i * sizeof(u32)),
3746 be32_to_cpu(fw_data[i]));
3747
3748 total_len -= be32_to_cpu(fw_hdr->len);
3749
3750 /* Advance to next fragment */
3751 fw_hdr = (struct tg3_firmware_hdr *)
3752 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3753 } while (total_len > 0);
3754
3755 err = 0;
3756
3757 out:
3758 return err;
3759 }
3760
3761 /* tp->lock is held. */
3762 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3763 {
3764 int i;
3765 const int iters = 5;
3766
3767 tw32(cpu_base + CPU_STATE, 0xffffffff);
3768 tw32_f(cpu_base + CPU_PC, pc);
3769
3770 for (i = 0; i < iters; i++) {
3771 if (tr32(cpu_base + CPU_PC) == pc)
3772 break;
3773 tw32(cpu_base + CPU_STATE, 0xffffffff);
3774 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3775 tw32_f(cpu_base + CPU_PC, pc);
3776 udelay(1000);
3777 }
3778
3779 return (i == iters) ? -EBUSY : 0;
3780 }
3781
3782 /* tp->lock is held. */
3783 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3784 {
3785 const struct tg3_firmware_hdr *fw_hdr;
3786 int err;
3787
3788 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3789
3790 /* Firmware blob starts with version numbers, followed by
3791 * start address and length. We are setting complete length.
3792 * length = end_address_of_bss - start_address_of_text.
3793 * Remainder is the blob to be loaded contiguously
3794 * from start address. */
3795
3796 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3797 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3798 fw_hdr);
3799 if (err)
3800 return err;
3801
3802 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3803 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3804 fw_hdr);
3805 if (err)
3806 return err;
3807
3808 /* Now startup only the RX cpu. */
3809 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3810 be32_to_cpu(fw_hdr->base_addr));
3811 if (err) {
3812 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3813 "should be %08x\n", __func__,
3814 tr32(RX_CPU_BASE + CPU_PC),
3815 be32_to_cpu(fw_hdr->base_addr));
3816 return -ENODEV;
3817 }
3818
3819 tg3_rxcpu_resume(tp);
3820
3821 return 0;
3822 }
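/*
 * Illustrative sketch only: pulling apart the three big-endian header words
 * described in the comment above. Field names follow struct tg3_firmware_hdr;
 * the helper itself is hypothetical and not called by the driver.
 */
static inline void tg3_fw_hdr_dump_sketch(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr =
		(const struct tg3_firmware_hdr *)tp->fw->data;

	netdev_info(tp->dev, "fw version %08x, load addr %08x, bss len %u\n",
		    be32_to_cpu(fw_hdr->version),
		    be32_to_cpu(fw_hdr->base_addr),
		    be32_to_cpu(fw_hdr->len));
}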
3823
3824 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3825 {
3826 const int iters = 1000;
3827 int i;
3828 u32 val;
3829
3830 /* Wait for boot code to complete initialization and enter service
3831 * loop. It is then safe to download service patches.
3832 */
3833 for (i = 0; i < iters; i++) {
3834 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3835 break;
3836
3837 udelay(10);
3838 }
3839
3840 if (i == iters) {
3841 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3842 return -EBUSY;
3843 }
3844
3845 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3846 if (val & 0xff) {
3847 netdev_warn(tp->dev,
3848 "Other patches exist. Not downloading EEE patch\n");
3849 return -EEXIST;
3850 }
3851
3852 return 0;
3853 }
3854
3855 /* tp->lock is held. */
3856 static void tg3_load_57766_firmware(struct tg3 *tp)
3857 {
3858 struct tg3_firmware_hdr *fw_hdr;
3859
3860 if (!tg3_flag(tp, NO_NVRAM))
3861 return;
3862
3863 if (tg3_validate_rxcpu_state(tp))
3864 return;
3865
3866 if (!tp->fw)
3867 return;
3868
3869 /* This firmware blob has a different format from older firmware
3870 * releases, as described below. The main difference is that the data
3871 * to be written is fragmented and destined for non-contiguous
3872 * locations.
3873 *
3874 * It begins with a firmware header identical to other firmware,
3875 * consisting of version, base addr and length. The length field here
3876 * is unused and set to 0xffffffff.
3877 *
3878 * This is followed by a series of firmware fragments, each
3879 * individually identical to the older format: a firmware header
3880 * followed by the data for that fragment, with the version field of
3881 * each fragment header unused. */
3882
3883 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3884 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3885 return;
3886
3887 if (tg3_rxcpu_pause(tp))
3888 return;
3889
3890 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3891 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3892
3893 tg3_rxcpu_resume(tp);
3894 }
3895
3896 /* tp->lock is held. */
3897 static int tg3_load_tso_firmware(struct tg3 *tp)
3898 {
3899 const struct tg3_firmware_hdr *fw_hdr;
3900 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3901 int err;
3902
3903 if (!tg3_flag(tp, FW_TSO))
3904 return 0;
3905
3906 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3907
3908 /* Firmware blob starts with version numbers, followed by
3909 * start address and length. We are setting complete length.
3910 * length = end_address_of_bss - start_address_of_text.
3911 * Remainder is the blob to be loaded contiguously
3912 * from start address. */
3913
3914 cpu_scratch_size = tp->fw_len;
3915
3916 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3917 cpu_base = RX_CPU_BASE;
3918 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3919 } else {
3920 cpu_base = TX_CPU_BASE;
3921 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3922 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3923 }
3924
3925 err = tg3_load_firmware_cpu(tp, cpu_base,
3926 cpu_scratch_base, cpu_scratch_size,
3927 fw_hdr);
3928 if (err)
3929 return err;
3930
3931 /* Now startup the cpu. */
3932 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3933 be32_to_cpu(fw_hdr->base_addr));
3934 if (err) {
3935 netdev_err(tp->dev,
3936 "%s fails to set CPU PC, is %08x should be %08x\n",
3937 __func__, tr32(cpu_base + CPU_PC),
3938 be32_to_cpu(fw_hdr->base_addr));
3939 return -ENODEV;
3940 }
3941
3942 tg3_resume_cpu(tp, cpu_base);
3943 return 0;
3944 }
3945
3946 /* tp->lock is held. */
3947 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
3948 int index)
3949 {
3950 u32 addr_high, addr_low;
3951
3952 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3953 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3954 (mac_addr[4] << 8) | mac_addr[5]);
3955
3956 if (index < 4) {
3957 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3958 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3959 } else {
3960 index -= 4;
3961 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3962 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3963 }
3964 }
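/*
 * Worked example (sketch, not driver code): with mac_addr 00:11:22:33:44:55,
 * the packing above yields addr_high = 0x00000011 (the first two octets land
 * in the low 16 bits of the high register) and addr_low = 0x22334455.
 */
static inline void tg3_mac_addr_pack_example(void)
{
	static const u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	u32 addr_high = (mac[0] << 8) | mac[1];		/* 0x00000011 */
	u32 addr_low = (mac[2] << 24) | (mac[3] << 16) |
		       (mac[4] << 8) | mac[5];		/* 0x22334455 */

	(void)addr_high;
	(void)addr_low;
}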
3965
3966 /* tp->lock is held. */
3967 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3968 {
3969 u32 addr_high;
3970 int i;
3971
3972 for (i = 0; i < 4; i++) {
3973 if (i == 1 && skip_mac_1)
3974 continue;
3975 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3976 }
3977
3978 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3979 tg3_asic_rev(tp) == ASIC_REV_5704) {
3980 for (i = 4; i < 16; i++)
3981 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3982 }
3983
3984 addr_high = (tp->dev->dev_addr[0] +
3985 tp->dev->dev_addr[1] +
3986 tp->dev->dev_addr[2] +
3987 tp->dev->dev_addr[3] +
3988 tp->dev->dev_addr[4] +
3989 tp->dev->dev_addr[5]) &
3990 TX_BACKOFF_SEED_MASK;
3991 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3992 }
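/*
 * Sketch of the backoff seed computation above (hypothetical helper): the
 * seed is simply the byte-wise sum of the station address, masked down to
 * TX_BACKOFF_SEED_MASK.
 */
static inline u32 tg3_backoff_seed_sketch(const u8 *mac)
{
	return (mac[0] + mac[1] + mac[2] +
		mac[3] + mac[4] + mac[5]) & TX_BACKOFF_SEED_MASK;
}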
3993
3994 static void tg3_enable_register_access(struct tg3 *tp)
3995 {
3996 /*
3997 * Make sure register accesses (indirect or otherwise) will function
3998 * correctly.
3999 */
4000 pci_write_config_dword(tp->pdev,
4001 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4002 }
4003
4004 static int tg3_power_up(struct tg3 *tp)
4005 {
4006 int err;
4007
4008 tg3_enable_register_access(tp);
4009
4010 err = pci_set_power_state(tp->pdev, PCI_D0);
4011 if (!err) {
4012 /* Switch out of Vaux if it is a NIC */
4013 tg3_pwrsrc_switch_to_vmain(tp);
4014 } else {
4015 netdev_err(tp->dev, "Transition to D0 failed\n");
4016 }
4017
4018 return err;
4019 }
4020
4021 static int tg3_setup_phy(struct tg3 *, bool);
4022
4023 static void tg3_power_down_prepare(struct tg3 *tp)
4024 {
4025 u32 misc_host_ctrl;
4026 bool device_should_wake, do_low_power;
4027
4028 tg3_enable_register_access(tp);
4029
4030 /* Restore the CLKREQ setting. */
4031 if (tg3_flag(tp, CLKREQ_BUG))
4032 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4033 PCI_EXP_LNKCTL_CLKREQ_EN);
4034
4035 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4036 tw32(TG3PCI_MISC_HOST_CTRL,
4037 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4038
4039 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4040 tg3_flag(tp, WOL_ENABLE);
4041
4042 if (tg3_flag(tp, USE_PHYLIB)) {
4043 do_low_power = false;
4044 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4045 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4046 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4047 struct phy_device *phydev;
4048 u32 phyid;
4049
4050 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4051
4052 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4053
4054 tp->link_config.speed = phydev->speed;
4055 tp->link_config.duplex = phydev->duplex;
4056 tp->link_config.autoneg = phydev->autoneg;
4057 ethtool_convert_link_mode_to_legacy_u32(
4058 &tp->link_config.advertising,
4059 phydev->advertising);
4060
4061 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4062 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4063 advertising);
4064 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4065 advertising);
4066 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4067 advertising);
4068
4069 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4070 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4071 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4072 advertising);
4073 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4074 advertising);
4075 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4076 advertising);
4077 } else {
4078 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4079 advertising);
4080 }
4081 }
4082
4083 linkmode_copy(phydev->advertising, advertising);
4084 phy_start_aneg(phydev);
4085
4086 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4087 if (phyid != PHY_ID_BCMAC131) {
4088 phyid &= PHY_BCM_OUI_MASK;
4089 if (phyid == PHY_BCM_OUI_1 ||
4090 phyid == PHY_BCM_OUI_2 ||
4091 phyid == PHY_BCM_OUI_3)
4092 do_low_power = true;
4093 }
4094 }
4095 } else {
4096 do_low_power = true;
4097
4098 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4099 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4100
4101 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4102 tg3_setup_phy(tp, false);
4103 }
4104
4105 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4106 u32 val;
4107
4108 val = tr32(GRC_VCPU_EXT_CTRL);
4109 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4110 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4111 int i;
4112 u32 val;
4113
4114 for (i = 0; i < 200; i++) {
4115 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4116 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4117 break;
4118 msleep(1);
4119 }
4120 }
4121 if (tg3_flag(tp, WOL_CAP))
4122 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4123 WOL_DRV_STATE_SHUTDOWN |
4124 WOL_DRV_WOL |
4125 WOL_SET_MAGIC_PKT);
4126
4127 if (device_should_wake) {
4128 u32 mac_mode;
4129
4130 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4131 if (do_low_power &&
4132 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4133 tg3_phy_auxctl_write(tp,
4134 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4135 MII_TG3_AUXCTL_PCTL_WOL_EN |
4136 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4137 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4138 udelay(40);
4139 }
4140
4141 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4142 mac_mode = MAC_MODE_PORT_MODE_GMII;
4143 else if (tp->phy_flags &
4144 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4145 if (tp->link_config.active_speed == SPEED_1000)
4146 mac_mode = MAC_MODE_PORT_MODE_GMII;
4147 else
4148 mac_mode = MAC_MODE_PORT_MODE_MII;
4149 } else
4150 mac_mode = MAC_MODE_PORT_MODE_MII;
4151
4152 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4153 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4154 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4155 SPEED_100 : SPEED_10;
4156 if (tg3_5700_link_polarity(tp, speed))
4157 mac_mode |= MAC_MODE_LINK_POLARITY;
4158 else
4159 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4160 }
4161 } else {
4162 mac_mode = MAC_MODE_PORT_MODE_TBI;
4163 }
4164
4165 if (!tg3_flag(tp, 5750_PLUS))
4166 tw32(MAC_LED_CTRL, tp->led_ctrl);
4167
4168 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4169 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4170 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4171 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4172
4173 if (tg3_flag(tp, ENABLE_APE))
4174 mac_mode |= MAC_MODE_APE_TX_EN |
4175 MAC_MODE_APE_RX_EN |
4176 MAC_MODE_TDE_ENABLE;
4177
4178 tw32_f(MAC_MODE, mac_mode);
4179 udelay(100);
4180
4181 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4182 udelay(10);
4183 }
4184
4185 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4186 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4187 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4188 u32 base_val;
4189
4190 base_val = tp->pci_clock_ctrl;
4191 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4192 CLOCK_CTRL_TXCLK_DISABLE);
4193
4194 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4195 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4196 } else if (tg3_flag(tp, 5780_CLASS) ||
4197 tg3_flag(tp, CPMU_PRESENT) ||
4198 tg3_asic_rev(tp) == ASIC_REV_5906) {
4199 /* do nothing */
4200 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4201 u32 newbits1, newbits2;
4202
4203 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4204 tg3_asic_rev(tp) == ASIC_REV_5701) {
4205 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4206 CLOCK_CTRL_TXCLK_DISABLE |
4207 CLOCK_CTRL_ALTCLK);
4208 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4209 } else if (tg3_flag(tp, 5705_PLUS)) {
4210 newbits1 = CLOCK_CTRL_625_CORE;
4211 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4212 } else {
4213 newbits1 = CLOCK_CTRL_ALTCLK;
4214 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4215 }
4216
4217 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4218 40);
4219
4220 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4221 40);
4222
4223 if (!tg3_flag(tp, 5705_PLUS)) {
4224 u32 newbits3;
4225
4226 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4227 tg3_asic_rev(tp) == ASIC_REV_5701) {
4228 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4229 CLOCK_CTRL_TXCLK_DISABLE |
4230 CLOCK_CTRL_44MHZ_CORE);
4231 } else {
4232 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4233 }
4234
4235 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4236 tp->pci_clock_ctrl | newbits3, 40);
4237 }
4238 }
4239
4240 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4241 tg3_power_down_phy(tp, do_low_power);
4242
4243 tg3_frob_aux_power(tp, true);
4244
4245 /* Workaround for unstable PLL clock */
4246 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4247 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4248 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4249 u32 val = tr32(0x7d00);
4250
4251 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4252 tw32(0x7d00, val);
4253 if (!tg3_flag(tp, ENABLE_ASF)) {
4254 int err;
4255
4256 err = tg3_nvram_lock(tp);
4257 tg3_halt_cpu(tp, RX_CPU_BASE);
4258 if (!err)
4259 tg3_nvram_unlock(tp);
4260 }
4261 }
4262
4263 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4264
4265 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4266
4267 return;
4268 }
4269
4270 static void tg3_power_down(struct tg3 *tp)
4271 {
4272 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4273 pci_set_power_state(tp->pdev, PCI_D3hot);
4274 }
4275
4276 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4277 {
4278 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4279 case MII_TG3_AUX_STAT_10HALF:
4280 *speed = SPEED_10;
4281 *duplex = DUPLEX_HALF;
4282 break;
4283
4284 case MII_TG3_AUX_STAT_10FULL:
4285 *speed = SPEED_10;
4286 *duplex = DUPLEX_FULL;
4287 break;
4288
4289 case MII_TG3_AUX_STAT_100HALF:
4290 *speed = SPEED_100;
4291 *duplex = DUPLEX_HALF;
4292 break;
4293
4294 case MII_TG3_AUX_STAT_100FULL:
4295 *speed = SPEED_100;
4296 *duplex = DUPLEX_FULL;
4297 break;
4298
4299 case MII_TG3_AUX_STAT_1000HALF:
4300 *speed = SPEED_1000;
4301 *duplex = DUPLEX_HALF;
4302 break;
4303
4304 case MII_TG3_AUX_STAT_1000FULL:
4305 *speed = SPEED_1000;
4306 *duplex = DUPLEX_FULL;
4307 break;
4308
4309 default:
4310 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4311 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4312 SPEED_10;
4313 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4314 DUPLEX_HALF;
4315 break;
4316 }
4317 *speed = SPEED_UNKNOWN;
4318 *duplex = DUPLEX_UNKNOWN;
4319 break;
4320 }
4321 }
4322
4323 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4324 {
4325 int err = 0;
4326 u32 val, new_adv;
4327
4328 new_adv = ADVERTISE_CSMA;
4329 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4330 new_adv |= mii_advertise_flowctrl(flowctrl);
4331
4332 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4333 if (err)
4334 goto done;
4335
4336 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4337 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4338
4339 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4340 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4341 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4342
4343 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4344 if (err)
4345 goto done;
4346 }
4347
4348 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4349 goto done;
4350
4351 tw32(TG3_CPMU_EEE_MODE,
4352 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4353
4354 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4355 if (!err) {
4356 u32 err2;
4357
4358 if (!tp->eee.eee_enabled)
4359 val = 0;
4360 else
4361 val = ethtool_adv_to_mmd_eee_adv_t(advertise);
4362
4363 mii_eee_cap1_mod_linkmode_t(tp->eee.advertised, val);
4364 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4365 if (err)
4366 val = 0;
4367
4368 switch (tg3_asic_rev(tp)) {
4369 case ASIC_REV_5717:
4370 case ASIC_REV_57765:
4371 case ASIC_REV_57766:
4372 case ASIC_REV_5719:
4373 /* If we advertised any EEE modes above... */
4374 if (val)
4375 val = MII_TG3_DSP_TAP26_ALNOKO |
4376 MII_TG3_DSP_TAP26_RMRXSTO |
4377 MII_TG3_DSP_TAP26_OPCSINPT;
4378 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4379 fallthrough;
4380 case ASIC_REV_5720:
4381 case ASIC_REV_5762:
4382 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4383 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4384 MII_TG3_DSP_CH34TP2_HIBW01);
4385 }
4386
4387 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4388 if (!err)
4389 err = err2;
4390 }
4391
4392 done:
4393 return err;
4394 }
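/*
 * Sketch of the advertisement encoding used by tg3_phy_autoneg_cfg() above,
 * relying only on the stock linux/mii.h helpers: an ethtool ADVERTISED_*
 * mask is folded into MII_ADVERTISE and MII_CTRL1000 register values.
 * The function name is hypothetical.
 */
static inline void tg3_adv_encoding_example(void)
{
	u32 advertise = ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full;
	u32 mii_adv = ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	u32 mii_gig = ethtool_adv_to_mii_ctrl1000_t(advertise);

	(void)mii_adv;	/* == ADVERTISE_100FULL */
	(void)mii_gig;	/* == ADVERTISE_1000FULL */
}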
4395
4396 static void tg3_phy_copper_begin(struct tg3 *tp)
4397 {
4398 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4399 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4400 u32 adv, fc;
4401
4402 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4403 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4404 adv = ADVERTISED_10baseT_Half |
4405 ADVERTISED_10baseT_Full;
4406 if (tg3_flag(tp, WOL_SPEED_100MB))
4407 adv |= ADVERTISED_100baseT_Half |
4408 ADVERTISED_100baseT_Full;
4409 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4410 if (!(tp->phy_flags &
4411 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4412 adv |= ADVERTISED_1000baseT_Half;
4413 adv |= ADVERTISED_1000baseT_Full;
4414 }
4415
4416 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4417 } else {
4418 adv = tp->link_config.advertising;
4419 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4420 adv &= ~(ADVERTISED_1000baseT_Half |
4421 ADVERTISED_1000baseT_Full);
4422
4423 fc = tp->link_config.flowctrl;
4424 }
4425
4426 tg3_phy_autoneg_cfg(tp, adv, fc);
4427
4428 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4429 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4430 /* Normally during power down we want to autonegotiate
4431 * the lowest possible speed for WOL. However, to avoid
4432 * link flap, we leave it untouched.
4433 */
4434 return;
4435 }
4436
4437 tg3_writephy(tp, MII_BMCR,
4438 BMCR_ANENABLE | BMCR_ANRESTART);
4439 } else {
4440 int i;
4441 u32 bmcr, orig_bmcr;
4442
4443 tp->link_config.active_speed = tp->link_config.speed;
4444 tp->link_config.active_duplex = tp->link_config.duplex;
4445
4446 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4447 /* With autoneg disabled, 5715 only links up when the
4448 * advertisement register has the configured speed
4449 * enabled.
4450 */
4451 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4452 }
4453
4454 bmcr = 0;
4455 switch (tp->link_config.speed) {
4456 default:
4457 case SPEED_10:
4458 break;
4459
4460 case SPEED_100:
4461 bmcr |= BMCR_SPEED100;
4462 break;
4463
4464 case SPEED_1000:
4465 bmcr |= BMCR_SPEED1000;
4466 break;
4467 }
4468
4469 if (tp->link_config.duplex == DUPLEX_FULL)
4470 bmcr |= BMCR_FULLDPLX;
4471
4472 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4473 (bmcr != orig_bmcr)) {
4474 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4475 for (i = 0; i < 1500; i++) {
4476 u32 tmp;
4477
4478 udelay(10);
4479 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4480 tg3_readphy(tp, MII_BMSR, &tmp))
4481 continue;
4482 if (!(tmp & BMSR_LSTATUS)) {
4483 udelay(40);
4484 break;
4485 }
4486 }
4487 tg3_writephy(tp, MII_BMCR, bmcr);
4488 udelay(40);
4489 }
4490 }
4491 }
4492
4493 static int tg3_phy_pull_config(struct tg3 *tp)
4494 {
4495 int err;
4496 u32 val;
4497
4498 err = tg3_readphy(tp, MII_BMCR, &val);
4499 if (err)
4500 goto done;
4501
4502 if (!(val & BMCR_ANENABLE)) {
4503 tp->link_config.autoneg = AUTONEG_DISABLE;
4504 tp->link_config.advertising = 0;
4505 tg3_flag_clear(tp, PAUSE_AUTONEG);
4506
4507 err = -EIO;
4508
4509 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4510 case 0:
4511 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4512 goto done;
4513
4514 tp->link_config.speed = SPEED_10;
4515 break;
4516 case BMCR_SPEED100:
4517 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4518 goto done;
4519
4520 tp->link_config.speed = SPEED_100;
4521 break;
4522 case BMCR_SPEED1000:
4523 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4524 tp->link_config.speed = SPEED_1000;
4525 break;
4526 }
4527 fallthrough;
4528 default:
4529 goto done;
4530 }
4531
4532 if (val & BMCR_FULLDPLX)
4533 tp->link_config.duplex = DUPLEX_FULL;
4534 else
4535 tp->link_config.duplex = DUPLEX_HALF;
4536
4537 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4538
4539 err = 0;
4540 goto done;
4541 }
4542
4543 tp->link_config.autoneg = AUTONEG_ENABLE;
4544 tp->link_config.advertising = ADVERTISED_Autoneg;
4545 tg3_flag_set(tp, PAUSE_AUTONEG);
4546
4547 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4548 u32 adv;
4549
4550 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4551 if (err)
4552 goto done;
4553
4554 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4555 tp->link_config.advertising |= adv | ADVERTISED_TP;
4556
4557 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4558 } else {
4559 tp->link_config.advertising |= ADVERTISED_FIBRE;
4560 }
4561
4562 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4563 u32 adv;
4564
4565 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4566 err = tg3_readphy(tp, MII_CTRL1000, &val);
4567 if (err)
4568 goto done;
4569
4570 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4571 } else {
4572 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4573 if (err)
4574 goto done;
4575
4576 adv = tg3_decode_flowctrl_1000X(val);
4577 tp->link_config.flowctrl = adv;
4578
4579 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4580 adv = mii_adv_to_ethtool_adv_x(val);
4581 }
4582
4583 tp->link_config.advertising |= adv;
4584 }
4585
4586 done:
4587 return err;
4588 }
4589
4590 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4591 {
4592 int err;
4593
4594 /* Turn off tap power management. */
4595 /* Set Extended packet length bit */
4596 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4597
4598 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4599 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4600 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4601 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4602 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4603
4604 udelay(40);
4605
4606 return err;
4607 }
4608
4609 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4610 {
4611 struct ethtool_keee eee = {};
4612
4613 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4614 return true;
4615
4616 tg3_eee_pull_config(tp, &eee);
4617
4618 if (tp->eee.eee_enabled) {
4619 if (!linkmode_equal(tp->eee.advertised, eee.advertised) ||
4620 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4621 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4622 return false;
4623 } else {
4624 /* EEE is disabled but we're advertising */
4625 if (!linkmode_empty(eee.advertised))
4626 return false;
4627 }
4628
4629 return true;
4630 }
4631
4632 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4633 {
4634 u32 advmsk, tgtadv, advertising;
4635
4636 advertising = tp->link_config.advertising;
4637 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4638
4639 advmsk = ADVERTISE_ALL;
4640 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4641 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4642 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4643 }
4644
4645 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4646 return false;
4647
4648 if ((*lcladv & advmsk) != tgtadv)
4649 return false;
4650
4651 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4652 u32 tg3_ctrl;
4653
4654 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4655
4656 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4657 return false;
4658
4659 if (tgtadv &&
4660 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4661 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4662 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4663 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4664 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4665 } else {
4666 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4667 }
4668
4669 if (tg3_ctrl != tgtadv)
4670 return false;
4671 }
4672
4673 return true;
4674 }
4675
4676 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4677 {
4678 u32 lpeth = 0;
4679
4680 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4681 u32 val;
4682
4683 if (tg3_readphy(tp, MII_STAT1000, &val))
4684 return false;
4685
4686 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4687 }
4688
4689 if (tg3_readphy(tp, MII_LPA, rmtadv))
4690 return false;
4691
4692 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4693 tp->link_config.rmt_adv = lpeth;
4694
4695 return true;
4696 }
4697
4698 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4699 {
4700 if (curr_link_up != tp->link_up) {
4701 if (curr_link_up) {
4702 netif_carrier_on(tp->dev);
4703 } else {
4704 netif_carrier_off(tp->dev);
4705 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4706 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4707 }
4708
4709 tg3_link_report(tp);
4710 return true;
4711 }
4712
4713 return false;
4714 }
4715
4716 static void tg3_clear_mac_status(struct tg3 *tp)
4717 {
4718 tw32(MAC_EVENT, 0);
4719
4720 tw32_f(MAC_STATUS,
4721 MAC_STATUS_SYNC_CHANGED |
4722 MAC_STATUS_CFG_CHANGED |
4723 MAC_STATUS_MI_COMPLETION |
4724 MAC_STATUS_LNKSTATE_CHANGED);
4725 udelay(40);
4726 }
4727
4728 static void tg3_setup_eee(struct tg3 *tp)
4729 {
4730 u32 val;
4731
4732 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4733 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4734 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4735 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4736
4737 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4738
4739 tw32_f(TG3_CPMU_EEE_CTRL,
4740 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4741
4742 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4743 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4744 TG3_CPMU_EEEMD_LPI_IN_RX |
4745 TG3_CPMU_EEEMD_EEE_ENABLE;
4746
4747 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4748 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4749
4750 if (tg3_flag(tp, ENABLE_APE))
4751 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4752
4753 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4754
4755 tw32_f(TG3_CPMU_EEE_DBTMR1,
4756 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4757 (tp->eee.tx_lpi_timer & 0xffff));
4758
4759 tw32_f(TG3_CPMU_EEE_DBTMR2,
4760 TG3_CPMU_DBTMR2_APE_TX_2047US |
4761 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4762 }
4763
4764 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4765 {
4766 bool current_link_up;
4767 u32 bmsr, val;
4768 u32 lcl_adv, rmt_adv;
4769 u32 current_speed;
4770 u8 current_duplex;
4771 int i, err;
4772
4773 tg3_clear_mac_status(tp);
4774
4775 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4776 tw32_f(MAC_MI_MODE,
4777 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4778 udelay(80);
4779 }
4780
4781 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4782
4783 /* Some third-party PHYs need to be reset on link going
4784 * down.
4785 */
4786 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4787 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4788 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4789 tp->link_up) {
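/* BMSR latches link-down events: the first read returns the latched
 * value, so read it twice and act on the second, current reading.
 */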
4790 tg3_readphy(tp, MII_BMSR, &bmsr);
4791 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4792 !(bmsr & BMSR_LSTATUS))
4793 force_reset = true;
4794 }
4795 if (force_reset)
4796 tg3_phy_reset(tp);
4797
4798 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4799 tg3_readphy(tp, MII_BMSR, &bmsr);
4800 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4801 !tg3_flag(tp, INIT_COMPLETE))
4802 bmsr = 0;
4803
4804 if (!(bmsr & BMSR_LSTATUS)) {
4805 err = tg3_init_5401phy_dsp(tp);
4806 if (err)
4807 return err;
4808
4809 tg3_readphy(tp, MII_BMSR, &bmsr);
4810 for (i = 0; i < 1000; i++) {
4811 udelay(10);
4812 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4813 (bmsr & BMSR_LSTATUS)) {
4814 udelay(40);
4815 break;
4816 }
4817 }
4818
4819 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4820 TG3_PHY_REV_BCM5401_B0 &&
4821 !(bmsr & BMSR_LSTATUS) &&
4822 tp->link_config.active_speed == SPEED_1000) {
4823 err = tg3_phy_reset(tp);
4824 if (!err)
4825 err = tg3_init_5401phy_dsp(tp);
4826 if (err)
4827 return err;
4828 }
4829 }
4830 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4831 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4832 /* 5701 {A0,B0} CRC bug workaround */
4833 tg3_writephy(tp, 0x15, 0x0a75);
4834 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4835 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4836 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4837 }
4838
4839 /* Clear pending interrupts... */
4840 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4841 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4842
4843 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4844 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4845 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4846 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4847
4848 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4849 tg3_asic_rev(tp) == ASIC_REV_5701) {
4850 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4851 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4852 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4853 else
4854 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4855 }
4856
4857 current_link_up = false;
4858 current_speed = SPEED_UNKNOWN;
4859 current_duplex = DUPLEX_UNKNOWN;
4860 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4861 tp->link_config.rmt_adv = 0;
4862
4863 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4864 err = tg3_phy_auxctl_read(tp,
4865 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4866 &val);
4867 if (!err && !(val & (1 << 10))) {
4868 tg3_phy_auxctl_write(tp,
4869 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4870 val | (1 << 10));
4871 goto relink;
4872 }
4873 }
4874
4875 bmsr = 0;
4876 for (i = 0; i < 100; i++) {
4877 tg3_readphy(tp, MII_BMSR, &bmsr);
4878 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4879 (bmsr & BMSR_LSTATUS))
4880 break;
4881 udelay(40);
4882 }
4883
4884 if (bmsr & BMSR_LSTATUS) {
4885 u32 aux_stat, bmcr;
4886
4887 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4888 for (i = 0; i < 2000; i++) {
4889 udelay(10);
4890 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4891 aux_stat)
4892 break;
4893 }
4894
4895 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4896 &current_speed,
4897 &current_duplex);
4898
4899 bmcr = 0;
4900 for (i = 0; i < 200; i++) {
4901 tg3_readphy(tp, MII_BMCR, &bmcr);
4902 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4903 continue;
4904 if (bmcr && bmcr != 0x7fff)
4905 break;
4906 udelay(10);
4907 }
4908
4909 lcl_adv = 0;
4910 rmt_adv = 0;
4911
4912 tp->link_config.active_speed = current_speed;
4913 tp->link_config.active_duplex = current_duplex;
4914
4915 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4916 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4917
4918 if ((bmcr & BMCR_ANENABLE) &&
4919 eee_config_ok &&
4920 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4921 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4922 current_link_up = true;
4923
4924 /* EEE setting changes take effect only after a phy
4925 * reset. If we have skipped a reset due to Link Flap
4926 * Avoidance being enabled, do it now.
4927 */
4928 if (!eee_config_ok &&
4929 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4930 !force_reset) {
4931 tg3_setup_eee(tp);
4932 tg3_phy_reset(tp);
4933 }
4934 } else {
4935 if (!(bmcr & BMCR_ANENABLE) &&
4936 tp->link_config.speed == current_speed &&
4937 tp->link_config.duplex == current_duplex) {
4938 current_link_up = true;
4939 }
4940 }
4941
4942 if (current_link_up &&
4943 tp->link_config.active_duplex == DUPLEX_FULL) {
4944 u32 reg, bit;
4945
4946 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4947 reg = MII_TG3_FET_GEN_STAT;
4948 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4949 } else {
4950 reg = MII_TG3_EXT_STAT;
4951 bit = MII_TG3_EXT_STAT_MDIX;
4952 }
4953
4954 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4955 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4956
4957 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4958 }
4959 }
4960
4961 relink:
4962 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4963 tg3_phy_copper_begin(tp);
4964
4965 if (tg3_flag(tp, ROBOSWITCH)) {
4966 current_link_up = true;
4967 /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4968 current_speed = SPEED_1000;
4969 current_duplex = DUPLEX_FULL;
4970 tp->link_config.active_speed = current_speed;
4971 tp->link_config.active_duplex = current_duplex;
4972 }
4973
4974 tg3_readphy(tp, MII_BMSR, &bmsr);
4975 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4976 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4977 current_link_up = true;
4978 }
4979
4980 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4981 if (current_link_up) {
4982 if (tp->link_config.active_speed == SPEED_100 ||
4983 tp->link_config.active_speed == SPEED_10)
4984 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4985 else
4986 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4987 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4988 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4989 else
4990 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4991
4992 /* In order for the 5750 core in the BCM4785 chip to work properly
4993 * in RGMII mode, the Led Control Register must be set up.
4994 */
4995 if (tg3_flag(tp, RGMII_MODE)) {
4996 u32 led_ctrl = tr32(MAC_LED_CTRL);
4997 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4998
4999 if (tp->link_config.active_speed == SPEED_10)
5000 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5001 else if (tp->link_config.active_speed == SPEED_100)
5002 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5003 LED_CTRL_100MBPS_ON);
5004 else if (tp->link_config.active_speed == SPEED_1000)
5005 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5006 LED_CTRL_1000MBPS_ON);
5007
5008 tw32(MAC_LED_CTRL, led_ctrl);
5009 udelay(40);
5010 }
5011
5012 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5013 if (tp->link_config.active_duplex == DUPLEX_HALF)
5014 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5015
5016 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5017 if (current_link_up &&
5018 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5019 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5020 else
5021 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5022 }
5023
5024 /* ??? Without this setting Netgear GA302T PHY does not
5025 * ??? send/receive packets...
5026 */
5027 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5028 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5029 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5030 tw32_f(MAC_MI_MODE, tp->mi_mode);
5031 udelay(80);
5032 }
5033
5034 tw32_f(MAC_MODE, tp->mac_mode);
5035 udelay(40);
5036
5037 tg3_phy_eee_adjust(tp, current_link_up);
5038
5039 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5040 /* Polled via timer. */
5041 tw32_f(MAC_EVENT, 0);
5042 } else {
5043 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5044 }
5045 udelay(40);
5046
5047 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5048 current_link_up &&
5049 tp->link_config.active_speed == SPEED_1000 &&
5050 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5051 udelay(120);
5052 tw32_f(MAC_STATUS,
5053 (MAC_STATUS_SYNC_CHANGED |
5054 MAC_STATUS_CFG_CHANGED));
5055 udelay(40);
5056 tg3_write_mem(tp,
5057 NIC_SRAM_FIRMWARE_MBOX,
5058 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5059 }
5060
5061 /* Prevent send BD corruption. */
5062 if (tg3_flag(tp, CLKREQ_BUG)) {
5063 if (tp->link_config.active_speed == SPEED_100 ||
5064 tp->link_config.active_speed == SPEED_10)
5065 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5066 PCI_EXP_LNKCTL_CLKREQ_EN);
5067 else
5068 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5069 PCI_EXP_LNKCTL_CLKREQ_EN);
5070 }
5071
5072 tg3_test_and_report_link_chg(tp, current_link_up);
5073
5074 return 0;
5075 }
5076
5077 struct tg3_fiber_aneginfo {
5078 int state;
5079 #define ANEG_STATE_UNKNOWN 0
5080 #define ANEG_STATE_AN_ENABLE 1
5081 #define ANEG_STATE_RESTART_INIT 2
5082 #define ANEG_STATE_RESTART 3
5083 #define ANEG_STATE_DISABLE_LINK_OK 4
5084 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5085 #define ANEG_STATE_ABILITY_DETECT 6
5086 #define ANEG_STATE_ACK_DETECT_INIT 7
5087 #define ANEG_STATE_ACK_DETECT 8
5088 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5089 #define ANEG_STATE_COMPLETE_ACK 10
5090 #define ANEG_STATE_IDLE_DETECT_INIT 11
5091 #define ANEG_STATE_IDLE_DETECT 12
5092 #define ANEG_STATE_LINK_OK 13
5093 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5094 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5095
5096 u32 flags;
5097 #define MR_AN_ENABLE 0x00000001
5098 #define MR_RESTART_AN 0x00000002
5099 #define MR_AN_COMPLETE 0x00000004
5100 #define MR_PAGE_RX 0x00000008
5101 #define MR_NP_LOADED 0x00000010
5102 #define MR_TOGGLE_TX 0x00000020
5103 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5104 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5105 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5106 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5107 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5108 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5109 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5110 #define MR_TOGGLE_RX 0x00002000
5111 #define MR_NP_RX 0x00004000
5112
5113 #define MR_LINK_OK 0x80000000
5114
5115 unsigned long link_time, cur_time;
5116
5117 u32 ability_match_cfg;
5118 int ability_match_count;
5119
5120 char ability_match, idle_match, ack_match;
5121
5122 u32 txconfig, rxconfig;
5123 #define ANEG_CFG_NP 0x00000080
5124 #define ANEG_CFG_ACK 0x00000040
5125 #define ANEG_CFG_RF2 0x00000020
5126 #define ANEG_CFG_RF1 0x00000010
5127 #define ANEG_CFG_PS2 0x00000001
5128 #define ANEG_CFG_PS1 0x00008000
5129 #define ANEG_CFG_HD 0x00004000
5130 #define ANEG_CFG_FD 0x00002000
5131 #define ANEG_CFG_INVAL 0x00001f06
5132
5133 };
5134 #define ANEG_OK 0
5135 #define ANEG_DONE 1
5136 #define ANEG_TIMER_ENAB 2
5137 #define ANEG_FAILED -1
5138
5139 #define ANEG_STATE_SETTLE_TIME 10000
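/*
 * Sketch (hypothetical helper): decoding a received 1000BASE-X config word
 * with the ANEG_CFG_* bits above, mirroring what the COMPLETE_ACK_INIT state
 * of the state machine below does with the link partner's abilities.
 */
static inline u32 tg3_aneg_rxconfig_to_flags_sketch(u32 rxconfig)
{
	u32 flags = 0;

	if (rxconfig & ANEG_CFG_FD)
		flags |= MR_LP_ADV_FULL_DUPLEX;
	if (rxconfig & ANEG_CFG_HD)
		flags |= MR_LP_ADV_HALF_DUPLEX;
	if (rxconfig & ANEG_CFG_PS1)
		flags |= MR_LP_ADV_SYM_PAUSE;
	if (rxconfig & ANEG_CFG_PS2)
		flags |= MR_LP_ADV_ASYM_PAUSE;

	return flags;
}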
5140
5141 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5142 struct tg3_fiber_aneginfo *ap)
5143 {
5144 u16 flowctrl;
5145 unsigned long delta;
5146 u32 rx_cfg_reg;
5147 int ret;
5148
5149 if (ap->state == ANEG_STATE_UNKNOWN) {
5150 ap->rxconfig = 0;
5151 ap->link_time = 0;
5152 ap->cur_time = 0;
5153 ap->ability_match_cfg = 0;
5154 ap->ability_match_count = 0;
5155 ap->ability_match = 0;
5156 ap->idle_match = 0;
5157 ap->ack_match = 0;
5158 }
5159 ap->cur_time++;
5160
5161 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5162 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5163
5164 if (rx_cfg_reg != ap->ability_match_cfg) {
5165 ap->ability_match_cfg = rx_cfg_reg;
5166 ap->ability_match = 0;
5167 ap->ability_match_count = 0;
5168 } else {
5169 if (++ap->ability_match_count > 1) {
5170 ap->ability_match = 1;
5171 ap->ability_match_cfg = rx_cfg_reg;
5172 }
5173 }
5174 if (rx_cfg_reg & ANEG_CFG_ACK)
5175 ap->ack_match = 1;
5176 else
5177 ap->ack_match = 0;
5178
5179 ap->idle_match = 0;
5180 } else {
5181 ap->idle_match = 1;
5182 ap->ability_match_cfg = 0;
5183 ap->ability_match_count = 0;
5184 ap->ability_match = 0;
5185 ap->ack_match = 0;
5186
5187 rx_cfg_reg = 0;
5188 }
5189
5190 ap->rxconfig = rx_cfg_reg;
5191 ret = ANEG_OK;
5192
5193 switch (ap->state) {
5194 case ANEG_STATE_UNKNOWN:
5195 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5196 ap->state = ANEG_STATE_AN_ENABLE;
5197
5198 fallthrough;
5199 case ANEG_STATE_AN_ENABLE:
5200 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5201 if (ap->flags & MR_AN_ENABLE) {
5202 ap->link_time = 0;
5203 ap->cur_time = 0;
5204 ap->ability_match_cfg = 0;
5205 ap->ability_match_count = 0;
5206 ap->ability_match = 0;
5207 ap->idle_match = 0;
5208 ap->ack_match = 0;
5209
5210 ap->state = ANEG_STATE_RESTART_INIT;
5211 } else {
5212 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5213 }
5214 break;
5215
5216 case ANEG_STATE_RESTART_INIT:
5217 ap->link_time = ap->cur_time;
5218 ap->flags &= ~(MR_NP_LOADED);
5219 ap->txconfig = 0;
5220 tw32(MAC_TX_AUTO_NEG, 0);
5221 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5222 tw32_f(MAC_MODE, tp->mac_mode);
5223 udelay(40);
5224
5225 ret = ANEG_TIMER_ENAB;
5226 ap->state = ANEG_STATE_RESTART;
5227
5228 fallthrough;
5229 case ANEG_STATE_RESTART:
5230 delta = ap->cur_time - ap->link_time;
5231 if (delta > ANEG_STATE_SETTLE_TIME)
5232 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5233 else
5234 ret = ANEG_TIMER_ENAB;
5235 break;
5236
5237 case ANEG_STATE_DISABLE_LINK_OK:
5238 ret = ANEG_DONE;
5239 break;
5240
5241 case ANEG_STATE_ABILITY_DETECT_INIT:
5242 ap->flags &= ~(MR_TOGGLE_TX);
5243 ap->txconfig = ANEG_CFG_FD;
5244 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5245 if (flowctrl & ADVERTISE_1000XPAUSE)
5246 ap->txconfig |= ANEG_CFG_PS1;
5247 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5248 ap->txconfig |= ANEG_CFG_PS2;
5249 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5250 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5251 tw32_f(MAC_MODE, tp->mac_mode);
5252 udelay(40);
5253
5254 ap->state = ANEG_STATE_ABILITY_DETECT;
5255 break;
5256
5257 case ANEG_STATE_ABILITY_DETECT:
5258 if (ap->ability_match != 0 && ap->rxconfig != 0)
5259 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5260 break;
5261
5262 case ANEG_STATE_ACK_DETECT_INIT:
5263 ap->txconfig |= ANEG_CFG_ACK;
5264 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5265 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5266 tw32_f(MAC_MODE, tp->mac_mode);
5267 udelay(40);
5268
5269 ap->state = ANEG_STATE_ACK_DETECT;
5270
5271 fallthrough;
5272 case ANEG_STATE_ACK_DETECT:
5273 if (ap->ack_match != 0) {
5274 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5275 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5276 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5277 } else {
5278 ap->state = ANEG_STATE_AN_ENABLE;
5279 }
5280 } else if (ap->ability_match != 0 &&
5281 ap->rxconfig == 0) {
5282 ap->state = ANEG_STATE_AN_ENABLE;
5283 }
5284 break;
5285
5286 case ANEG_STATE_COMPLETE_ACK_INIT:
5287 if (ap->rxconfig & ANEG_CFG_INVAL) {
5288 ret = ANEG_FAILED;
5289 break;
5290 }
5291 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5292 MR_LP_ADV_HALF_DUPLEX |
5293 MR_LP_ADV_SYM_PAUSE |
5294 MR_LP_ADV_ASYM_PAUSE |
5295 MR_LP_ADV_REMOTE_FAULT1 |
5296 MR_LP_ADV_REMOTE_FAULT2 |
5297 MR_LP_ADV_NEXT_PAGE |
5298 MR_TOGGLE_RX |
5299 MR_NP_RX);
5300 if (ap->rxconfig & ANEG_CFG_FD)
5301 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5302 if (ap->rxconfig & ANEG_CFG_HD)
5303 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5304 if (ap->rxconfig & ANEG_CFG_PS1)
5305 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5306 if (ap->rxconfig & ANEG_CFG_PS2)
5307 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5308 if (ap->rxconfig & ANEG_CFG_RF1)
5309 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5310 if (ap->rxconfig & ANEG_CFG_RF2)
5311 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5312 if (ap->rxconfig & ANEG_CFG_NP)
5313 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5314
5315 ap->link_time = ap->cur_time;
5316
5317 ap->flags ^= (MR_TOGGLE_TX);
5318 if (ap->rxconfig & 0x0008)
5319 ap->flags |= MR_TOGGLE_RX;
5320 if (ap->rxconfig & ANEG_CFG_NP)
5321 ap->flags |= MR_NP_RX;
5322 ap->flags |= MR_PAGE_RX;
5323
5324 ap->state = ANEG_STATE_COMPLETE_ACK;
5325 ret = ANEG_TIMER_ENAB;
5326 break;
5327
5328 case ANEG_STATE_COMPLETE_ACK:
5329 if (ap->ability_match != 0 &&
5330 ap->rxconfig == 0) {
5331 ap->state = ANEG_STATE_AN_ENABLE;
5332 break;
5333 }
5334 delta = ap->cur_time - ap->link_time;
5335 if (delta > ANEG_STATE_SETTLE_TIME) {
5336 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5337 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5338 } else {
5339 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5340 !(ap->flags & MR_NP_RX)) {
5341 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5342 } else {
5343 ret = ANEG_FAILED;
5344 }
5345 }
5346 }
5347 break;
5348
5349 case ANEG_STATE_IDLE_DETECT_INIT:
5350 ap->link_time = ap->cur_time;
5351 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5352 tw32_f(MAC_MODE, tp->mac_mode);
5353 udelay(40);
5354
5355 ap->state = ANEG_STATE_IDLE_DETECT;
5356 ret = ANEG_TIMER_ENAB;
5357 break;
5358
5359 case ANEG_STATE_IDLE_DETECT:
5360 if (ap->ability_match != 0 &&
5361 ap->rxconfig == 0) {
5362 ap->state = ANEG_STATE_AN_ENABLE;
5363 break;
5364 }
5365 delta = ap->cur_time - ap->link_time;
5366 if (delta > ANEG_STATE_SETTLE_TIME) {
5367 /* XXX another gem from the Broadcom driver :( */
5368 ap->state = ANEG_STATE_LINK_OK;
5369 }
5370 break;
5371
5372 case ANEG_STATE_LINK_OK:
5373 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5374 ret = ANEG_DONE;
5375 break;
5376
5377 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5378 /* ??? unimplemented */
5379 break;
5380
5381 case ANEG_STATE_NEXT_PAGE_WAIT:
5382 /* ??? unimplemented */
5383 break;
5384
5385 default:
5386 ret = ANEG_FAILED;
5387 break;
5388 }
5389
5390 return ret;
5391 }
5392
5393 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5394 {
5395 int res = 0;
5396 struct tg3_fiber_aneginfo aninfo;
5397 int status = ANEG_FAILED;
5398 unsigned int tick;
5399 u32 tmp;
5400
5401 tw32_f(MAC_TX_AUTO_NEG, 0);
5402
5403 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5404 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5405 udelay(40);
5406
5407 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5408 udelay(40);
5409
5410 memset(&aninfo, 0, sizeof(aninfo));
5411 aninfo.flags |= MR_AN_ENABLE;
5412 aninfo.state = ANEG_STATE_UNKNOWN;
5413 aninfo.cur_time = 0;
5414 tick = 0;
5415 while (++tick < 195000) {
5416 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5417 if (status == ANEG_DONE || status == ANEG_FAILED)
5418 break;
5419
5420 udelay(1);
5421 }
5422
5423 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5424 tw32_f(MAC_MODE, tp->mac_mode);
5425 udelay(40);
5426
5427 *txflags = aninfo.txconfig;
5428 *rxflags = aninfo.flags;
5429
5430 if (status == ANEG_DONE &&
5431 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5432 MR_LP_ADV_FULL_DUPLEX)))
5433 res = 1;
5434
5435 return res;
5436 }
5437
5438 static void tg3_init_bcm8002(struct tg3 *tp)
5439 {
5440 u32 mac_status = tr32(MAC_STATUS);
5441 int i;
5442
5443 /* Reset when initializing for the first time or when we have a link. */
5444 if (tg3_flag(tp, INIT_COMPLETE) &&
5445 !(mac_status & MAC_STATUS_PCS_SYNCED))
5446 return;
5447
5448 /* Set PLL lock range. */
5449 tg3_writephy(tp, 0x16, 0x8007);
5450
5451 /* SW reset */
5452 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5453
5454 /* Wait for reset to complete. */
5455 /* XXX schedule_timeout() ... */
5456 for (i = 0; i < 500; i++)
5457 udelay(10);
5458
5459 /* Config mode; select PMA/Ch 1 regs. */
5460 tg3_writephy(tp, 0x10, 0x8411);
5461
5462 /* Enable auto-lock and comdet, select txclk for tx. */
5463 tg3_writephy(tp, 0x11, 0x0a10);
5464
5465 tg3_writephy(tp, 0x18, 0x00a0);
5466 tg3_writephy(tp, 0x16, 0x41ff);
5467
5468 /* Assert and deassert POR. */
5469 tg3_writephy(tp, 0x13, 0x0400);
5470 udelay(40);
5471 tg3_writephy(tp, 0x13, 0x0000);
5472
5473 tg3_writephy(tp, 0x11, 0x0a50);
5474 udelay(40);
5475 tg3_writephy(tp, 0x11, 0x0a10);
5476
5477 /* Wait for signal to stabilize */
5478 /* XXX schedule_timeout() ... */
5479 for (i = 0; i < 15000; i++)
5480 udelay(10);
5481
5482 /* Deselect the channel register so we can read the PHYID
5483 * later.
5484 */
5485 tg3_writephy(tp, 0x10, 0x8011);
5486 }
5487
5488 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5489 {
5490 u16 flowctrl;
5491 bool current_link_up;
5492 u32 sg_dig_ctrl, sg_dig_status;
5493 u32 serdes_cfg, expected_sg_dig_ctrl;
5494 int workaround, port_a;
5495
5496 serdes_cfg = 0;
5497 workaround = 0;
5498 port_a = 1;
5499 current_link_up = false;
5500
5501 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5502 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5503 workaround = 1;
5504 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5505 port_a = 0;
5506
5507 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5508 /* preserve bits 20-23 for voltage regulator */
5509 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5510 }
5511
5512 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5513
5514 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5515 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5516 if (workaround) {
5517 u32 val = serdes_cfg;
5518
5519 if (port_a)
5520 val |= 0xc010000;
5521 else
5522 val |= 0x4010000;
5523 tw32_f(MAC_SERDES_CFG, val);
5524 }
5525
5526 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5527 }
5528 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5529 tg3_setup_flow_control(tp, 0, 0);
5530 current_link_up = true;
5531 }
5532 goto out;
5533 }
5534
5535 /* Want auto-negotiation. */
5536 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5537
5538 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5539 if (flowctrl & ADVERTISE_1000XPAUSE)
5540 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5541 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5542 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5543
5544 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5545 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5546 tp->serdes_counter &&
5547 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5548 MAC_STATUS_RCVD_CFG)) ==
5549 MAC_STATUS_PCS_SYNCED)) {
5550 tp->serdes_counter--;
5551 current_link_up = true;
5552 goto out;
5553 }
5554 restart_autoneg:
5555 if (workaround)
5556 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5557 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5558 udelay(5);
5559 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5560
5561 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5562 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5563 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5564 MAC_STATUS_SIGNAL_DET)) {
5565 sg_dig_status = tr32(SG_DIG_STATUS);
5566 mac_status = tr32(MAC_STATUS);
5567
5568 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5569 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5570 u32 local_adv = 0, remote_adv = 0;
5571
5572 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5573 local_adv |= ADVERTISE_1000XPAUSE;
5574 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5575 local_adv |= ADVERTISE_1000XPSE_ASYM;
5576
5577 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5578 remote_adv |= LPA_1000XPAUSE;
5579 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5580 remote_adv |= LPA_1000XPAUSE_ASYM;
5581
5582 tp->link_config.rmt_adv =
5583 mii_adv_to_ethtool_adv_x(remote_adv);
5584
5585 tg3_setup_flow_control(tp, local_adv, remote_adv);
5586 current_link_up = true;
5587 tp->serdes_counter = 0;
5588 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5589 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5590 if (tp->serdes_counter)
5591 tp->serdes_counter--;
5592 else {
5593 if (workaround) {
5594 u32 val = serdes_cfg;
5595
5596 if (port_a)
5597 val |= 0xc010000;
5598 else
5599 val |= 0x4010000;
5600
5601 tw32_f(MAC_SERDES_CFG, val);
5602 }
5603
5604 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5605 udelay(40);
5606
5607 /* Link parallel detection: link is up only
5608 * if we have PCS_SYNC and are not
5609 * receiving config code words. */
5610 mac_status = tr32(MAC_STATUS);
5611 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5612 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5613 tg3_setup_flow_control(tp, 0, 0);
5614 current_link_up = true;
5615 tp->phy_flags |=
5616 TG3_PHYFLG_PARALLEL_DETECT;
5617 tp->serdes_counter =
5618 SERDES_PARALLEL_DET_TIMEOUT;
5619 } else
5620 goto restart_autoneg;
5621 }
5622 }
5623 } else {
5624 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5625 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5626 }
5627
5628 out:
5629 return current_link_up;
5630 }
5631
5632 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5633 {
5634 bool current_link_up = false;
5635
5636 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5637 goto out;
5638
5639 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5640 u32 txflags, rxflags;
5641 int i;
5642
5643 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5644 u32 local_adv = 0, remote_adv = 0;
5645
5646 if (txflags & ANEG_CFG_PS1)
5647 local_adv |= ADVERTISE_1000XPAUSE;
5648 if (txflags & ANEG_CFG_PS2)
5649 local_adv |= ADVERTISE_1000XPSE_ASYM;
5650
5651 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5652 remote_adv |= LPA_1000XPAUSE;
5653 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5654 remote_adv |= LPA_1000XPAUSE_ASYM;
5655
5656 tp->link_config.rmt_adv =
5657 mii_adv_to_ethtool_adv_x(remote_adv);
5658
5659 tg3_setup_flow_control(tp, local_adv, remote_adv);
5660
5661 current_link_up = true;
5662 }
5663 for (i = 0; i < 30; i++) {
5664 udelay(20);
5665 tw32_f(MAC_STATUS,
5666 (MAC_STATUS_SYNC_CHANGED |
5667 MAC_STATUS_CFG_CHANGED));
5668 udelay(40);
5669 if ((tr32(MAC_STATUS) &
5670 (MAC_STATUS_SYNC_CHANGED |
5671 MAC_STATUS_CFG_CHANGED)) == 0)
5672 break;
5673 }
5674
5675 mac_status = tr32(MAC_STATUS);
5676 if (!current_link_up &&
5677 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5678 !(mac_status & MAC_STATUS_RCVD_CFG))
5679 current_link_up = true;
5680 } else {
5681 tg3_setup_flow_control(tp, 0, 0);
5682
5683 /* Forcing 1000FD link up. */
5684 current_link_up = true;
5685
5686 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5687 udelay(40);
5688
5689 tw32_f(MAC_MODE, tp->mac_mode);
5690 udelay(40);
5691 }
5692
5693 out:
5694 return current_link_up;
5695 }
5696
5697 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5698 {
5699 u32 orig_pause_cfg;
5700 u32 orig_active_speed;
5701 u8 orig_active_duplex;
5702 u32 mac_status;
5703 bool current_link_up;
5704 int i;
5705
5706 orig_pause_cfg = tp->link_config.active_flowctrl;
5707 orig_active_speed = tp->link_config.active_speed;
5708 orig_active_duplex = tp->link_config.active_duplex;
5709
5710 if (!tg3_flag(tp, HW_AUTONEG) &&
5711 tp->link_up &&
5712 tg3_flag(tp, INIT_COMPLETE)) {
5713 mac_status = tr32(MAC_STATUS);
5714 mac_status &= (MAC_STATUS_PCS_SYNCED |
5715 MAC_STATUS_SIGNAL_DET |
5716 MAC_STATUS_CFG_CHANGED |
5717 MAC_STATUS_RCVD_CFG);
5718 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5719 MAC_STATUS_SIGNAL_DET)) {
5720 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5721 MAC_STATUS_CFG_CHANGED));
5722 return 0;
5723 }
5724 }
5725
5726 tw32_f(MAC_TX_AUTO_NEG, 0);
5727
5728 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5729 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5730 tw32_f(MAC_MODE, tp->mac_mode);
5731 udelay(40);
5732
5733 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5734 tg3_init_bcm8002(tp);
5735
5736 /* Enable link change event even when serdes polling. */
5737 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5738 udelay(40);
5739
5740 tp->link_config.rmt_adv = 0;
5741 mac_status = tr32(MAC_STATUS);
5742
5743 if (tg3_flag(tp, HW_AUTONEG))
5744 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5745 else
5746 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5747
5748 tp->napi[0].hw_status->status =
5749 (SD_STATUS_UPDATED |
5750 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5751
5752 for (i = 0; i < 100; i++) {
5753 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5754 MAC_STATUS_CFG_CHANGED));
5755 udelay(5);
5756 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5757 MAC_STATUS_CFG_CHANGED |
5758 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5759 break;
5760 }
5761
5762 mac_status = tr32(MAC_STATUS);
5763 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5764 current_link_up = false;
5765 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5766 tp->serdes_counter == 0) {
5767 tw32_f(MAC_MODE, (tp->mac_mode |
5768 MAC_MODE_SEND_CONFIGS));
5769 udelay(1);
5770 tw32_f(MAC_MODE, tp->mac_mode);
5771 }
5772 }
5773
5774 if (current_link_up) {
5775 tp->link_config.active_speed = SPEED_1000;
5776 tp->link_config.active_duplex = DUPLEX_FULL;
5777 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5778 LED_CTRL_LNKLED_OVERRIDE |
5779 LED_CTRL_1000MBPS_ON));
5780 } else {
5781 tp->link_config.active_speed = SPEED_UNKNOWN;
5782 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5783 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5784 LED_CTRL_LNKLED_OVERRIDE |
5785 LED_CTRL_TRAFFIC_OVERRIDE));
5786 }
5787
5788 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5789 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5790 if (orig_pause_cfg != now_pause_cfg ||
5791 orig_active_speed != tp->link_config.active_speed ||
5792 orig_active_duplex != tp->link_config.active_duplex)
5793 tg3_link_report(tp);
5794 }
5795
5796 return 0;
5797 }
5798
5799 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5800 {
5801 int err = 0;
5802 u32 bmsr, bmcr;
5803 u32 current_speed = SPEED_UNKNOWN;
5804 u8 current_duplex = DUPLEX_UNKNOWN;
5805 bool current_link_up = false;
5806 u32 local_adv = 0, remote_adv = 0, sgsr;
5807
5808 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5809 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5810 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5811 (sgsr & SERDES_TG3_SGMII_MODE)) {
5812
5813 if (force_reset)
5814 tg3_phy_reset(tp);
5815
5816 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5817
5818 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5819 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5820 } else {
5821 current_link_up = true;
5822 if (sgsr & SERDES_TG3_SPEED_1000) {
5823 current_speed = SPEED_1000;
5824 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5825 } else if (sgsr & SERDES_TG3_SPEED_100) {
5826 current_speed = SPEED_100;
5827 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5828 } else {
5829 current_speed = SPEED_10;
5830 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5831 }
5832
5833 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5834 current_duplex = DUPLEX_FULL;
5835 else
5836 current_duplex = DUPLEX_HALF;
5837 }
5838
5839 tw32_f(MAC_MODE, tp->mac_mode);
5840 udelay(40);
5841
5842 tg3_clear_mac_status(tp);
5843
5844 goto fiber_setup_done;
5845 }
5846
5847 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5848 tw32_f(MAC_MODE, tp->mac_mode);
5849 udelay(40);
5850
5851 tg3_clear_mac_status(tp);
5852
5853 if (force_reset)
5854 tg3_phy_reset(tp);
5855
5856 tp->link_config.rmt_adv = 0;
5857
5858 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5859 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5860 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5861 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5862 bmsr |= BMSR_LSTATUS;
5863 else
5864 bmsr &= ~BMSR_LSTATUS;
5865 }
5866
5867 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5868
5869 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5870 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5871 /* do nothing, just check for link up at the end */
5872 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5873 u32 adv, newadv;
5874
5875 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5876 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5877 ADVERTISE_1000XPAUSE |
5878 ADVERTISE_1000XPSE_ASYM |
5879 ADVERTISE_SLCT);
5880
5881 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5882 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5883
5884 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5885 tg3_writephy(tp, MII_ADVERTISE, newadv);
5886 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5887 tg3_writephy(tp, MII_BMCR, bmcr);
5888
5889 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5890 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5891 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5892
5893 return err;
5894 }
5895 } else {
5896 u32 new_bmcr;
5897
5898 bmcr &= ~BMCR_SPEED1000;
5899 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5900
5901 if (tp->link_config.duplex == DUPLEX_FULL)
5902 new_bmcr |= BMCR_FULLDPLX;
5903
5904 if (new_bmcr != bmcr) {
5905 /* BMCR_SPEED1000 is a reserved bit that needs
5906 * to be set on write.
5907 */
5908 new_bmcr |= BMCR_SPEED1000;
5909
5910 /* Force a linkdown */
5911 if (tp->link_up) {
5912 u32 adv;
5913
5914 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5915 adv &= ~(ADVERTISE_1000XFULL |
5916 ADVERTISE_1000XHALF |
5917 ADVERTISE_SLCT);
5918 tg3_writephy(tp, MII_ADVERTISE, adv);
5919 tg3_writephy(tp, MII_BMCR, bmcr |
5920 BMCR_ANRESTART |
5921 BMCR_ANENABLE);
5922 udelay(10);
5923 tg3_carrier_off(tp);
5924 }
5925 tg3_writephy(tp, MII_BMCR, new_bmcr);
5926 bmcr = new_bmcr;
5927 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5928 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5929 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5930 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5931 bmsr |= BMSR_LSTATUS;
5932 else
5933 bmsr &= ~BMSR_LSTATUS;
5934 }
5935 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5936 }
5937 }
5938
5939 if (bmsr & BMSR_LSTATUS) {
5940 current_speed = SPEED_1000;
5941 current_link_up = true;
5942 if (bmcr & BMCR_FULLDPLX)
5943 current_duplex = DUPLEX_FULL;
5944 else
5945 current_duplex = DUPLEX_HALF;
5946
5947 if (bmcr & BMCR_ANENABLE) {
5948 u32 common;
5949
5950 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5951 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5952 common = local_adv & remote_adv;
5953 if (common & (ADVERTISE_1000XHALF |
5954 ADVERTISE_1000XFULL)) {
5955 if (common & ADVERTISE_1000XFULL)
5956 current_duplex = DUPLEX_FULL;
5957 else
5958 current_duplex = DUPLEX_HALF;
5959
5960 tp->link_config.rmt_adv =
5961 mii_adv_to_ethtool_adv_x(remote_adv);
5962 } else if (!tg3_flag(tp, 5780_CLASS)) {
5963 /* Link is up via parallel detect */
5964 } else {
5965 current_link_up = false;
5966 }
5967 }
5968 }
5969
5970 fiber_setup_done:
5971 if (current_link_up && current_duplex == DUPLEX_FULL)
5972 tg3_setup_flow_control(tp, local_adv, remote_adv);
5973
5974 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5975 if (tp->link_config.active_duplex == DUPLEX_HALF)
5976 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5977
5978 tw32_f(MAC_MODE, tp->mac_mode);
5979 udelay(40);
5980
5981 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5982
5983 tp->link_config.active_speed = current_speed;
5984 tp->link_config.active_duplex = current_duplex;
5985
5986 tg3_test_and_report_link_chg(tp, current_link_up);
5987 return err;
5988 }
5989
5990 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5991 {
5992 if (tp->serdes_counter) {
5993 /* Give autoneg time to complete. */
5994 tp->serdes_counter--;
5995 return;
5996 }
5997
5998 if (!tp->link_up &&
5999 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6000 u32 bmcr;
6001
6002 tg3_readphy(tp, MII_BMCR, &bmcr);
6003 if (bmcr & BMCR_ANENABLE) {
6004 u32 phy1, phy2;
6005
6006 /* Select shadow register 0x1f */
6007 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6008 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6009
6010 /* Select expansion interrupt status register */
6011 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6012 MII_TG3_DSP_EXP1_INT_STAT);
6013 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6014 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6015
6016 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6017 /* We have signal detect and are not receiving
6018 * config code words; the link is up by parallel
6019 * detection.
6020 */
6021
6022 bmcr &= ~BMCR_ANENABLE;
6023 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6024 tg3_writephy(tp, MII_BMCR, bmcr);
6025 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6026 }
6027 }
6028 } else if (tp->link_up &&
6029 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6030 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6031 u32 phy2;
6032
6033 /* Select expansion interrupt status register */
6034 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6035 MII_TG3_DSP_EXP1_INT_STAT);
6036 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6037 if (phy2 & 0x20) {
6038 u32 bmcr;
6039
6040 /* Config code words received, turn on autoneg. */
6041 tg3_readphy(tp, MII_BMCR, &bmcr);
6042 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6043
6044 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6045
6046 }
6047 }
6048 }
6049
6050 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6051 {
6052 u32 val;
6053 int err;
6054
6055 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6056 err = tg3_setup_fiber_phy(tp, force_reset);
6057 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6058 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6059 else
6060 err = tg3_setup_copper_phy(tp, force_reset);
6061
6062 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6063 u32 scale;
6064
6065 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6066 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6067 scale = 65;
6068 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6069 scale = 6;
6070 else
6071 scale = 12;
6072
6073 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6074 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6075 tw32(GRC_MISC_CFG, val);
6076 }
6077
6078 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6079 (6 << TX_LENGTHS_IPG_SHIFT);
6080 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6081 tg3_asic_rev(tp) == ASIC_REV_5762)
6082 val |= tr32(MAC_TX_LENGTHS) &
6083 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6084 TX_LENGTHS_CNT_DWN_VAL_MSK);
6085
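 /* 1000 Mb/s half duplex relies on 802.3 carrier extension and thus a
 * much longer slot time; the 0xff vs. 32 encodings are presumed to
 * select the extended vs. standard slot time.
 */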
6086 if (tp->link_config.active_speed == SPEED_1000 &&
6087 tp->link_config.active_duplex == DUPLEX_HALF)
6088 tw32(MAC_TX_LENGTHS, val |
6089 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6090 else
6091 tw32(MAC_TX_LENGTHS, val |
6092 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6093
6094 if (!tg3_flag(tp, 5705_PLUS)) {
6095 if (tp->link_up) {
6096 tw32(HOSTCC_STAT_COAL_TICKS,
6097 tp->coal.stats_block_coalesce_usecs);
6098 } else {
6099 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6100 }
6101 }
6102
6103 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6104 val = tr32(PCIE_PWR_MGMT_THRESH);
6105 if (!tp->link_up)
6106 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6107 tp->pwrmgmt_thresh;
6108 else
6109 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6110 tw32(PCIE_PWR_MGMT_THRESH, val);
6111 }
6112
6113 return err;
6114 }
6115
6116 /* tp->lock must be held */
6117 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6118 {
6119 u64 stamp;
6120
6121 ptp_read_system_prets(sts);
6122 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6123 ptp_read_system_postts(sts);
6124 stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6125
6126 return stamp;
6127 }
6128
6129 /* tp->lock must be held */
6130 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6131 {
6132 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6133
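 /* Stop the reference clock, load the 64-bit value LSB first, then
 * resume so the counter restarts atomically from newval.
 */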
6134 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6135 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6136 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6137 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6138 }
6139
6140 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6141 static inline void tg3_full_unlock(struct tg3 *tp);
6142 static int tg3_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
6143 {
6144 struct tg3 *tp = netdev_priv(dev);
6145
6146 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
6147
6148 if (tg3_flag(tp, PTP_CAPABLE)) {
6149 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6150 SOF_TIMESTAMPING_RX_HARDWARE |
6151 SOF_TIMESTAMPING_RAW_HARDWARE;
6152 }
6153
6154 if (tp->ptp_clock)
6155 info->phc_index = ptp_clock_index(tp->ptp_clock);
6156
6157 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6158
6159 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6160 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6161 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6162 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6163 return 0;
6164 }
6165
6166 static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
6167 {
6168 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6169 u64 correction;
6170 bool neg_adj;
6171
6172 /* Frequency adjustment is performed using hardware with a 24 bit
6173 * accumulator and a programmable correction value. On each clk, the
6174 * correction value gets added to the accumulator and when it
6175 * overflows, the time counter is incremented/decremented.
6176 */
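 /* Worked example, assuming diff_by_scaled_ppm() computes roughly
 * correction = (1 << 24) * ppm / 1e6: a +6 ppm request yields a
 * correction of ~101, so the 24-bit accumulator overflows (and the
 * time counter gains a tick) about 6 times per million clocks.
 */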
6177 neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);
6178
6179 tg3_full_lock(tp, 0);
6180
6181 if (correction)
6182 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6183 TG3_EAV_REF_CLK_CORRECT_EN |
6184 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
6185 ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
6186 else
6187 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6188
6189 tg3_full_unlock(tp);
6190
6191 return 0;
6192 }
6193
6194 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6195 {
6196 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6197
6198 tg3_full_lock(tp, 0);
6199 tp->ptp_adjust += delta;
6200 tg3_full_unlock(tp);
6201
6202 return 0;
6203 }
6204
6205 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6206 struct ptp_system_timestamp *sts)
6207 {
6208 u64 ns;
6209 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6210
6211 tg3_full_lock(tp, 0);
6212 ns = tg3_refclk_read(tp, sts);
6213 ns += tp->ptp_adjust;
6214 tg3_full_unlock(tp);
6215
6216 *ts = ns_to_timespec64(ns);
6217
6218 return 0;
6219 }
6220
6221 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6222 const struct timespec64 *ts)
6223 {
6224 u64 ns;
6225 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6226
6227 ns = timespec64_to_ns(ts);
6228
6229 tg3_full_lock(tp, 0);
6230 tg3_refclk_write(tp, ns);
6231 tp->ptp_adjust = 0;
6232 tg3_full_unlock(tp);
6233
6234 return 0;
6235 }
6236
6237 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6238 struct ptp_clock_request *rq, int on)
6239 {
6240 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6241 u32 clock_ctl;
6242 int rval = 0;
6243
6244 switch (rq->type) {
6245 case PTP_CLK_REQ_PEROUT:
6246 /* Reject requests with unsupported flags */
6247 if (rq->perout.flags)
6248 return -EOPNOTSUPP;
6249
6250 if (rq->perout.index != 0)
6251 return -EINVAL;
6252
6253 tg3_full_lock(tp, 0);
6254 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6255 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6256
6257 if (on) {
6258 u64 nsec;
6259
6260 nsec = rq->perout.start.sec * 1000000000ULL +
6261 rq->perout.start.nsec;
6262
6263 if (rq->perout.period.sec || rq->perout.period.nsec) {
6264 netdev_warn(tp->dev,
6265 "Device supports only a one-shot timesync output, period must be 0\n");
6266 rval = -EINVAL;
6267 goto err_out;
6268 }
6269
6270 if (nsec & (1ULL << 63)) {
6271 netdev_warn(tp->dev,
6272 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6273 rval = -EINVAL;
6274 goto err_out;
6275 }
6276
6277 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6278 tw32(TG3_EAV_WATCHDOG0_MSB,
6279 TG3_EAV_WATCHDOG0_EN |
6280 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6281
6282 tw32(TG3_EAV_REF_CLCK_CTL,
6283 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6284 } else {
6285 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6286 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6287 }
6288
6289 err_out:
6290 tg3_full_unlock(tp);
6291 return rval;
6292
6293 default:
6294 break;
6295 }
6296
6297 return -EOPNOTSUPP;
6298 }
6299
6300 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6301 struct skb_shared_hwtstamps *timestamp)
6302 {
6303 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6304 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6305 tp->ptp_adjust);
6306 }
6307
6308 static void tg3_read_tx_tstamp(struct tg3 *tp, u64 *hwclock)
6309 {
6310 *hwclock = tr32(TG3_TX_TSTAMP_LSB);
6311 *hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6312 }
6313
6314 static long tg3_ptp_ts_aux_work(struct ptp_clock_info *ptp)
6315 {
6316 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6317 struct skb_shared_hwtstamps timestamp;
6318 u64 hwclock;
6319
6320 if (tp->ptp_txts_retrycnt > 2)
6321 goto done;
6322
6323 tg3_read_tx_tstamp(tp, &hwclock);
6324
6325 if (hwclock != tp->pre_tx_ts) {
6326 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6327 skb_tstamp_tx(tp->tx_tstamp_skb, &timestamp);
6328 goto done;
6329 }
6330 tp->ptp_txts_retrycnt++;
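 /* Timestamp not latched yet: returning HZ / 10 asks the PTP core to
 * run this aux worker again in ~100 ms, while the -1 returned on the
 * done path means "do not reschedule".
 */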
6331 return HZ / 10;
6332 done:
6333 dev_consume_skb_any(tp->tx_tstamp_skb);
6334 tp->tx_tstamp_skb = NULL;
6335 tp->ptp_txts_retrycnt = 0;
6336 tp->pre_tx_ts = 0;
6337 return -1;
6338 }
6339
6340 static const struct ptp_clock_info tg3_ptp_caps = {
6341 .owner = THIS_MODULE,
6342 .name = "tg3 clock",
6343 .max_adj = 250000000,
6344 .n_alarm = 0,
6345 .n_ext_ts = 0,
6346 .n_per_out = 1,
6347 .n_pins = 0,
6348 .pps = 0,
6349 .adjfine = tg3_ptp_adjfine,
6350 .adjtime = tg3_ptp_adjtime,
6351 .do_aux_work = tg3_ptp_ts_aux_work,
6352 .gettimex64 = tg3_ptp_gettimex,
6353 .settime64 = tg3_ptp_settime,
6354 .enable = tg3_ptp_enable,
6355 };
6356
6357 /* tp->lock must be held */
6358 static void tg3_ptp_init(struct tg3 *tp)
6359 {
6360 if (!tg3_flag(tp, PTP_CAPABLE))
6361 return;
6362
6363 /* Initialize the hardware clock to the system time. */
6364 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6365 tp->ptp_adjust = 0;
6366 tp->ptp_info = tg3_ptp_caps;
6367 }
6368
6369 /* tp->lock must be held */
6370 static void tg3_ptp_resume(struct tg3 *tp)
6371 {
6372 if (!tg3_flag(tp, PTP_CAPABLE))
6373 return;
6374
6375 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6376 tp->ptp_adjust = 0;
6377 }
6378
6379 static void tg3_ptp_fini(struct tg3 *tp)
6380 {
6381 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6382 return;
6383
6384 ptp_clock_unregister(tp->ptp_clock);
6385 tp->ptp_clock = NULL;
6386 tp->ptp_adjust = 0;
6387 dev_consume_skb_any(tp->tx_tstamp_skb);
6388 tp->tx_tstamp_skb = NULL;
6389 }
6390
6391 static inline int tg3_irq_sync(struct tg3 *tp)
6392 {
6393 return tp->irq_sync;
6394 }
6395
6396 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6397 {
6398 int i;
6399
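 /* Bias dst by off so each register lands at the same offset within
 * the caller's snapshot buffer as it has in the register space.
 */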
6400 dst = (u32 *)((u8 *)dst + off);
6401 for (i = 0; i < len; i += sizeof(u32))
6402 *dst++ = tr32(off + i);
6403 }
6404
6405 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6406 {
6407 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6408 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6409 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6410 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6411 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6412 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6413 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6414 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6415 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6416 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6417 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6418 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6419 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6420 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6421 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6422 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6423 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6424 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6425 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6426
6427 if (tg3_flag(tp, SUPPORT_MSIX))
6428 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6429
6430 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6431 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6432 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6433 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6434 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6435 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6436 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6437 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6438
6439 if (!tg3_flag(tp, 5705_PLUS)) {
6440 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6441 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6442 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6443 }
6444
6445 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6446 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6447 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6448 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6449 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6450
6451 if (tg3_flag(tp, NVRAM))
6452 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6453 }
6454
6455 static void tg3_dump_state(struct tg3 *tp)
6456 {
6457 int i;
6458 u32 *regs;
6459
6460 /* If it is a PCI error, all registers will read 0xffff;
6461 * don't dump them out, just report the error and return.
6462 */
6463 if (tp->pdev->error_state != pci_channel_io_normal) {
6464 netdev_err(tp->dev, "PCI channel ERROR!\n");
6465 return;
6466 }
6467
6468 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6469 if (!regs)
6470 return;
6471
6472 if (tg3_flag(tp, PCI_EXPRESS)) {
6473 /* Read up to but not including private PCI registers */
6474 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6475 regs[i / sizeof(u32)] = tr32(i);
6476 } else
6477 tg3_dump_legacy_regs(tp, regs);
6478
6479 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6480 if (!regs[i + 0] && !regs[i + 1] &&
6481 !regs[i + 2] && !regs[i + 3])
6482 continue;
6483
6484 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6485 i * 4,
6486 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6487 }
6488
6489 kfree(regs);
6490
6491 for (i = 0; i < tp->irq_cnt; i++) {
6492 struct tg3_napi *tnapi = &tp->napi[i];
6493
6494 /* SW status block */
6495 netdev_err(tp->dev,
6496 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6497 i,
6498 tnapi->hw_status->status,
6499 tnapi->hw_status->status_tag,
6500 tnapi->hw_status->rx_jumbo_consumer,
6501 tnapi->hw_status->rx_consumer,
6502 tnapi->hw_status->rx_mini_consumer,
6503 tnapi->hw_status->idx[0].rx_producer,
6504 tnapi->hw_status->idx[0].tx_consumer);
6505
6506 netdev_err(tp->dev,
6507 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6508 i,
6509 tnapi->last_tag, tnapi->last_irq_tag,
6510 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6511 tnapi->rx_rcb_ptr,
6512 tnapi->prodring.rx_std_prod_idx,
6513 tnapi->prodring.rx_std_cons_idx,
6514 tnapi->prodring.rx_jmb_prod_idx,
6515 tnapi->prodring.rx_jmb_cons_idx);
6516 }
6517 }
6518
6519 /* This is called whenever we suspect that the system chipset is re-
6520 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6521 * is bogus tx completions. We try to recover by setting the
6522 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6523 * in the workqueue.
6524 */
6525 static void tg3_tx_recover(struct tg3 *tp)
6526 {
6527 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6528 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6529
6530 netdev_warn(tp->dev,
6531 "The system may be re-ordering memory-mapped I/O "
6532 "cycles to the network device, attempting to recover. "
6533 "Please report the problem to the driver maintainer "
6534 "and include system chipset information.\n");
6535
6536 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6537 }
6538
6539 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6540 {
6541 /* Tell compiler to fetch tx indices from memory. */
6542 barrier();
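 /* The mask makes the producer/consumer subtraction wrap correctly:
 * e.g. on a 512-entry ring, prod = 5 and cons = 510 gives
 * (5 - 510) & 511 = 7 descriptors still in flight.
 */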
6543 return tnapi->tx_pending -
6544 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6545 }
6546
6547 /* Tigon3 never reports partial packet sends. So we do not
6548 * need special logic to handle SKBs that have not had all
6549 * of their frags sent yet, like SunGEM does.
6550 */
6551 static void tg3_tx(struct tg3_napi *tnapi)
6552 {
6553 struct tg3 *tp = tnapi->tp;
6554 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6555 u32 sw_idx = tnapi->tx_cons;
6556 struct netdev_queue *txq;
6557 int index = tnapi - tp->napi;
6558 unsigned int pkts_compl = 0, bytes_compl = 0;
6559
6560 if (tg3_flag(tp, ENABLE_TSS))
6561 index--;
6562
6563 txq = netdev_get_tx_queue(tp->dev, index);
6564
6565 while (sw_idx != hw_idx) {
6566 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6567 bool complete_skb_later = false;
6568 struct sk_buff *skb = ri->skb;
6569 int i, tx_bug = 0;
6570
6571 if (unlikely(skb == NULL)) {
6572 tg3_tx_recover(tp);
6573 return;
6574 }
6575
6576 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6577 struct skb_shared_hwtstamps timestamp;
6578 u64 hwclock;
6579
6580 tg3_read_tx_tstamp(tp, &hwclock);
6581 if (hwclock != tp->pre_tx_ts) {
6582 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6583 skb_tstamp_tx(skb, &timestamp);
6584 tp->pre_tx_ts = 0;
6585 } else {
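 /* The hardware has not latched this skb's timestamp yet;
 * hold on to the skb and let tg3_ptp_ts_aux_work() retry.
 */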
6586 tp->tx_tstamp_skb = skb;
6587 complete_skb_later = true;
6588 }
6589 }
6590
6591 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
6592 skb_headlen(skb), DMA_TO_DEVICE);
6593
6594 ri->skb = NULL;
6595
6596 while (ri->fragmented) {
6597 ri->fragmented = false;
6598 sw_idx = NEXT_TX(sw_idx);
6599 ri = &tnapi->tx_buffers[sw_idx];
6600 }
6601
6602 sw_idx = NEXT_TX(sw_idx);
6603
6604 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6605 ri = &tnapi->tx_buffers[sw_idx];
6606 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6607 tx_bug = 1;
6608
6609 dma_unmap_page(&tp->pdev->dev,
6610 dma_unmap_addr(ri, mapping),
6611 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6612 DMA_TO_DEVICE);
6613
6614 while (ri->fragmented) {
6615 ri->fragmented = false;
6616 sw_idx = NEXT_TX(sw_idx);
6617 ri = &tnapi->tx_buffers[sw_idx];
6618 }
6619
6620 sw_idx = NEXT_TX(sw_idx);
6621 }
6622
6623 pkts_compl++;
6624 bytes_compl += skb->len;
6625
6626 if (!complete_skb_later)
6627 dev_consume_skb_any(skb);
6628 else
6629 ptp_schedule_worker(tp->ptp_clock, 0);
6630
6631 if (unlikely(tx_bug)) {
6632 tg3_tx_recover(tp);
6633 return;
6634 }
6635 }
6636
6637 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6638
6639 tnapi->tx_cons = sw_idx;
6640
6641 /* Need to make the tx_cons update visible to __tg3_start_xmit()
6642 * before checking for netif_queue_stopped(). Without the
6643 * memory barrier, there is a small possibility that __tg3_start_xmit()
6644 * will miss it and cause the queue to be stopped forever.
6645 */
6646 smp_mb();
6647
6648 if (unlikely(netif_tx_queue_stopped(txq) &&
6649 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6650 __netif_tx_lock(txq, smp_processor_id());
6651 if (netif_tx_queue_stopped(txq) &&
6652 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6653 netif_tx_wake_queue(txq);
6654 __netif_tx_unlock(txq);
6655 }
6656 }
6657
6658 static void tg3_frag_free(bool is_frag, void *data)
6659 {
6660 if (is_frag)
6661 skb_free_frag(data);
6662 else
6663 kfree(data);
6664 }
6665
6666 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6667 {
6668 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6669 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6670
6671 if (!ri->data)
6672 return;
6673
6674 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6675 DMA_FROM_DEVICE);
6676 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6677 ri->data = NULL;
6678 }
6679
6680
6681 /* Returns size of skb allocated or < 0 on error.
6682 *
6683 * We only need to fill in the address because the other members
6684 * of the RX descriptor are invariant, see tg3_init_rings.
6685 *
6686 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6687 * posting buffers we only dirty the first cache line of the RX
6688 * descriptor (containing the address). Whereas for the RX status
6689 * buffers the cpu only reads the last cacheline of the RX descriptor
6690 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6691 */
6692 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6693 u32 opaque_key, u32 dest_idx_unmasked,
6694 unsigned int *frag_size)
6695 {
6696 struct tg3_rx_buffer_desc *desc;
6697 struct ring_info *map;
6698 u8 *data;
6699 dma_addr_t mapping;
6700 int skb_size, data_size, dest_idx;
6701
6702 switch (opaque_key) {
6703 case RXD_OPAQUE_RING_STD:
6704 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6705 desc = &tpr->rx_std[dest_idx];
6706 map = &tpr->rx_std_buffers[dest_idx];
6707 data_size = tp->rx_pkt_map_sz;
6708 break;
6709
6710 case RXD_OPAQUE_RING_JUMBO:
6711 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6712 desc = &tpr->rx_jmb[dest_idx].std;
6713 map = &tpr->rx_jmb_buffers[dest_idx];
6714 data_size = TG3_RX_JMB_MAP_SZ;
6715 break;
6716
6717 default:
6718 return -EINVAL;
6719 }
6720
6721 /* Do not overwrite any of the map or rp information
6722 * until we are sure we can commit to a new buffer.
6723 *
6724 * Callers depend upon this behavior and assume that
6725 * we leave everything unchanged if we fail.
6726 */
6727 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6728 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
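 /* Page-sized (or smaller) buffers come from the napi frag cache and
 * are later wrapped with build_skb(); larger buffers fall back to
 * kmalloc(), which is signalled to the caller by *frag_size == 0.
 */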
6729 if (skb_size <= PAGE_SIZE) {
6730 data = napi_alloc_frag(skb_size);
6731 *frag_size = skb_size;
6732 } else {
6733 data = kmalloc(skb_size, GFP_ATOMIC);
6734 *frag_size = 0;
6735 }
6736 if (!data)
6737 return -ENOMEM;
6738
6739 mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6740 data_size, DMA_FROM_DEVICE);
6741 if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6742 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6743 return -EIO;
6744 }
6745
6746 map->data = data;
6747 dma_unmap_addr_set(map, mapping, mapping);
6748
6749 desc->addr_hi = ((u64)mapping >> 32);
6750 desc->addr_lo = ((u64)mapping & 0xffffffff);
6751
6752 return data_size;
6753 }
6754
6755 /* We only need to move over in the address because the other
6756 * members of the RX descriptor are invariant. See notes above
6757 * tg3_alloc_rx_data for full details.
6758 */
6759 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6760 struct tg3_rx_prodring_set *dpr,
6761 u32 opaque_key, int src_idx,
6762 u32 dest_idx_unmasked)
6763 {
6764 struct tg3 *tp = tnapi->tp;
6765 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6766 struct ring_info *src_map, *dest_map;
6767 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6768 int dest_idx;
6769
6770 switch (opaque_key) {
6771 case RXD_OPAQUE_RING_STD:
6772 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6773 dest_desc = &dpr->rx_std[dest_idx];
6774 dest_map = &dpr->rx_std_buffers[dest_idx];
6775 src_desc = &spr->rx_std[src_idx];
6776 src_map = &spr->rx_std_buffers[src_idx];
6777 break;
6778
6779 case RXD_OPAQUE_RING_JUMBO:
6780 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6781 dest_desc = &dpr->rx_jmb[dest_idx].std;
6782 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6783 src_desc = &spr->rx_jmb[src_idx].std;
6784 src_map = &spr->rx_jmb_buffers[src_idx];
6785 break;
6786
6787 default:
6788 return;
6789 }
6790
6791 dest_map->data = src_map->data;
6792 dma_unmap_addr_set(dest_map, mapping,
6793 dma_unmap_addr(src_map, mapping));
6794 dest_desc->addr_hi = src_desc->addr_hi;
6795 dest_desc->addr_lo = src_desc->addr_lo;
6796
6797 /* Ensure that the update to the skb happens after the physical
6798 * addresses have been transferred to the new BD location.
6799 */
6800 smp_wmb();
6801
6802 src_map->data = NULL;
6803 }
6804
6805 /* The RX ring scheme is composed of multiple rings which post fresh
6806 * buffers to the chip, and one special ring the chip uses to report
6807 * status back to the host.
6808 *
6809 * The special ring reports the status of received packets to the
6810 * host. The chip does not write into the original descriptor the
6811 * RX buffer was obtained from. The chip simply takes the original
6812 * descriptor as provided by the host, updates the status and length
6813 * field, then writes this into the next status ring entry.
6814 *
6815 * Each ring the host uses to post buffers to the chip is described
6816 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
6817 * it is first placed into the on-chip ram. When the packet's length
6818 * is known, the chip walks down the TG3_BDINFO entries to select the ring.
6819 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
6820 * whose MAXLEN covers the new packet's length is chosen.
6821 *
6822 * The "separate ring for rx status" scheme may sound queer, but it makes
6823 * sense from a cache coherency perspective. If only the host writes
6824 * to the buffer post rings, and only the chip writes to the rx status
6825 * rings, then cache lines never move beyond shared-modified state.
6826 * If both the host and chip were to write into the same ring, cache line
6827 * eviction could occur since both entities want it in an exclusive state.
6828 */
6829 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6830 {
6831 struct tg3 *tp = tnapi->tp;
6832 u32 work_mask, rx_std_posted = 0;
6833 u32 std_prod_idx, jmb_prod_idx;
6834 u32 sw_idx = tnapi->rx_rcb_ptr;
6835 u16 hw_idx;
6836 int received;
6837 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6838
6839 hw_idx = *(tnapi->rx_rcb_prod_idx);
6840 /*
6841 * We need to order the read of hw_idx and the read of
6842 * the opaque cookie.
6843 */
6844 rmb();
6845 work_mask = 0;
6846 received = 0;
6847 std_prod_idx = tpr->rx_std_prod_idx;
6848 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6849 while (sw_idx != hw_idx && budget > 0) {
6850 struct ring_info *ri;
6851 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6852 unsigned int len;
6853 struct sk_buff *skb;
6854 dma_addr_t dma_addr;
6855 u32 opaque_key, desc_idx, *post_ptr;
6856 u8 *data;
6857 u64 tstamp = 0;
6858
6859 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6860 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6861 if (opaque_key == RXD_OPAQUE_RING_STD) {
6862 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6863 dma_addr = dma_unmap_addr(ri, mapping);
6864 data = ri->data;
6865 post_ptr = &std_prod_idx;
6866 rx_std_posted++;
6867 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6868 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6869 dma_addr = dma_unmap_addr(ri, mapping);
6870 data = ri->data;
6871 post_ptr = &jmb_prod_idx;
6872 } else
6873 goto next_pkt_nopost;
6874
6875 work_mask |= opaque_key;
6876
6877 if (desc->err_vlan & RXD_ERR_MASK) {
6878 drop_it:
6879 tg3_recycle_rx(tnapi, tpr, opaque_key,
6880 desc_idx, *post_ptr);
6881 drop_it_no_recycle:
6882 /* Other statistics kept track of by card. */
6883 tnapi->rx_dropped++;
6884 goto next_pkt;
6885 }
6886
6887 prefetch(data + TG3_RX_OFFSET(tp));
6888 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6889 ETH_FCS_LEN;
6890
6891 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6892 RXD_FLAG_PTPSTAT_PTPV1 ||
6893 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6894 RXD_FLAG_PTPSTAT_PTPV2) {
6895 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6896 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6897 }
6898
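 /* Copy-break: a large packet donates its buffer to the stack and a
 * fresh buffer is allocated for the ring, while a small packet is
 * copied into a new skb and its original buffer is recycled.
 */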
6899 if (len > TG3_RX_COPY_THRESH(tp)) {
6900 int skb_size;
6901 unsigned int frag_size;
6902
6903 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6904 *post_ptr, &frag_size);
6905 if (skb_size < 0)
6906 goto drop_it;
6907
6908 dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
6909 DMA_FROM_DEVICE);
6910
6911 /* Ensure that the update to the data happens
6912 * after the usage of the old DMA mapping.
6913 */
6914 smp_wmb();
6915
6916 ri->data = NULL;
6917
6918 if (frag_size)
6919 skb = build_skb(data, frag_size);
6920 else
6921 skb = slab_build_skb(data);
6922 if (!skb) {
6923 tg3_frag_free(frag_size != 0, data);
6924 goto drop_it_no_recycle;
6925 }
6926 skb_reserve(skb, TG3_RX_OFFSET(tp));
6927 } else {
6928 tg3_recycle_rx(tnapi, tpr, opaque_key,
6929 desc_idx, *post_ptr);
6930
6931 skb = netdev_alloc_skb(tp->dev,
6932 len + TG3_RAW_IP_ALIGN);
6933 if (skb == NULL)
6934 goto drop_it_no_recycle;
6935
6936 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6937 dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
6938 DMA_FROM_DEVICE);
6939 memcpy(skb->data,
6940 data + TG3_RX_OFFSET(tp),
6941 len);
6942 dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
6943 len, DMA_FROM_DEVICE);
6944 }
6945
6946 skb_put(skb, len);
6947 if (tstamp)
6948 tg3_hwclock_to_timestamp(tp, tstamp,
6949 skb_hwtstamps(skb));
6950
6951 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6952 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6953 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6954 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6955 skb->ip_summed = CHECKSUM_UNNECESSARY;
6956 else
6957 skb_checksum_none_assert(skb);
6958
6959 skb->protocol = eth_type_trans(skb, tp->dev);
6960
6961 if (len > (tp->dev->mtu + ETH_HLEN) &&
6962 skb->protocol != htons(ETH_P_8021Q) &&
6963 skb->protocol != htons(ETH_P_8021AD)) {
6964 dev_kfree_skb_any(skb);
6965 goto drop_it_no_recycle;
6966 }
6967
6968 if (desc->type_flags & RXD_FLAG_VLAN &&
6969 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6970 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6971 desc->err_vlan & RXD_VLAN_MASK);
6972
6973 napi_gro_receive(&tnapi->napi, skb);
6974
6975 received++;
6976 budget--;
6977
6978 next_pkt:
6979 (*post_ptr)++;
6980
6981 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6982 tpr->rx_std_prod_idx = std_prod_idx &
6983 tp->rx_std_ring_mask;
6984 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6985 tpr->rx_std_prod_idx);
6986 work_mask &= ~RXD_OPAQUE_RING_STD;
6987 rx_std_posted = 0;
6988 }
6989 next_pkt_nopost:
6990 sw_idx++;
6991 sw_idx &= tp->rx_ret_ring_mask;
6992
6993 /* Refresh hw_idx to see if there is new work */
6994 if (sw_idx == hw_idx) {
6995 hw_idx = *(tnapi->rx_rcb_prod_idx);
6996 rmb();
6997 }
6998 }
6999
7000 /* ACK the status ring. */
7001 tnapi->rx_rcb_ptr = sw_idx;
7002 tw32_rx_mbox(tnapi->consmbox, sw_idx);
7003
7004 /* Refill RX ring(s). */
7005 if (!tg3_flag(tp, ENABLE_RSS)) {
7006 /* Sync BD data before updating mailbox */
7007 wmb();
7008
7009 if (work_mask & RXD_OPAQUE_RING_STD) {
7010 tpr->rx_std_prod_idx = std_prod_idx &
7011 tp->rx_std_ring_mask;
7012 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7013 tpr->rx_std_prod_idx);
7014 }
7015 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
7016 tpr->rx_jmb_prod_idx = jmb_prod_idx &
7017 tp->rx_jmb_ring_mask;
7018 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7019 tpr->rx_jmb_prod_idx);
7020 }
7021 } else if (work_mask) {
7022 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
7023 * updated before the producer indices can be updated.
7024 */
7025 smp_wmb();
7026
7027 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
7028 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7029
7030 if (tnapi != &tp->napi[1]) {
7031 tp->rx_refill = true;
7032 napi_schedule(&tp->napi[1].napi);
7033 }
7034 }
7035
7036 return received;
7037 }
7038
7039 static void tg3_poll_link(struct tg3 *tp)
7040 {
7041 /* handle link change and other phy events */
7042 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7043 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7044
7045 if (sblk->status & SD_STATUS_LINK_CHG) {
7046 sblk->status = SD_STATUS_UPDATED |
7047 (sblk->status & ~SD_STATUS_LINK_CHG);
7048 spin_lock(&tp->lock);
7049 if (tg3_flag(tp, USE_PHYLIB)) {
7050 tw32_f(MAC_STATUS,
7051 (MAC_STATUS_SYNC_CHANGED |
7052 MAC_STATUS_CFG_CHANGED |
7053 MAC_STATUS_MI_COMPLETION |
7054 MAC_STATUS_LNKSTATE_CHANGED));
7055 udelay(40);
7056 } else
7057 tg3_setup_phy(tp, false);
7058 spin_unlock(&tp->lock);
7059 }
7060 }
7061 }
7062
7063 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7064 struct tg3_rx_prodring_set *dpr,
7065 struct tg3_rx_prodring_set *spr)
7066 {
7067 u32 si, di, cpycnt, src_prod_idx;
7068 int i, err = 0;
7069
7070 while (1) {
7071 src_prod_idx = spr->rx_std_prod_idx;
7072
7073 /* Make sure updates to the rx_std_buffers[] entries and the
7074 * standard producer index are seen in the correct order.
7075 */
7076 smp_rmb();
7077
7078 if (spr->rx_std_cons_idx == src_prod_idx)
7079 break;
7080
7081 if (spr->rx_std_cons_idx < src_prod_idx)
7082 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7083 else
7084 cpycnt = tp->rx_std_ring_mask + 1 -
7085 spr->rx_std_cons_idx;
7086
7087 cpycnt = min(cpycnt,
7088 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
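 /* cpycnt is clamped so the copy neither wraps the source ring nor
 * overruns the destination producer: e.g. cons = 500, prod = 10 on a
 * 512-entry ring copies entries 500..511 now and the remaining 10 on
 * the next loop iteration (destination space permitting).
 */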
7089
7090 si = spr->rx_std_cons_idx;
7091 di = dpr->rx_std_prod_idx;
7092
7093 for (i = di; i < di + cpycnt; i++) {
7094 if (dpr->rx_std_buffers[i].data) {
7095 cpycnt = i - di;
7096 err = -ENOSPC;
7097 break;
7098 }
7099 }
7100
7101 if (!cpycnt)
7102 break;
7103
7104 /* Ensure that updates to the rx_std_buffers ring and the
7105 * shadowed hardware producer ring from tg3_recycle_skb() are
7106 * ordered correctly WRT the skb check above.
7107 */
7108 smp_rmb();
7109
7110 memcpy(&dpr->rx_std_buffers[di],
7111 &spr->rx_std_buffers[si],
7112 cpycnt * sizeof(struct ring_info));
7113
7114 for (i = 0; i < cpycnt; i++, di++, si++) {
7115 struct tg3_rx_buffer_desc *sbd, *dbd;
7116 sbd = &spr->rx_std[si];
7117 dbd = &dpr->rx_std[di];
7118 dbd->addr_hi = sbd->addr_hi;
7119 dbd->addr_lo = sbd->addr_lo;
7120 }
7121
7122 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7123 tp->rx_std_ring_mask;
7124 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7125 tp->rx_std_ring_mask;
7126 }
7127
7128 while (1) {
7129 src_prod_idx = spr->rx_jmb_prod_idx;
7130
7131 /* Make sure updates to the rx_jmb_buffers[] entries and
7132 * the jumbo producer index are seen in the correct order.
7133 */
7134 smp_rmb();
7135
7136 if (spr->rx_jmb_cons_idx == src_prod_idx)
7137 break;
7138
7139 if (spr->rx_jmb_cons_idx < src_prod_idx)
7140 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7141 else
7142 cpycnt = tp->rx_jmb_ring_mask + 1 -
7143 spr->rx_jmb_cons_idx;
7144
7145 cpycnt = min(cpycnt,
7146 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7147
7148 si = spr->rx_jmb_cons_idx;
7149 di = dpr->rx_jmb_prod_idx;
7150
7151 for (i = di; i < di + cpycnt; i++) {
7152 if (dpr->rx_jmb_buffers[i].data) {
7153 cpycnt = i - di;
7154 err = -ENOSPC;
7155 break;
7156 }
7157 }
7158
7159 if (!cpycnt)
7160 break;
7161
7162 /* Ensure that updates to the rx_jmb_buffers ring and the
7163 * shadowed hardware producer ring from tg3_recycle_skb() are
7164 * ordered correctly WRT the skb check above.
7165 */
7166 smp_rmb();
7167
7168 memcpy(&dpr->rx_jmb_buffers[di],
7169 &spr->rx_jmb_buffers[si],
7170 cpycnt * sizeof(struct ring_info));
7171
7172 for (i = 0; i < cpycnt; i++, di++, si++) {
7173 struct tg3_rx_buffer_desc *sbd, *dbd;
7174 sbd = &spr->rx_jmb[si].std;
7175 dbd = &dpr->rx_jmb[di].std;
7176 dbd->addr_hi = sbd->addr_hi;
7177 dbd->addr_lo = sbd->addr_lo;
7178 }
7179
7180 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7181 tp->rx_jmb_ring_mask;
7182 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7183 tp->rx_jmb_ring_mask;
7184 }
7185
7186 return err;
7187 }
7188
7189 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7190 {
7191 struct tg3 *tp = tnapi->tp;
7192
7193 /* run TX completion thread */
7194 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7195 tg3_tx(tnapi);
7196 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7197 return work_done;
7198 }
7199
7200 if (!tnapi->rx_rcb_prod_idx)
7201 return work_done;
7202
7203 /* run RX thread, within the bounds set by NAPI.
7204 * All RX "locking" is done by ensuring outside
7205 * code synchronizes with tg3->napi.poll()
7206 */
7207 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7208 work_done += tg3_rx(tnapi, budget - work_done);
7209
7210 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7211 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7212 int i, err = 0;
7213 u32 std_prod_idx = dpr->rx_std_prod_idx;
7214 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7215
7216 tp->rx_refill = false;
7217 for (i = 1; i <= tp->rxq_cnt; i++)
7218 err |= tg3_rx_prodring_xfer(tp, dpr,
7219 &tp->napi[i].prodring);
7220
7221 wmb();
7222
7223 if (std_prod_idx != dpr->rx_std_prod_idx)
7224 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7225 dpr->rx_std_prod_idx);
7226
7227 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7228 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7229 dpr->rx_jmb_prod_idx);
7230
7231 if (err)
7232 tw32_f(HOSTCC_MODE, tp->coal_now);
7233 }
7234
7235 return work_done;
7236 }
7237
7238 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7239 {
7240 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7241 schedule_work(&tp->reset_task);
7242 }
7243
7244 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7245 {
7246 if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7247 cancel_work_sync(&tp->reset_task);
7248 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7249 }
7250
7251 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7252 {
7253 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7254 struct tg3 *tp = tnapi->tp;
7255 int work_done = 0;
7256 struct tg3_hw_status *sblk = tnapi->hw_status;
7257
7258 while (1) {
7259 work_done = tg3_poll_work(tnapi, work_done, budget);
7260
7261 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7262 goto tx_recovery;
7263
7264 if (unlikely(work_done >= budget))
7265 break;
7266
7267 /* tp->last_tag is used in tg3_int_reenable() below
7268 * to tell the hw how much work has been processed,
7269 * so we must read it before checking for more work.
7270 */
7271 tnapi->last_tag = sblk->status_tag;
7272 tnapi->last_irq_tag = tnapi->last_tag;
7273 rmb();
7274
7275 /* check for RX/TX work to do */
7276 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7277 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7278
7279 /* This test here is not race free, but will reduce
7280 * the number of interrupts by looping again.
7281 */
7282 if (tnapi == &tp->napi[1] && tp->rx_refill)
7283 continue;
7284
7285 napi_complete_done(napi, work_done);
7286 /* Reenable interrupts. */
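 /* Writing last_tag into the tag field of the mailbox tells the
 * hardware how far processing has advanced, so it can suppress
 * further interrupts until the status tag moves past this value.
 */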
7287 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7288
7289 /* This test here is synchronized by napi_schedule()
7290 * and napi_complete() to close the race condition.
7291 */
7292 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7293 tw32(HOSTCC_MODE, tp->coalesce_mode |
7294 HOSTCC_MODE_ENABLE |
7295 tnapi->coal_now);
7296 }
7297 break;
7298 }
7299 }
7300
7301 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7302 return work_done;
7303
7304 tx_recovery:
7305 /* work_done is guaranteed to be less than budget. */
7306 napi_complete(napi);
7307 tg3_reset_task_schedule(tp);
7308 return work_done;
7309 }
7310
7311 static void tg3_process_error(struct tg3 *tp)
7312 {
7313 u32 val;
7314 bool real_error = false;
7315
7316 if (tg3_flag(tp, ERROR_PROCESSED))
7317 return;
7318
7319 /* Check Flow Attention register */
7320 val = tr32(HOSTCC_FLOW_ATTN);
7321 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7322 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7323 real_error = true;
7324 }
7325
7326 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7327 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7328 real_error = true;
7329 }
7330
7331 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7332 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7333 real_error = true;
7334 }
7335
7336 if (!real_error)
7337 return;
7338
7339 tg3_dump_state(tp);
7340
7341 tg3_flag_set(tp, ERROR_PROCESSED);
7342 tg3_reset_task_schedule(tp);
7343 }
7344
7345 static int tg3_poll(struct napi_struct *napi, int budget)
7346 {
7347 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7348 struct tg3 *tp = tnapi->tp;
7349 int work_done = 0;
7350 struct tg3_hw_status *sblk = tnapi->hw_status;
7351
7352 while (1) {
7353 if (sblk->status & SD_STATUS_ERROR)
7354 tg3_process_error(tp);
7355
7356 tg3_poll_link(tp);
7357
7358 work_done = tg3_poll_work(tnapi, work_done, budget);
7359
7360 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7361 goto tx_recovery;
7362
7363 if (unlikely(work_done >= budget))
7364 break;
7365
7366 if (tg3_flag(tp, TAGGED_STATUS)) {
7367 /* tp->last_tag is used in tg3_int_reenable() below
7368 * to tell the hw how much work has been processed,
7369 * so we must read it before checking for more work.
7370 */
7371 tnapi->last_tag = sblk->status_tag;
7372 tnapi->last_irq_tag = tnapi->last_tag;
7373 rmb();
7374 } else
7375 sblk->status &= ~SD_STATUS_UPDATED;
7376
7377 if (likely(!tg3_has_work(tnapi))) {
7378 napi_complete_done(napi, work_done);
7379 tg3_int_reenable(tnapi);
7380 break;
7381 }
7382 }
7383
7384 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7385 return work_done;
7386
7387 tx_recovery:
7388 /* work_done is guaranteed to be less than budget. */
7389 napi_complete(napi);
7390 tg3_reset_task_schedule(tp);
7391 return work_done;
7392 }
7393
7394 static void tg3_napi_disable(struct tg3 *tp)
7395 {
7396 int txq_idx = tp->txq_cnt - 1;
7397 int rxq_idx = tp->rxq_cnt - 1;
7398 struct tg3_napi *tnapi;
7399 int i;
7400
7401 for (i = tp->irq_cnt - 1; i >= 0; i--) {
7402 tnapi = &tp->napi[i];
7403 if (tnapi->tx_buffers) {
7404 netif_queue_set_napi(tp->dev, txq_idx,
7405 NETDEV_QUEUE_TYPE_TX, NULL);
7406 txq_idx--;
7407 }
7408 if (tnapi->rx_rcb) {
7409 netif_queue_set_napi(tp->dev, rxq_idx,
7410 NETDEV_QUEUE_TYPE_RX, NULL);
7411 rxq_idx--;
7412 }
7413 napi_disable(&tnapi->napi);
7414 }
7415 }
7416
7417 static void tg3_napi_enable(struct tg3 *tp)
7418 {
7419 int txq_idx = 0, rxq_idx = 0;
7420 struct tg3_napi *tnapi;
7421 int i;
7422
7423 for (i = 0; i < tp->irq_cnt; i++) {
7424 tnapi = &tp->napi[i];
7425 napi_enable_locked(&tnapi->napi);
7426 if (tnapi->tx_buffers) {
7427 netif_queue_set_napi(tp->dev, txq_idx,
7428 NETDEV_QUEUE_TYPE_TX,
7429 &tnapi->napi);
7430 txq_idx++;
7431 }
7432 if (tnapi->rx_rcb) {
7433 netif_queue_set_napi(tp->dev, rxq_idx,
7434 NETDEV_QUEUE_TYPE_RX,
7435 &tnapi->napi);
7436 rxq_idx++;
7437 }
7438 }
7439 }
7440
7441 static void tg3_napi_init(struct tg3 *tp)
7442 {
7443 int i;
7444
7445 for (i = 0; i < tp->irq_cnt; i++) {
7446 netif_napi_add_locked(tp->dev, &tp->napi[i].napi,
7447 i ? tg3_poll_msix : tg3_poll);
7448 netif_napi_set_irq_locked(&tp->napi[i].napi,
7449 tp->napi[i].irq_vec);
7450 }
7451 }
7452
7453 static void tg3_napi_fini(struct tg3 *tp)
7454 {
7455 int i;
7456
7457 for (i = 0; i < tp->irq_cnt; i++)
7458 netif_napi_del(&tp->napi[i].napi);
7459 }
7460
7461 static inline void tg3_netif_stop(struct tg3 *tp)
7462 {
7463 netif_trans_update(tp->dev); /* prevent tx timeout */
7464 tg3_napi_disable(tp);
7465 netif_carrier_off(tp->dev);
7466 netif_tx_disable(tp->dev);
7467 }
7468
7469 /* tp->lock must be held */
7470 static inline void tg3_netif_start(struct tg3 *tp)
7471 {
7472 tg3_ptp_resume(tp);
7473
7474 /* NOTE: unconditional netif_tx_wake_all_queues is only
7475 * appropriate so long as all callers are assured to
7476 * have free tx slots (such as after tg3_init_hw)
7477 */
7478 netif_tx_wake_all_queues(tp->dev);
7479
7480 if (tp->link_up)
7481 netif_carrier_on(tp->dev);
7482
7483 tg3_napi_enable(tp);
7484 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7485 tg3_enable_ints(tp);
7486 }
7487
7488 static void tg3_irq_quiesce(struct tg3 *tp)
7489 __releases(tp->lock)
7490 __acquires(tp->lock)
7491 {
7492 int i;
7493
7494 BUG_ON(tp->irq_sync);
7495
7496 tp->irq_sync = 1;
7497 smp_mb();
7498
7499 spin_unlock_bh(&tp->lock);
7500
7501 for (i = 0; i < tp->irq_cnt; i++)
7502 synchronize_irq(tp->napi[i].irq_vec);
7503
7504 spin_lock_bh(&tp->lock);
7505 }
7506
7507 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7508 * If irq_sync is non-zero, then we synchronize with the IRQ handler
7509 * as well. Most of the time this is not necessary, except when
7510 * shutting down the device.
7511 */
7512 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7513 {
7514 spin_lock_bh(&tp->lock);
7515 if (irq_sync)
7516 tg3_irq_quiesce(tp);
7517 }
7518
7519 static inline void tg3_full_unlock(struct tg3 *tp)
7520 {
7521 spin_unlock_bh(&tp->lock);
7522 }
7523
7524 /* One-shot MSI handler - Chip automatically disables interrupt
7525 * after sending MSI so driver doesn't have to do it.
7526 */
7527 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7528 {
7529 struct tg3_napi *tnapi = dev_id;
7530 struct tg3 *tp = tnapi->tp;
7531
7532 prefetch(tnapi->hw_status);
7533 if (tnapi->rx_rcb)
7534 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7535
7536 if (likely(!tg3_irq_sync(tp)))
7537 napi_schedule(&tnapi->napi);
7538
7539 return IRQ_HANDLED;
7540 }
7541
7542 /* MSI ISR - No need to check for interrupt sharing and no need to
7543 * flush status block and interrupt mailbox. PCI ordering rules
7544 * guarantee that MSI will arrive after the status block.
7545 */
7546 static irqreturn_t tg3_msi(int irq, void *dev_id)
7547 {
7548 struct tg3_napi *tnapi = dev_id;
7549 struct tg3 *tp = tnapi->tp;
7550
7551 prefetch(tnapi->hw_status);
7552 if (tnapi->rx_rcb)
7553 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7554 /*
7555 * Writing any value to intr-mbox-0 clears PCI INTA# and
7556 * chip-internal interrupt pending events.
7557 * Writing non-zero to intr-mbox-0 additionally tells the
7558 * NIC to stop sending us irqs, engaging "in-intr-handler"
7559 * event coalescing.
7560 */
7561 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7562 if (likely(!tg3_irq_sync(tp)))
7563 napi_schedule(&tnapi->napi);
7564
7565 return IRQ_RETVAL(1);
7566 }
7567
7568 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7569 {
7570 struct tg3_napi *tnapi = dev_id;
7571 struct tg3 *tp = tnapi->tp;
7572 struct tg3_hw_status *sblk = tnapi->hw_status;
7573 unsigned int handled = 1;
7574
7575 /* In INTx mode, it is possible for the interrupt to arrive at
7576 * the CPU before the status block posted prior to the interrupt.
7577 * Reading the PCI State register will confirm whether the
7578 * interrupt is ours and will flush the status block.
7579 */
7580 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7581 if (tg3_flag(tp, CHIP_RESETTING) ||
7582 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7583 handled = 0;
7584 goto out;
7585 }
7586 }
7587
7588 /*
7589 * Writing any value to intr-mbox-0 clears PCI INTA# and
7590 * chip-internal interrupt pending events.
7591 * Writing non-zero to intr-mbox-0 additionally tells the
7592 * NIC to stop sending us irqs, engaging "in-intr-handler"
7593 * event coalescing.
7594 *
7595 * Flush the mailbox to de-assert the IRQ immediately to prevent
7596 * spurious interrupts. The flush impacts performance but
7597 * excessive spurious interrupts can be worse in some cases.
7598 */
7599 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7600 if (tg3_irq_sync(tp))
7601 goto out;
7602 sblk->status &= ~SD_STATUS_UPDATED;
7603 if (likely(tg3_has_work(tnapi))) {
7604 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7605 napi_schedule(&tnapi->napi);
7606 } else {
7607 /* No work, shared interrupt perhaps? Re-enable
7608 * interrupts, and flush that PCI write
7609 */
7610 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7611 0x00000000);
7612 }
7613 out:
7614 return IRQ_RETVAL(handled);
7615 }
7616
7617 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7618 {
7619 struct tg3_napi *tnapi = dev_id;
7620 struct tg3 *tp = tnapi->tp;
7621 struct tg3_hw_status *sblk = tnapi->hw_status;
7622 unsigned int handled = 1;
7623
7624 /* In INTx mode, the interrupt can arrive at the CPU before the
7625 * status block posted just prior to it is visible in host memory.
7626 * Reading the PCI State register will confirm whether the
7627 * interrupt is ours and will flush the status block.
7628 */
7629 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7630 if (tg3_flag(tp, CHIP_RESETTING) ||
7631 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7632 handled = 0;
7633 goto out;
7634 }
7635 }
7636
7637 /*
7638 * Writing any value to intr-mbox-0 clears PCI INTA# and
7639 * chip-internal interrupt pending events.
7640 * Writing non-zero to intr-mbox-0 additionally tells the
7641 * NIC to stop sending us irqs, engaging "in-intr-handler"
7642 * event coalescing.
7643 *
7644 * Flush the mailbox to de-assert the IRQ immediately to prevent
7645 * spurious interrupts. The flush impacts performance but
7646 * excessive spurious interrupts can be worse in some cases.
7647 */
7648 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7649
7650 /*
7651 * In a shared interrupt configuration, sometimes other devices'
7652 * interrupts will scream. We record the current status tag here
7653 * so that the above check can report that the screaming interrupts
7654 * are unhandled. Eventually they will be silenced.
7655 */
7656 tnapi->last_irq_tag = sblk->status_tag;
7657
7658 if (tg3_irq_sync(tp))
7659 goto out;
7660
7661 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7662
7663 napi_schedule(&tnapi->napi);
7664
7665 out:
7666 return IRQ_RETVAL(handled);
7667 }
7668
7669 /* ISR for interrupt test */
7670 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7671 {
7672 struct tg3_napi *tnapi = dev_id;
7673 struct tg3 *tp = tnapi->tp;
7674 struct tg3_hw_status *sblk = tnapi->hw_status;
7675
7676 if ((sblk->status & SD_STATUS_UPDATED) ||
7677 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7678 tg3_disable_ints(tp);
7679 return IRQ_RETVAL(1);
7680 }
7681 return IRQ_RETVAL(0);
7682 }
7683
7684 #ifdef CONFIG_NET_POLL_CONTROLLER
7685 static void tg3_poll_controller(struct net_device *dev)
7686 {
7687 int i;
7688 struct tg3 *tp = netdev_priv(dev);
7689
7690 if (tg3_irq_sync(tp))
7691 return;
7692
7693 for (i = 0; i < tp->irq_cnt; i++)
7694 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7695 }
7696 #endif
7697
7698 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7699 {
7700 struct tg3 *tp = netdev_priv(dev);
7701
7702 if (netif_msg_tx_err(tp)) {
7703 netdev_err(dev, "transmit timed out, resetting\n");
7704 tg3_dump_state(tp);
7705 }
7706
7707 tg3_reset_task_schedule(tp);
7708 }
7709
7710 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7711 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7712 {
7713 u32 base = (u32) mapping & 0xffffffff;
7714
7715 return base + len + 8 < base;
7716 }
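
/* Worked example (editor's note): with mapping = 0xffffe000 and
 * len = 0x3000, base + len + 8 wraps to 0x1008 in 32-bit arithmetic,
 * which is less than base, so the buffer straddles a 4GB boundary and
 * the hwbug workaround path must be taken. The + 8 adds a small guard
 * band past the end of the buffer.
 */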
7717
7718 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7719 * of any 4GB boundaries: 4G, 8G, etc
7720 */
7721 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7722 u32 len, u32 mss)
7723 {
7724 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7725 u32 base = (u32) mapping & 0xffffffff;
7726
7727 return ((base + len + (mss & 0x3fff)) < base);
7728 }
7729 return 0;
7730 }
7731
7732 /* Test for DMA addresses > 40-bit */
7733 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7734 int len)
7735 {
7736 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7737 if (tg3_flag(tp, 40BIT_DMA_BUG))
7738 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7739 return 0;
7740 #else
7741 return 0;
7742 #endif
7743 }
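
/* Worked example (editor's note): DMA_BIT_MASK(40) is 0xffffffffff, so
 * a mapping of 0xfffffff000 with len = 0x2000 sums to 0x10000001000,
 * which exceeds the 40-bit mask and forces the workaround copy on
 * parts with the 40BIT_DMA_BUG flag.
 */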
7744
7745 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7746 dma_addr_t mapping, u32 len, u32 flags,
7747 u32 mss, u32 vlan)
7748 {
7749 txbd->addr_hi = ((u64) mapping >> 32);
7750 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7751 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7752 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7753 }
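
/* Example of the packing above (editor's note): a DMA address of
 * 0x123456789 is split into addr_hi = 0x1 and addr_lo = 0x23456789;
 * len shares the len_flags word with the low 16 flag bits, and mss
 * shares the vlan_tag word with the VLAN tag.
 */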
7754
7755 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7756 dma_addr_t map, u32 len, u32 flags,
7757 u32 mss, u32 vlan)
7758 {
7759 struct tg3 *tp = tnapi->tp;
7760 bool hwbug = false;
7761
7762 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7763 hwbug = true;
7764
7765 if (tg3_4g_overflow_test(map, len))
7766 hwbug = true;
7767
7768 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7769 hwbug = true;
7770
7771 if (tg3_40bit_overflow_test(tp, map, len))
7772 hwbug = true;
7773
7774 if (tp->dma_limit) {
7775 u32 prvidx = *entry;
7776 u32 tmp_flag = flags & ~TXD_FLAG_END;
7777 while (len > tp->dma_limit && *budget) {
7778 u32 frag_len = tp->dma_limit;
7779 len -= tp->dma_limit;
7780
7781 /* Avoid the 8-byte DMA problem */
7782 if (len <= 8) {
7783 len += tp->dma_limit / 2;
7784 frag_len = tp->dma_limit / 2;
7785 }
7786
7787 tnapi->tx_buffers[*entry].fragmented = true;
7788
7789 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7790 frag_len, tmp_flag, mss, vlan);
7791 *budget -= 1;
7792 prvidx = *entry;
7793 *entry = NEXT_TX(*entry);
7794
7795 map += frag_len;
7796 }
7797
7798 if (len) {
7799 if (*budget) {
7800 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7801 len, flags, mss, vlan);
7802 *budget -= 1;
7803 *entry = NEXT_TX(*entry);
7804 } else {
7805 hwbug = true;
7806 tnapi->tx_buffers[prvidx].fragmented = false;
7807 }
7808 }
7809 } else {
7810 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7811 len, flags, mss, vlan);
7812 *entry = NEXT_TX(*entry);
7813 }
7814
7815 return hwbug;
7816 }
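
/* Editor's note on the dma_limit loop above: with dma_limit = 4096, a
 * 9000-byte buffer is emitted as 4096 + 4096 + 808 byte descriptors.
 * A 4100-byte buffer would leave a 4-byte tail, so the loop instead
 * emits 2048 + 2052, keeping every descriptor longer than 8 bytes and
 * thus clear of the 8-byte DMA problem.
 */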
7817
7818 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7819 {
7820 int i;
7821 struct sk_buff *skb;
7822 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7823
7824 skb = txb->skb;
7825 txb->skb = NULL;
7826
7827 dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7828 skb_headlen(skb), DMA_TO_DEVICE);
7829
7830 while (txb->fragmented) {
7831 txb->fragmented = false;
7832 entry = NEXT_TX(entry);
7833 txb = &tnapi->tx_buffers[entry];
7834 }
7835
7836 for (i = 0; i <= last; i++) {
7837 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7838
7839 entry = NEXT_TX(entry);
7840 txb = &tnapi->tx_buffers[entry];
7841
7842 dma_unmap_page(&tnapi->tp->pdev->dev,
7843 dma_unmap_addr(txb, mapping),
7844 skb_frag_size(frag), DMA_TO_DEVICE);
7845
7846 while (txb->fragmented) {
7847 txb->fragmented = false;
7848 entry = NEXT_TX(entry);
7849 txb = &tnapi->tx_buffers[entry];
7850 }
7851 }
7852 }
7853
7854 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7855 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7856 struct sk_buff **pskb,
7857 u32 *entry, u32 *budget,
7858 u32 base_flags, u32 mss, u32 vlan)
7859 {
7860 struct tg3 *tp = tnapi->tp;
7861 struct sk_buff *new_skb, *skb = *pskb;
7862 dma_addr_t new_addr = 0;
7863 int ret = 0;
7864
7865 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7866 new_skb = skb_copy(skb, GFP_ATOMIC);
7867 else {
7868 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7869
7870 new_skb = skb_copy_expand(skb,
7871 skb_headroom(skb) + more_headroom,
7872 skb_tailroom(skb), GFP_ATOMIC);
7873 }
7874
7875 if (!new_skb) {
7876 ret = -1;
7877 } else {
7878 /* New SKB is guaranteed to be linear. */
7879 new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7880 new_skb->len, DMA_TO_DEVICE);
7881 /* Make sure the mapping succeeded */
7882 if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7883 dev_kfree_skb_any(new_skb);
7884 ret = -1;
7885 } else {
7886 u32 save_entry = *entry;
7887
7888 base_flags |= TXD_FLAG_END;
7889
7890 tnapi->tx_buffers[*entry].skb = new_skb;
7891 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7892 mapping, new_addr);
7893
7894 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7895 new_skb->len, base_flags,
7896 mss, vlan)) {
7897 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7898 dev_kfree_skb_any(new_skb);
7899 ret = -1;
7900 }
7901 }
7902 }
7903
7904 dev_consume_skb_any(skb);
7905 *pskb = new_skb;
7906 return ret;
7907 }
7908
7909 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7910 {
7911 /* Check if we will never have enough descriptors,
7912 * as gso_segs can be more than the current ring size
7913 */
7914 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7915 }
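
/* Editor's note: assuming the driver's usual default of tx_pending =
 * 511, this admits only packets with fewer than 170 (511 / 3) GSO
 * segments; larger packets could never fit even in an empty ring and
 * are dropped rather than deferred.
 */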
7916
7917 static netdev_tx_t __tg3_start_xmit(struct sk_buff *, struct net_device *);
7918
7919 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7920 * indicated in tg3_tx_frag_set()
7921 */
7922 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7923 struct netdev_queue *txq, struct sk_buff *skb)
7924 {
7925 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7926 struct sk_buff *segs, *seg, *next;
7927
7928 /* Estimate the number of fragments in the worst case */
7929 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7930 netif_tx_stop_queue(txq);
7931
7932 /* netif_tx_stop_queue() must be done before checking
7933 * tx index in tg3_tx_avail() below, because in
7934 * tg3_tx(), we update tx index before checking for
7935 * netif_tx_queue_stopped().
7936 */
7937 smp_mb();
7938 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7939 return NETDEV_TX_BUSY;
7940
7941 netif_tx_wake_queue(txq);
7942 }
7943
7944 segs = skb_gso_segment(skb, tp->dev->features &
7945 ~(NETIF_F_TSO | NETIF_F_TSO6));
7946 if (IS_ERR(segs) || !segs) {
7947 tnapi->tx_dropped++;
7948 goto tg3_tso_bug_end;
7949 }
7950
7951 skb_list_walk_safe(segs, seg, next) {
7952 skb_mark_not_on_list(seg);
7953 __tg3_start_xmit(seg, tp->dev);
7954 }
7955
7956 tg3_tso_bug_end:
7957 dev_consume_skb_any(skb);
7958
7959 return NETDEV_TX_OK;
7960 }
7961
7962 /* hard_start_xmit for all devices */
7963 static netdev_tx_t __tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7964 {
7965 struct tg3 *tp = netdev_priv(dev);
7966 u32 len, entry, base_flags, mss, vlan = 0;
7967 u32 budget;
7968 int i = -1, would_hit_hwbug;
7969 dma_addr_t mapping;
7970 struct tg3_napi *tnapi;
7971 struct netdev_queue *txq;
7972 unsigned int last;
7973 struct iphdr *iph = NULL;
7974 struct tcphdr *tcph = NULL;
7975 __sum16 tcp_csum = 0, ip_csum = 0;
7976 __be16 ip_tot_len = 0;
7977
7978 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7979 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7980 if (tg3_flag(tp, ENABLE_TSS))
7981 tnapi++;
7982
7983 budget = tg3_tx_avail(tnapi);
7984
7985 /* We are running in BH disabled context with netif_tx_lock
7986 * and TX reclaim runs via tp->napi.poll inside of a software
7987 * interrupt. Furthermore, IRQ processing runs lockless so we have
7988 * no IRQ context deadlocks to worry about either. Rejoice!
7989 */
7990 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7991 if (!netif_tx_queue_stopped(txq)) {
7992 netif_tx_stop_queue(txq);
7993
7994 /* This is a hard error, log it. */
7995 netdev_err(dev,
7996 "BUG! Tx Ring full when queue awake!\n");
7997 }
7998 return NETDEV_TX_BUSY;
7999 }
8000
8001 entry = tnapi->tx_prod;
8002 base_flags = 0;
8003
8004 mss = skb_shinfo(skb)->gso_size;
8005 if (mss) {
8006 u32 tcp_opt_len, hdr_len;
8007
8008 if (skb_cow_head(skb, 0))
8009 goto drop;
8010
8011 iph = ip_hdr(skb);
8012 tcp_opt_len = tcp_optlen(skb);
8013
8014 hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
8015
8016 /* HW/FW cannot correctly segment packets that have been
8017 * VLAN encapsulated.
8018 */
8019 if (skb->protocol == htons(ETH_P_8021Q) ||
8020 skb->protocol == htons(ETH_P_8021AD)) {
8021 if (tg3_tso_bug_gso_check(tnapi, skb))
8022 return tg3_tso_bug(tp, tnapi, txq, skb);
8023 goto drop;
8024 }
8025
8026 if (!skb_is_gso_v6(skb)) {
8027 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
8028 tg3_flag(tp, TSO_BUG)) {
8029 if (tg3_tso_bug_gso_check(tnapi, skb))
8030 return tg3_tso_bug(tp, tnapi, txq, skb);
8031 goto drop;
8032 }
8033 ip_csum = iph->check;
8034 ip_tot_len = iph->tot_len;
8035 iph->check = 0;
8036 iph->tot_len = htons(mss + hdr_len);
8037 }
8038
8039 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
8040 TXD_FLAG_CPU_POST_DMA);
8041
8042 tcph = tcp_hdr(skb);
8043 tcp_csum = tcph->check;
8044
8045 if (tg3_flag(tp, HW_TSO_1) ||
8046 tg3_flag(tp, HW_TSO_2) ||
8047 tg3_flag(tp, HW_TSO_3)) {
8048 tcph->check = 0;
8049 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
8050 } else {
8051 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
8052 0, IPPROTO_TCP, 0);
8053 }
8054
8055 if (tg3_flag(tp, HW_TSO_3)) {
8056 mss |= (hdr_len & 0xc) << 12;
8057 if (hdr_len & 0x10)
8058 base_flags |= 0x00000010;
8059 base_flags |= (hdr_len & 0x3e0) << 5;
8060 } else if (tg3_flag(tp, HW_TSO_2))
8061 mss |= hdr_len << 9;
8062 else if (tg3_flag(tp, HW_TSO_1) ||
8063 tg3_asic_rev(tp) == ASIC_REV_5705) {
8064 if (tcp_opt_len || iph->ihl > 5) {
8065 int tsflags;
8066
8067 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8068 mss |= (tsflags << 11);
8069 }
8070 } else {
8071 if (tcp_opt_len || iph->ihl > 5) {
8072 int tsflags;
8073
8074 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8075 base_flags |= tsflags << 12;
8076 }
8077 }
8078 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8079 /* HW/FW cannot correctly checksum packets that have been
8080 * VLAN encapsulated.
8081 */
8082 if (skb->protocol == htons(ETH_P_8021Q) ||
8083 skb->protocol == htons(ETH_P_8021AD)) {
8084 if (skb_checksum_help(skb))
8085 goto drop;
8086 } else {
8087 base_flags |= TXD_FLAG_TCPUDP_CSUM;
8088 }
8089 }
8090
8091 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8092 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8093 base_flags |= TXD_FLAG_JMB_PKT;
8094
8095 if (skb_vlan_tag_present(skb)) {
8096 base_flags |= TXD_FLAG_VLAN;
8097 vlan = skb_vlan_tag_get(skb);
8098 }
8099
8100 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8101 tg3_flag(tp, TX_TSTAMP_EN)) {
8102 tg3_full_lock(tp, 0);
8103 if (!tp->pre_tx_ts) {
8104 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8105 base_flags |= TXD_FLAG_HWTSTAMP;
8106 tg3_read_tx_tstamp(tp, &tp->pre_tx_ts);
8107 }
8108 tg3_full_unlock(tp);
8109 }
8110
8111 len = skb_headlen(skb);
8112
8113 mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8114 DMA_TO_DEVICE);
8115 if (dma_mapping_error(&tp->pdev->dev, mapping))
8116 goto drop;
8117
8118
8119 tnapi->tx_buffers[entry].skb = skb;
8120 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8121
8122 would_hit_hwbug = 0;
8123
8124 if (tg3_flag(tp, 5701_DMA_BUG))
8125 would_hit_hwbug = 1;
8126
8127 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8128 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8129 mss, vlan)) {
8130 would_hit_hwbug = 1;
8131 } else if (skb_shinfo(skb)->nr_frags > 0) {
8132 u32 tmp_mss = mss;
8133
8134 if (!tg3_flag(tp, HW_TSO_1) &&
8135 !tg3_flag(tp, HW_TSO_2) &&
8136 !tg3_flag(tp, HW_TSO_3))
8137 tmp_mss = 0;
8138
8139 /* Now loop through additional data
8140 * fragments, and queue them.
8141 */
8142 last = skb_shinfo(skb)->nr_frags - 1;
8143 for (i = 0; i <= last; i++) {
8144 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8145
8146 len = skb_frag_size(frag);
8147 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8148 len, DMA_TO_DEVICE);
8149
8150 tnapi->tx_buffers[entry].skb = NULL;
8151 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8152 mapping);
8153 if (dma_mapping_error(&tp->pdev->dev, mapping))
8154 goto dma_error;
8155
8156 if (!budget ||
8157 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8158 len, base_flags |
8159 ((i == last) ? TXD_FLAG_END : 0),
8160 tmp_mss, vlan)) {
8161 would_hit_hwbug = 1;
8162 break;
8163 }
8164 }
8165 }
8166
8167 if (would_hit_hwbug) {
8168 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8169
8170 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8171 /* If it's a TSO packet, do GSO instead of
8172 * allocating and copying to a large linear SKB
8173 */
8174 if (ip_tot_len) {
8175 iph->check = ip_csum;
8176 iph->tot_len = ip_tot_len;
8177 }
8178 tcph->check = tcp_csum;
8179 return tg3_tso_bug(tp, tnapi, txq, skb);
8180 }
8181
8182 /* If the workaround fails due to memory/mapping
8183 * failure, silently drop this packet.
8184 */
8185 entry = tnapi->tx_prod;
8186 budget = tg3_tx_avail(tnapi);
8187 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8188 base_flags, mss, vlan))
8189 goto drop_nofree;
8190 }
8191
8192 skb_tx_timestamp(skb);
8193 netdev_tx_sent_queue(txq, skb->len);
8194
8195 /* Sync BD data before updating mailbox */
8196 wmb();
8197
8198 tnapi->tx_prod = entry;
8199 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8200 netif_tx_stop_queue(txq);
8201
8202 /* netif_tx_stop_queue() must be done before checking
8203 * tx index in tg3_tx_avail() below, because in
8204 * tg3_tx(), we update tx index before checking for
8205 * netif_tx_queue_stopped().
8206 */
8207 smp_mb();
8208 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8209 netif_tx_wake_queue(txq);
8210 }
8211
8212 return NETDEV_TX_OK;
8213
8214 dma_error:
8215 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8216 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8217 drop:
8218 dev_kfree_skb_any(skb);
8219 drop_nofree:
8220 tnapi->tx_dropped++;
8221 return NETDEV_TX_OK;
8222 }
8223
8224 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
8225 {
8226 struct netdev_queue *txq;
8227 u16 skb_queue_mapping;
8228 netdev_tx_t ret;
8229
8230 skb_queue_mapping = skb_get_queue_mapping(skb);
8231 txq = netdev_get_tx_queue(dev, skb_queue_mapping);
8232
8233 ret = __tg3_start_xmit(skb, dev);
8234
8235 /* Notify the hardware that packets are ready by updating the TX ring
8236 * tail pointer. We respect netdev_xmit_more() thus avoiding poking
8237 * the hardware for every packet. To guarantee forward progress the TX
8238 * ring must be drained when it is full as indicated by
8239 * netif_xmit_stopped(). This needs to happen even when the current
8240 * skb was dropped or rejected with NETDEV_TX_BUSY. Otherwise packets
8241 * queued by previous __tg3_start_xmit() calls might get stuck in
8242 * the queue forever.
8243 */
8244 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8245 struct tg3_napi *tnapi;
8246 struct tg3 *tp;
8247
8248 tp = netdev_priv(dev);
8249 tnapi = &tp->napi[skb_queue_mapping];
8250
8251 if (tg3_flag(tp, ENABLE_TSS))
8252 tnapi++;
8253
8254 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
8255 }
8256
8257 return ret;
8258 }
8259
8260 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8261 {
8262 if (enable) {
8263 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8264 MAC_MODE_PORT_MODE_MASK);
8265
8266 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8267
8268 if (!tg3_flag(tp, 5705_PLUS))
8269 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8270
8271 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8272 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8273 else
8274 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8275 } else {
8276 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8277
8278 if (tg3_flag(tp, 5705_PLUS) ||
8279 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8280 tg3_asic_rev(tp) == ASIC_REV_5700)
8281 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8282 }
8283
8284 tw32(MAC_MODE, tp->mac_mode);
8285 udelay(40);
8286 }
8287
8288 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8289 {
8290 u32 val, bmcr, mac_mode, ptest = 0;
8291
8292 tg3_phy_toggle_apd(tp, false);
8293 tg3_phy_toggle_automdix(tp, false);
8294
8295 if (extlpbk && tg3_phy_set_extloopbk(tp))
8296 return -EIO;
8297
8298 bmcr = BMCR_FULLDPLX;
8299 switch (speed) {
8300 case SPEED_10:
8301 break;
8302 case SPEED_100:
8303 bmcr |= BMCR_SPEED100;
8304 break;
8305 case SPEED_1000:
8306 default:
8307 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8308 speed = SPEED_100;
8309 bmcr |= BMCR_SPEED100;
8310 } else {
8311 speed = SPEED_1000;
8312 bmcr |= BMCR_SPEED1000;
8313 }
8314 }
8315
8316 if (extlpbk) {
8317 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8318 tg3_readphy(tp, MII_CTRL1000, &val);
8319 val |= CTL1000_AS_MASTER |
8320 CTL1000_ENABLE_MASTER;
8321 tg3_writephy(tp, MII_CTRL1000, val);
8322 } else {
8323 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8324 MII_TG3_FET_PTEST_TRIM_2;
8325 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8326 }
8327 } else
8328 bmcr |= BMCR_LOOPBACK;
8329
8330 tg3_writephy(tp, MII_BMCR, bmcr);
8331
8332 /* The write needs to be flushed for the FETs */
8333 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8334 tg3_readphy(tp, MII_BMCR, &bmcr);
8335
8336 udelay(40);
8337
8338 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8339 tg3_asic_rev(tp) == ASIC_REV_5785) {
8340 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8341 MII_TG3_FET_PTEST_FRC_TX_LINK |
8342 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8343
8344 /* The write needs to be flushed for the AC131 */
8345 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8346 }
8347
8348 /* Reset to prevent losing 1st rx packet intermittently */
8349 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8350 tg3_flag(tp, 5780_CLASS)) {
8351 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8352 udelay(10);
8353 tw32_f(MAC_RX_MODE, tp->rx_mode);
8354 }
8355
8356 mac_mode = tp->mac_mode &
8357 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8358 if (speed == SPEED_1000)
8359 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8360 else
8361 mac_mode |= MAC_MODE_PORT_MODE_MII;
8362
8363 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8364 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8365
8366 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8367 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8368 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8369 mac_mode |= MAC_MODE_LINK_POLARITY;
8370
8371 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8372 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8373 }
8374
8375 tw32(MAC_MODE, mac_mode);
8376 udelay(40);
8377
8378 return 0;
8379 }
8380
8381 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8382 {
8383 struct tg3 *tp = netdev_priv(dev);
8384
8385 if (features & NETIF_F_LOOPBACK) {
8386 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8387 return;
8388
8389 spin_lock_bh(&tp->lock);
8390 tg3_mac_loopback(tp, true);
8391 netif_carrier_on(tp->dev);
8392 spin_unlock_bh(&tp->lock);
8393 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8394 } else {
8395 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8396 return;
8397
8398 spin_lock_bh(&tp->lock);
8399 tg3_mac_loopback(tp, false);
8400 /* Force link status check */
8401 tg3_setup_phy(tp, true);
8402 spin_unlock_bh(&tp->lock);
8403 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8404 }
8405 }
8406
8407 static netdev_features_t tg3_fix_features(struct net_device *dev,
8408 netdev_features_t features)
8409 {
8410 struct tg3 *tp = netdev_priv(dev);
8411
8412 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8413 features &= ~NETIF_F_ALL_TSO;
8414
8415 return features;
8416 }
8417
8418 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8419 {
8420 netdev_features_t changed = dev->features ^ features;
8421
8422 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8423 tg3_set_loopback(dev, features);
8424
8425 return 0;
8426 }
8427
8428 static void tg3_rx_prodring_free(struct tg3 *tp,
8429 struct tg3_rx_prodring_set *tpr)
8430 {
8431 int i;
8432
8433 if (tpr != &tp->napi[0].prodring) {
8434 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8435 i = (i + 1) & tp->rx_std_ring_mask)
8436 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8437 tp->rx_pkt_map_sz);
8438
8439 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8440 for (i = tpr->rx_jmb_cons_idx;
8441 i != tpr->rx_jmb_prod_idx;
8442 i = (i + 1) & tp->rx_jmb_ring_mask) {
8443 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8444 TG3_RX_JMB_MAP_SZ);
8445 }
8446 }
8447
8448 return;
8449 }
8450
8451 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8452 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8453 tp->rx_pkt_map_sz);
8454
8455 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8456 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8457 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8458 TG3_RX_JMB_MAP_SZ);
8459 }
8460 }
8461
8462 /* Initialize rx rings for packet processing.
8463 *
8464 * The chip has been shut down and the driver detached from
8465 * the networking stack, so no interrupts or new tx packets will
8466 * end up in the driver. tp->{tx,}lock are held and thus
8467 * we may not sleep.
8468 */
8469 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8470 struct tg3_rx_prodring_set *tpr)
8471 {
8472 u32 i, rx_pkt_dma_sz;
8473
8474 tpr->rx_std_cons_idx = 0;
8475 tpr->rx_std_prod_idx = 0;
8476 tpr->rx_jmb_cons_idx = 0;
8477 tpr->rx_jmb_prod_idx = 0;
8478
8479 if (tpr != &tp->napi[0].prodring) {
8480 memset(&tpr->rx_std_buffers[0], 0,
8481 TG3_RX_STD_BUFF_RING_SIZE(tp));
8482 if (tpr->rx_jmb_buffers)
8483 memset(&tpr->rx_jmb_buffers[0], 0,
8484 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8485 goto done;
8486 }
8487
8488 /* Zero out all descriptors. */
8489 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8490
8491 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8492 if (tg3_flag(tp, 5780_CLASS) &&
8493 tp->dev->mtu > ETH_DATA_LEN)
8494 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8495 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8496
8497 /* Initialize invariants of the rings; we only set this
8498 * stuff once. This works because the card does not
8499 * write into the rx buffer posting rings.
8500 */
8501 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8502 struct tg3_rx_buffer_desc *rxd;
8503
8504 rxd = &tpr->rx_std[i];
8505 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8506 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8507 rxd->opaque = (RXD_OPAQUE_RING_STD |
8508 (i << RXD_OPAQUE_INDEX_SHIFT));
8509 }
8510
8511 /* Now allocate fresh SKBs for each rx ring. */
8512 for (i = 0; i < tp->rx_pending; i++) {
8513 unsigned int frag_size;
8514
8515 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8516 &frag_size) < 0) {
8517 netdev_warn(tp->dev,
8518 "Using a smaller RX standard ring. Only "
8519 "%d out of %d buffers were allocated "
8520 "successfully\n", i, tp->rx_pending);
8521 if (i == 0)
8522 goto initfail;
8523 tp->rx_pending = i;
8524 break;
8525 }
8526 }
8527
8528 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8529 goto done;
8530
8531 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8532
8533 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8534 goto done;
8535
8536 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8537 struct tg3_rx_buffer_desc *rxd;
8538
8539 rxd = &tpr->rx_jmb[i].std;
8540 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8541 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8542 RXD_FLAG_JUMBO;
8543 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8544 (i << RXD_OPAQUE_INDEX_SHIFT));
8545 }
8546
8547 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8548 unsigned int frag_size;
8549
8550 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8551 &frag_size) < 0) {
8552 netdev_warn(tp->dev,
8553 "Using a smaller RX jumbo ring. Only %d "
8554 "out of %d buffers were allocated "
8555 "successfully\n", i, tp->rx_jumbo_pending);
8556 if (i == 0)
8557 goto initfail;
8558 tp->rx_jumbo_pending = i;
8559 break;
8560 }
8561 }
8562
8563 done:
8564 return 0;
8565
8566 initfail:
8567 tg3_rx_prodring_free(tp, tpr);
8568 return -ENOMEM;
8569 }
8570
8571 static void tg3_rx_prodring_fini(struct tg3 *tp,
8572 struct tg3_rx_prodring_set *tpr)
8573 {
8574 kfree(tpr->rx_std_buffers);
8575 tpr->rx_std_buffers = NULL;
8576 kfree(tpr->rx_jmb_buffers);
8577 tpr->rx_jmb_buffers = NULL;
8578 if (tpr->rx_std) {
8579 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8580 tpr->rx_std, tpr->rx_std_mapping);
8581 tpr->rx_std = NULL;
8582 }
8583 if (tpr->rx_jmb) {
8584 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8585 tpr->rx_jmb, tpr->rx_jmb_mapping);
8586 tpr->rx_jmb = NULL;
8587 }
8588 }
8589
8590 static int tg3_rx_prodring_init(struct tg3 *tp,
8591 struct tg3_rx_prodring_set *tpr)
8592 {
8593 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8594 GFP_KERNEL);
8595 if (!tpr->rx_std_buffers)
8596 return -ENOMEM;
8597
8598 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8599 TG3_RX_STD_RING_BYTES(tp),
8600 &tpr->rx_std_mapping,
8601 GFP_KERNEL);
8602 if (!tpr->rx_std)
8603 goto err_out;
8604
8605 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8606 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8607 GFP_KERNEL);
8608 if (!tpr->rx_jmb_buffers)
8609 goto err_out;
8610
8611 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8612 TG3_RX_JMB_RING_BYTES(tp),
8613 &tpr->rx_jmb_mapping,
8614 GFP_KERNEL);
8615 if (!tpr->rx_jmb)
8616 goto err_out;
8617 }
8618
8619 return 0;
8620
8621 err_out:
8622 tg3_rx_prodring_fini(tp, tpr);
8623 return -ENOMEM;
8624 }
8625
8626 /* Free up pending packets in all rx/tx rings.
8627 *
8628 * The chip has been shut down and the driver detached from
8629 * the networking stack, so no interrupts or new tx packets will
8630 * end up in the driver. tp->{tx,}lock is not held and we are not
8631 * in an interrupt context and thus may sleep.
8632 */
8633 static void tg3_free_rings(struct tg3 *tp)
8634 {
8635 int i, j;
8636
8637 for (j = 0; j < tp->irq_cnt; j++) {
8638 struct tg3_napi *tnapi = &tp->napi[j];
8639
8640 tg3_rx_prodring_free(tp, &tnapi->prodring);
8641
8642 if (!tnapi->tx_buffers)
8643 continue;
8644
8645 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8646 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8647
8648 if (!skb)
8649 continue;
8650
8651 tg3_tx_skb_unmap(tnapi, i,
8652 skb_shinfo(skb)->nr_frags - 1);
8653
8654 dev_consume_skb_any(skb);
8655 }
8656 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8657 }
8658 }
8659
8660 /* Initialize tx/rx rings for packet processing.
8661 *
8662 * The chip has been shut down and the driver detached from
8663 * the networking stack, so no interrupts or new tx packets will
8664 * end up in the driver. tp->{tx,}lock are held and thus
8665 * we may not sleep.
8666 */
8667 static int tg3_init_rings(struct tg3 *tp)
8668 {
8669 int i;
8670
8671 /* Free up all the SKBs. */
8672 tg3_free_rings(tp);
8673
8674 for (i = 0; i < tp->irq_cnt; i++) {
8675 struct tg3_napi *tnapi = &tp->napi[i];
8676
8677 tnapi->last_tag = 0;
8678 tnapi->last_irq_tag = 0;
8679 tnapi->hw_status->status = 0;
8680 tnapi->hw_status->status_tag = 0;
8681 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8682
8683 tnapi->tx_prod = 0;
8684 tnapi->tx_cons = 0;
8685 if (tnapi->tx_ring)
8686 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8687
8688 tnapi->rx_rcb_ptr = 0;
8689 if (tnapi->rx_rcb)
8690 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8691
8692 if (tnapi->prodring.rx_std &&
8693 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8694 tg3_free_rings(tp);
8695 return -ENOMEM;
8696 }
8697 }
8698
8699 return 0;
8700 }
8701
8702 static void tg3_mem_tx_release(struct tg3 *tp)
8703 {
8704 int i;
8705
8706 for (i = 0; i < tp->irq_max; i++) {
8707 struct tg3_napi *tnapi = &tp->napi[i];
8708
8709 if (tnapi->tx_ring) {
8710 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8711 tnapi->tx_ring, tnapi->tx_desc_mapping);
8712 tnapi->tx_ring = NULL;
8713 }
8714
8715 kfree(tnapi->tx_buffers);
8716 tnapi->tx_buffers = NULL;
8717 }
8718 }
8719
8720 static int tg3_mem_tx_acquire(struct tg3 *tp)
8721 {
8722 int i;
8723 struct tg3_napi *tnapi = &tp->napi[0];
8724
8725 /* If multivector TSS is enabled, vector 0 does not handle
8726 * tx interrupts. Don't allocate any resources for it.
8727 */
8728 if (tg3_flag(tp, ENABLE_TSS))
8729 tnapi++;
8730
8731 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8732 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8733 sizeof(struct tg3_tx_ring_info),
8734 GFP_KERNEL);
8735 if (!tnapi->tx_buffers)
8736 goto err_out;
8737
8738 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8739 TG3_TX_RING_BYTES,
8740 &tnapi->tx_desc_mapping,
8741 GFP_KERNEL);
8742 if (!tnapi->tx_ring)
8743 goto err_out;
8744 }
8745
8746 return 0;
8747
8748 err_out:
8749 tg3_mem_tx_release(tp);
8750 return -ENOMEM;
8751 }
8752
8753 static void tg3_mem_rx_release(struct tg3 *tp)
8754 {
8755 int i;
8756
8757 for (i = 0; i < tp->irq_max; i++) {
8758 struct tg3_napi *tnapi = &tp->napi[i];
8759
8760 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8761
8762 if (!tnapi->rx_rcb)
8763 continue;
8764
8765 dma_free_coherent(&tp->pdev->dev,
8766 TG3_RX_RCB_RING_BYTES(tp),
8767 tnapi->rx_rcb,
8768 tnapi->rx_rcb_mapping);
8769 tnapi->rx_rcb = NULL;
8770 }
8771 }
8772
8773 static int tg3_mem_rx_acquire(struct tg3 *tp)
8774 {
8775 unsigned int i, limit;
8776
8777 limit = tp->rxq_cnt;
8778
8779 /* If RSS is enabled, we need a (dummy) producer ring
8780 * set on vector zero. This is the true hw prodring.
8781 */
8782 if (tg3_flag(tp, ENABLE_RSS))
8783 limit++;
8784
8785 for (i = 0; i < limit; i++) {
8786 struct tg3_napi *tnapi = &tp->napi[i];
8787
8788 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8789 goto err_out;
8790
8791 /* If multivector RSS is enabled, vector 0
8792 * does not handle rx or tx interrupts.
8793 * Don't allocate any resources for it.
8794 */
8795 if (!i && tg3_flag(tp, ENABLE_RSS))
8796 continue;
8797
8798 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8799 TG3_RX_RCB_RING_BYTES(tp),
8800 &tnapi->rx_rcb_mapping,
8801 GFP_KERNEL);
8802 if (!tnapi->rx_rcb)
8803 goto err_out;
8804 }
8805
8806 return 0;
8807
8808 err_out:
8809 tg3_mem_rx_release(tp);
8810 return -ENOMEM;
8811 }
8812
8813 /*
8814 * Must not be invoked with interrupt sources disabled and
8815 * the hardware shut down.
8816 */
8817 static void tg3_free_consistent(struct tg3 *tp)
8818 {
8819 int i;
8820
8821 for (i = 0; i < tp->irq_cnt; i++) {
8822 struct tg3_napi *tnapi = &tp->napi[i];
8823
8824 if (tnapi->hw_status) {
8825 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8826 tnapi->hw_status,
8827 tnapi->status_mapping);
8828 tnapi->hw_status = NULL;
8829 }
8830 }
8831
8832 tg3_mem_rx_release(tp);
8833 tg3_mem_tx_release(tp);
8834
8835 /* tp->hw_stats can be referenced safely:
8836 * 1. under rtnl_lock
8837 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8838 */
8839 if (tp->hw_stats) {
8840 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8841 tp->hw_stats, tp->stats_mapping);
8842 tp->hw_stats = NULL;
8843 }
8844 }
8845
8846 /*
8847 * Must not be invoked with interrupt sources disabled and
8848 * the hardware shut down. Can sleep.
8849 */
8850 static int tg3_alloc_consistent(struct tg3 *tp)
8851 {
8852 int i;
8853
8854 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8855 sizeof(struct tg3_hw_stats),
8856 &tp->stats_mapping, GFP_KERNEL);
8857 if (!tp->hw_stats)
8858 goto err_out;
8859
8860 for (i = 0; i < tp->irq_cnt; i++) {
8861 struct tg3_napi *tnapi = &tp->napi[i];
8862 struct tg3_hw_status *sblk;
8863
8864 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8865 TG3_HW_STATUS_SIZE,
8866 &tnapi->status_mapping,
8867 GFP_KERNEL);
8868 if (!tnapi->hw_status)
8869 goto err_out;
8870
8871 sblk = tnapi->hw_status;
8872
8873 if (tg3_flag(tp, ENABLE_RSS)) {
8874 u16 *prodptr = NULL;
8875
8876 /*
8877 * When RSS is enabled, the status block format changes
8878 * slightly. The "rx_jumbo_consumer", "reserved",
8879 * and "rx_mini_consumer" members get mapped to the
8880 * other three rx return ring producer indexes.
8881 */
8882 switch (i) {
8883 case 1:
8884 prodptr = &sblk->idx[0].rx_producer;
8885 break;
8886 case 2:
8887 prodptr = &sblk->rx_jumbo_consumer;
8888 break;
8889 case 3:
8890 prodptr = &sblk->reserved;
8891 break;
8892 case 4:
8893 prodptr = &sblk->rx_mini_consumer;
8894 break;
8895 }
8896 tnapi->rx_rcb_prod_idx = prodptr;
8897 } else {
8898 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8899 }
8900 }
8901
8902 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8903 goto err_out;
8904
8905 return 0;
8906
8907 err_out:
8908 tg3_free_consistent(tp);
8909 return -ENOMEM;
8910 }
8911
8912 #define MAX_WAIT_CNT 1000
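
/* Editor's note: paired with the udelay(100) in the poll loops below,
 * MAX_WAIT_CNT bounds each wait at roughly 1000 * 100us = 100ms.
 */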
8913
8914 /* To stop a block, clear the enable bit and poll till it
8915 * clears. tp->lock is held.
8916 */
8917 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8918 {
8919 unsigned int i;
8920 u32 val;
8921
8922 if (tg3_flag(tp, 5705_PLUS)) {
8923 switch (ofs) {
8924 case RCVLSC_MODE:
8925 case DMAC_MODE:
8926 case MBFREE_MODE:
8927 case BUFMGR_MODE:
8928 case MEMARB_MODE:
8929 /* We can't enable/disable these bits of the
8930 * 5705/5750, just say success.
8931 */
8932 return 0;
8933
8934 default:
8935 break;
8936 }
8937 }
8938
8939 val = tr32(ofs);
8940 val &= ~enable_bit;
8941 tw32_f(ofs, val);
8942
8943 for (i = 0; i < MAX_WAIT_CNT; i++) {
8944 if (pci_channel_offline(tp->pdev)) {
8945 dev_err(&tp->pdev->dev,
8946 "tg3_stop_block device offline, "
8947 "ofs=%lx enable_bit=%x\n",
8948 ofs, enable_bit);
8949 return -ENODEV;
8950 }
8951
8952 udelay(100);
8953 val = tr32(ofs);
8954 if ((val & enable_bit) == 0)
8955 break;
8956 }
8957
8958 if (i == MAX_WAIT_CNT && !silent) {
8959 dev_err(&tp->pdev->dev,
8960 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8961 ofs, enable_bit);
8962 return -ENODEV;
8963 }
8964
8965 return 0;
8966 }
8967
8968 /* tp->lock is held. */
8969 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8970 {
8971 int i, err;
8972
8973 tg3_disable_ints(tp);
8974
8975 if (pci_channel_offline(tp->pdev)) {
8976 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8977 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8978 err = -ENODEV;
8979 goto err_no_dev;
8980 }
8981
8982 tp->rx_mode &= ~RX_MODE_ENABLE;
8983 tw32_f(MAC_RX_MODE, tp->rx_mode);
8984 udelay(10);
8985
8986 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8987 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8988 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8989 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8990 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8991 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8992
8993 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8994 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8995 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8996 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8997 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8998 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8999 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
9000
9001 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
9002 tw32_f(MAC_MODE, tp->mac_mode);
9003 udelay(40);
9004
9005 tp->tx_mode &= ~TX_MODE_ENABLE;
9006 tw32_f(MAC_TX_MODE, tp->tx_mode);
9007
9008 for (i = 0; i < MAX_WAIT_CNT; i++) {
9009 udelay(100);
9010 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
9011 break;
9012 }
9013 if (i >= MAX_WAIT_CNT) {
9014 dev_err(&tp->pdev->dev,
9015 "%s timed out, TX_MODE_ENABLE will not clear "
9016 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
9017 err |= -ENODEV;
9018 }
9019
9020 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
9021 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
9022 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
9023
9024 tw32(FTQ_RESET, 0xffffffff);
9025 tw32(FTQ_RESET, 0x00000000);
9026
9027 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
9028 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
9029
9030 err_no_dev:
9031 for (i = 0; i < tp->irq_cnt; i++) {
9032 struct tg3_napi *tnapi = &tp->napi[i];
9033 if (tnapi->hw_status)
9034 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9035 }
9036
9037 return err;
9038 }
9039
9040 /* Save PCI command register before chip reset */
9041 static void tg3_save_pci_state(struct tg3 *tp)
9042 {
9043 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
9044 }
9045
9046 /* Restore PCI state after chip reset */
9047 static void tg3_restore_pci_state(struct tg3 *tp)
9048 {
9049 u32 val;
9050
9051 /* Re-enable indirect register accesses. */
9052 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9053 tp->misc_host_ctrl);
9054
9055 /* Set MAX PCI retry to zero. */
9056 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
9057 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9058 tg3_flag(tp, PCIX_MODE))
9059 val |= PCISTATE_RETRY_SAME_DMA;
9060 /* Allow reads and writes to the APE register and memory space. */
9061 if (tg3_flag(tp, ENABLE_APE))
9062 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9063 PCISTATE_ALLOW_APE_SHMEM_WR |
9064 PCISTATE_ALLOW_APE_PSPACE_WR;
9065 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
9066
9067 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
9068
9069 if (!tg3_flag(tp, PCI_EXPRESS)) {
9070 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
9071 tp->pci_cacheline_sz);
9072 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
9073 tp->pci_lat_timer);
9074 }
9075
9076 /* Make sure PCI-X relaxed ordering bit is clear. */
9077 if (tg3_flag(tp, PCIX_MODE)) {
9078 u16 pcix_cmd;
9079
9080 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9081 &pcix_cmd);
9082 pcix_cmd &= ~PCI_X_CMD_ERO;
9083 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9084 pcix_cmd);
9085 }
9086
9087 if (tg3_flag(tp, 5780_CLASS)) {
9088
9089 /* Chip reset on 5780 will reset MSI enable bit,
9090 * so we need to restore it.
9091 */
9092 if (tg3_flag(tp, USING_MSI)) {
9093 u16 ctrl;
9094
9095 pci_read_config_word(tp->pdev,
9096 tp->msi_cap + PCI_MSI_FLAGS,
9097 &ctrl);
9098 pci_write_config_word(tp->pdev,
9099 tp->msi_cap + PCI_MSI_FLAGS,
9100 ctrl | PCI_MSI_FLAGS_ENABLE);
9101 val = tr32(MSGINT_MODE);
9102 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9103 }
9104 }
9105 }
9106
9107 static void tg3_override_clk(struct tg3 *tp)
9108 {
9109 u32 val;
9110
9111 switch (tg3_asic_rev(tp)) {
9112 case ASIC_REV_5717:
9113 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9114 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9115 TG3_CPMU_MAC_ORIDE_ENABLE);
9116 break;
9117
9118 case ASIC_REV_5719:
9119 case ASIC_REV_5720:
9120 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9121 break;
9122
9123 default:
9124 return;
9125 }
9126 }
9127
9128 static void tg3_restore_clk(struct tg3 *tp)
9129 {
9130 u32 val;
9131
9132 switch (tg3_asic_rev(tp)) {
9133 case ASIC_REV_5717:
9134 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9135 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9136 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9137 break;
9138
9139 case ASIC_REV_5719:
9140 case ASIC_REV_5720:
9141 val = tr32(TG3_CPMU_CLCK_ORIDE);
9142 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9143 break;
9144
9145 default:
9146 return;
9147 }
9148 }
9149
9150 /* tp->lock is held. */
9151 static int tg3_chip_reset(struct tg3 *tp)
9152 __releases(tp->lock)
9153 __acquires(tp->lock)
9154 {
9155 u32 val;
9156 void (*write_op)(struct tg3 *, u32, u32);
9157 int i, err;
9158
9159 if (!pci_device_is_present(tp->pdev))
9160 return -ENODEV;
9161
9162 tg3_nvram_lock(tp);
9163
9164 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9165
9166 /* No matching tg3_nvram_unlock() after this because
9167 * chip reset below will undo the nvram lock.
9168 */
9169 tp->nvram_lock_cnt = 0;
9170
9171 /* GRC_MISC_CFG core clock reset will clear the memory
9172 * enable bit in PCI register 4 and the MSI enable bit
9173 * on some chips, so we save relevant registers here.
9174 */
9175 tg3_save_pci_state(tp);
9176
9177 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9178 tg3_flag(tp, 5755_PLUS))
9179 tw32(GRC_FASTBOOT_PC, 0);
9180
9181 /*
9182 * We must avoid the readl() that normally takes place.
9183 * It locks machines, causes machine checks, and other
9184 * fun things. So, temporarily disable the 5701
9185 * hardware workaround, while we do the reset.
9186 */
9187 write_op = tp->write32;
9188 if (write_op == tg3_write_flush_reg32)
9189 tp->write32 = tg3_write32;
9190
9191 /* Prevent the irq handler from reading or writing PCI registers
9192 * during chip reset when the memory enable bit in the PCI command
9193 * register may be cleared. The chip does not generate interrupts
9194 * at this time, but the irq handler may still be called due to irq
9195 * sharing or irqpoll.
9196 */
9197 tg3_flag_set(tp, CHIP_RESETTING);
9198 for (i = 0; i < tp->irq_cnt; i++) {
9199 struct tg3_napi *tnapi = &tp->napi[i];
9200 if (tnapi->hw_status) {
9201 tnapi->hw_status->status = 0;
9202 tnapi->hw_status->status_tag = 0;
9203 }
9204 tnapi->last_tag = 0;
9205 tnapi->last_irq_tag = 0;
9206 }
9207 smp_mb();
9208
9209 tg3_full_unlock(tp);
9210
9211 for (i = 0; i < tp->irq_cnt; i++)
9212 synchronize_irq(tp->napi[i].irq_vec);
9213
9214 tg3_full_lock(tp, 0);
9215
9216 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9217 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9218 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9219 }
9220
9221 /* do the reset */
9222 val = GRC_MISC_CFG_CORECLK_RESET;
9223
9224 if (tg3_flag(tp, PCI_EXPRESS)) {
9225 /* Force PCIe 1.0a mode */
9226 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9227 !tg3_flag(tp, 57765_PLUS) &&
9228 tr32(TG3_PCIE_PHY_TSTCTL) ==
9229 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9230 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9231
9232 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9233 tw32(GRC_MISC_CFG, (1 << 29));
9234 val |= (1 << 29);
9235 }
9236 }
9237
9238 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9239 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9240 tw32(GRC_VCPU_EXT_CTRL,
9241 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9242 }
9243
9244 /* Set the clock to the highest frequency to avoid timeouts. With link
9245 * aware mode, the clock speed could be slow and bootcode does not
9246 * complete within the expected time. Override the clock to allow the
9247 * bootcode to finish sooner and then restore it.
9248 */
9249 tg3_override_clk(tp);
9250
9251 /* Manage gphy power for all CPMU absent PCIe devices. */
9252 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9253 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9254
9255 tw32(GRC_MISC_CFG, val);
9256
9257 /* restore 5701 hardware bug workaround write method */
9258 tp->write32 = write_op;
9259
9260 /* Unfortunately, we have to delay before the PCI read back.
9261 * Some 575X chips will not even respond to a PCI cfg access
9262 * when the reset command is given to the chip.
9263 *
9264 * How do these hardware designers expect things to work
9265 * properly if the PCI write is posted for a long period
9266 * of time? It is always necessary to have some method by
9267 * which a register read back can occur to push out the
9268 * write that performs the reset.
9269 *
9270 * For most tg3 variants the trick below was working.
9271 * Ho hum...
9272 */
9273 udelay(120);
9274
9275 /* Flush PCI posted writes. The normal MMIO registers
9276 * are inaccessible at this time so this is the only
9277 * way to do this reliably (actually, this is no longer
9278 * the case, see above). I tried to use indirect
9279 * register read/write but this upset some 5701 variants.
9280 */
9281 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9282
9283 udelay(120);
9284
9285 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9286 u16 val16;
9287
9288 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9289 int j;
9290 u32 cfg_val;
9291
9292 /* Wait for link training to complete. */
9293 for (j = 0; j < 5000; j++)
9294 udelay(100);
9295
9296 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9297 pci_write_config_dword(tp->pdev, 0xc4,
9298 cfg_val | (1 << 15));
9299 }
9300
9301 /* Clear the "no snoop" and "relaxed ordering" bits. */
9302 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9303 /*
9304 * Older PCIe devices only support the 128 byte
9305 * MPS setting. Enforce the restriction.
9306 */
9307 if (!tg3_flag(tp, CPMU_PRESENT))
9308 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9309 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9310
9311 /* Clear error status */
9312 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9313 PCI_EXP_DEVSTA_CED |
9314 PCI_EXP_DEVSTA_NFED |
9315 PCI_EXP_DEVSTA_FED |
9316 PCI_EXP_DEVSTA_URD);
9317 }
9318
9319 tg3_restore_pci_state(tp);
9320
9321 tg3_flag_clear(tp, CHIP_RESETTING);
9322 tg3_flag_clear(tp, ERROR_PROCESSED);
9323
9324 val = 0;
9325 if (tg3_flag(tp, 5780_CLASS))
9326 val = tr32(MEMARB_MODE);
9327 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9328
9329 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9330 tg3_stop_fw(tp);
9331 tw32(0x5000, 0x400);
9332 }
9333
9334 if (tg3_flag(tp, IS_SSB_CORE)) {
9335 /*
9336 * BCM4785: In order to avoid repercussions from using
9337 * potentially defective internal ROM, stop the Rx RISC CPU,
9338 * which is not required for normal operation.
9339 */
9340 tg3_stop_fw(tp);
9341 tg3_halt_cpu(tp, RX_CPU_BASE);
9342 }
9343
9344 err = tg3_poll_fw(tp);
9345 if (err)
9346 return err;
9347
9348 tw32(GRC_MODE, tp->grc_mode);
9349
9350 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9351 val = tr32(0xc4);
9352
9353 tw32(0xc4, val | (1 << 15));
9354 }
9355
9356 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9357 tg3_asic_rev(tp) == ASIC_REV_5705) {
9358 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9359 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9360 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9361 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9362 }
9363
9364 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9365 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9366 val = tp->mac_mode;
9367 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9368 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9369 val = tp->mac_mode;
9370 } else
9371 val = 0;
9372
9373 tw32_f(MAC_MODE, val);
9374 udelay(40);
9375
9376 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9377
9378 tg3_mdio_start(tp);
9379
9380 if (tg3_flag(tp, PCI_EXPRESS) &&
9381 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9382 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9383 !tg3_flag(tp, 57765_PLUS)) {
9384 val = tr32(0x7c00);
9385
9386 tw32(0x7c00, val | (1 << 25));
9387 }
9388
9389 tg3_restore_clk(tp);
9390
9391 /* Increase the core clock speed to fix tx timeout issue for 5762
9392 * with 100Mbps link speed.
9393 */
9394 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9395 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9396 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9397 TG3_CPMU_MAC_ORIDE_ENABLE);
9398 }
9399
9400 /* Reprobe ASF enable state. */
9401 tg3_flag_clear(tp, ENABLE_ASF);
9402 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9403 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9404
9405 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9406 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9407 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9408 u32 nic_cfg;
9409
9410 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9411 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9412 tg3_flag_set(tp, ENABLE_ASF);
9413 tp->last_event_jiffies = jiffies;
9414 if (tg3_flag(tp, 5750_PLUS))
9415 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9416
9417 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9418 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9419 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9420 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9421 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9422 }
9423 }
9424
9425 return 0;
9426 }
9427
9428 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9429 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9430 static void __tg3_set_rx_mode(struct net_device *);
9431
9432 /* tp->lock is held. */
9433 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9434 {
9435 int err, i;
9436
9437 tg3_stop_fw(tp);
9438
9439 tg3_write_sig_pre_reset(tp, kind);
9440
9441 tg3_abort_hw(tp, silent);
9442 err = tg3_chip_reset(tp);
9443
9444 __tg3_set_mac_addr(tp, false);
9445
9446 tg3_write_sig_legacy(tp, kind);
9447 tg3_write_sig_post_reset(tp, kind);
9448
9449 if (tp->hw_stats) {
9450 /* Save the stats across chip resets... */
9451 tg3_get_nstats(tp, &tp->net_stats_prev);
9452 tg3_get_estats(tp, &tp->estats_prev);
9453
9454 /* And make sure the next sample is new data */
9455 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9456
9457 for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) {
9458 struct tg3_napi *tnapi = &tp->napi[i];
9459
9460 tnapi->rx_dropped = 0;
9461 tnapi->tx_dropped = 0;
9462 }
9463 }
9464
9465 return err;
9466 }
9467
9468 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9469 {
9470 struct tg3 *tp = netdev_priv(dev);
9471 struct sockaddr *addr = p;
9472 int err = 0;
9473 bool skip_mac_1 = false;
9474
9475 if (!is_valid_ether_addr(addr->sa_data))
9476 return -EADDRNOTAVAIL;
9477
9478 eth_hw_addr_set(dev, addr->sa_data);
9479
9480 if (!netif_running(dev))
9481 return 0;
9482
9483 if (tg3_flag(tp, ENABLE_ASF)) {
9484 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9485
9486 addr0_high = tr32(MAC_ADDR_0_HIGH);
9487 addr0_low = tr32(MAC_ADDR_0_LOW);
9488 addr1_high = tr32(MAC_ADDR_1_HIGH);
9489 addr1_low = tr32(MAC_ADDR_1_LOW);
9490
9491 /* Skip MAC addr 1 if ASF is using it. */
9492 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9493 !(addr1_high == 0 && addr1_low == 0))
9494 skip_mac_1 = true;
9495 }
9496 spin_lock_bh(&tp->lock);
9497 __tg3_set_mac_addr(tp, skip_mac_1);
9498 __tg3_set_rx_mode(dev);
9499 spin_unlock_bh(&tp->lock);
9500
9501 return err;
9502 }
9503
9504 /* tp->lock is held. */
9505 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9506 dma_addr_t mapping, u32 maxlen_flags,
9507 u32 nic_addr)
9508 {
9509 tg3_write_mem(tp,
9510 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9511 ((u64) mapping >> 32));
9512 tg3_write_mem(tp,
9513 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9514 ((u64) mapping & 0xffffffff));
9515 tg3_write_mem(tp,
9516 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9517 maxlen_flags);
9518
9519 if (!tg3_flag(tp, 5705_PLUS))
9520 tg3_write_mem(tp,
9521 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9522 nic_addr);
9523 }
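
/* Editor's note: each BDINFO block in NIC SRAM is therefore a small
 * record: a 64-bit host ring address (high word first), a maxlen/flags
 * word and, on pre-5705 parts, a NIC-side ring address.
 */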
9524
9525
9526 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9527 {
9528 int i = 0;
9529
9530 if (!tg3_flag(tp, ENABLE_TSS)) {
9531 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9532 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9533 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9534 } else {
9535 tw32(HOSTCC_TXCOL_TICKS, 0);
9536 tw32(HOSTCC_TXMAX_FRAMES, 0);
9537 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9538
9539 for (; i < tp->txq_cnt; i++) {
9540 u32 reg;
9541
9542 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9543 tw32(reg, ec->tx_coalesce_usecs);
9544 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9545 tw32(reg, ec->tx_max_coalesced_frames);
9546 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9547 tw32(reg, ec->tx_max_coalesced_frames_irq);
9548 }
9549 }
9550
9551 for (; i < tp->irq_max - 1; i++) {
9552 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9553 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9554 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9555 }
9556 }
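
/* Annotation: the per-vector host coalescing registers sit in
 * 0x18-byte blocks starting at the *_VEC1 addresses, so TX queue i
 * is programmed at HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18 (and likewise
 * for the frame-count registers).  The trailing loop zeroes the
 * blocks of any vectors beyond txq_cnt.
 */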
9557
9558 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9559 {
9560 int i = 0;
9561 u32 limit = tp->rxq_cnt;
9562
9563 if (!tg3_flag(tp, ENABLE_RSS)) {
9564 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9565 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9566 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9567 limit--;
9568 } else {
9569 tw32(HOSTCC_RXCOL_TICKS, 0);
9570 tw32(HOSTCC_RXMAX_FRAMES, 0);
9571 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9572 }
9573
9574 for (; i < limit; i++) {
9575 u32 reg;
9576
9577 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9578 tw32(reg, ec->rx_coalesce_usecs);
9579 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9580 tw32(reg, ec->rx_max_coalesced_frames);
9581 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9582 tw32(reg, ec->rx_max_coalesced_frames_irq);
9583 }
9584
9585 for (; i < tp->irq_max - 1; i++) {
9586 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9587 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9588 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9589 }
9590 }
9591
9592 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9593 {
9594 tg3_coal_tx_init(tp, ec);
9595 tg3_coal_rx_init(tp, ec);
9596
9597 if (!tg3_flag(tp, 5705_PLUS)) {
9598 u32 val = ec->stats_block_coalesce_usecs;
9599
9600 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9601 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9602
9603 if (!tp->link_up)
9604 val = 0;
9605
9606 tw32(HOSTCC_STAT_COAL_TICKS, val);
9607 }
9608 }
9609
9610 /* tp->lock is held. */
9611 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9612 {
9613 u32 txrcb, limit;
9614
9615 /* Disable all transmit rings but the first. */
9616 if (!tg3_flag(tp, 5705_PLUS))
9617 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9618 else if (tg3_flag(tp, 5717_PLUS))
9619 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9620 else if (tg3_flag(tp, 57765_CLASS) ||
9621 tg3_asic_rev(tp) == ASIC_REV_5762)
9622 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9623 else
9624 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9625
9626 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9627 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9628 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9629 BDINFO_FLAGS_DISABLED);
9630 }
9631
9632 /* tp->lock is held. */
9633 static void tg3_tx_rcbs_init(struct tg3 *tp)
9634 {
9635 int i = 0;
9636 u32 txrcb = NIC_SRAM_SEND_RCB;
9637
9638 if (tg3_flag(tp, ENABLE_TSS))
9639 i++;
9640
9641 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9642 struct tg3_napi *tnapi = &tp->napi[i];
9643
9644 if (!tnapi->tx_ring)
9645 continue;
9646
9647 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9648 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9649 NIC_SRAM_TX_BUFFER_DESC);
9650 }
9651 }
9652
9653 /* tp->lock is held. */
9654 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9655 {
9656 u32 rxrcb, limit;
9657
9658 /* Disable all receive return rings but the first. */
9659 if (tg3_flag(tp, 5717_PLUS))
9660 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9661 else if (!tg3_flag(tp, 5705_PLUS))
9662 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9663 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9664 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9665 tg3_flag(tp, 57765_CLASS))
9666 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9667 else
9668 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9669
9670 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9671 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9672 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9673 BDINFO_FLAGS_DISABLED);
9674 }
9675
9676 /* tp->lock is held. */
9677 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9678 {
9679 int i = 0;
9680 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9681
9682 if (tg3_flag(tp, ENABLE_RSS))
9683 i++;
9684
9685 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9686 struct tg3_napi *tnapi = &tp->napi[i];
9687
9688 if (!tnapi->rx_rcb)
9689 continue;
9690
9691 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9692 (tp->rx_ret_ring_mask + 1) <<
9693 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9694 }
9695 }
9696
9697 /* tp->lock is held. */
9698 static void tg3_rings_reset(struct tg3 *tp)
9699 {
9700 int i;
9701 u32 stblk;
9702 struct tg3_napi *tnapi = &tp->napi[0];
9703
9704 tg3_tx_rcbs_disable(tp);
9705
9706 tg3_rx_ret_rcbs_disable(tp);
9707
9708 /* Disable interrupts */
9709 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9710 tp->napi[0].chk_msi_cnt = 0;
9711 tp->napi[0].last_rx_cons = 0;
9712 tp->napi[0].last_tx_cons = 0;
9713
9714 /* Zero mailbox registers. */
9715 if (tg3_flag(tp, SUPPORT_MSIX)) {
9716 for (i = 1; i < tp->irq_max; i++) {
9717 tp->napi[i].tx_prod = 0;
9718 tp->napi[i].tx_cons = 0;
9719 if (tg3_flag(tp, ENABLE_TSS))
9720 tw32_mailbox(tp->napi[i].prodmbox, 0);
9721 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9722 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9723 tp->napi[i].chk_msi_cnt = 0;
9724 tp->napi[i].last_rx_cons = 0;
9725 tp->napi[i].last_tx_cons = 0;
9726 }
9727 if (!tg3_flag(tp, ENABLE_TSS))
9728 tw32_mailbox(tp->napi[0].prodmbox, 0);
9729 } else {
9730 tp->napi[0].tx_prod = 0;
9731 tp->napi[0].tx_cons = 0;
9732 tw32_mailbox(tp->napi[0].prodmbox, 0);
9733 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9734 }
9735
9736 /* Make sure the NIC-based send BD rings are disabled. */
9737 if (!tg3_flag(tp, 5705_PLUS)) {
9738 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9739 for (i = 0; i < 16; i++)
9740 tw32_tx_mbox(mbox + i * 8, 0);
9741 }
9742
9743 /* Clear status block in ram. */
9744 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9745
9746 /* Set status block DMA address */
9747 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9748 ((u64) tnapi->status_mapping >> 32));
9749 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9750 ((u64) tnapi->status_mapping & 0xffffffff));
9751
9752 stblk = HOSTCC_STATBLCK_RING1;
9753
9754 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9755 u64 mapping = (u64)tnapi->status_mapping;
9756 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9757 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9758 stblk += 8;
9759
9760 /* Clear status block in ram. */
9761 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9762 }
9763
9764 tg3_tx_rcbs_init(tp);
9765 tg3_rx_ret_rcbs_init(tp);
9766 }
9767
9768 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9769 {
9770 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9771
9772 if (!tg3_flag(tp, 5750_PLUS) ||
9773 tg3_flag(tp, 5780_CLASS) ||
9774 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9775 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9776 tg3_flag(tp, 57765_PLUS))
9777 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9778 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9779 tg3_asic_rev(tp) == ASIC_REV_5787)
9780 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9781 else
9782 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9783
9784 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9785 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9786
9787 val = min(nic_rep_thresh, host_rep_thresh);
9788 tw32(RCVBDI_STD_THRESH, val);
9789
9790 if (tg3_flag(tp, 57765_PLUS))
9791 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9792
9793 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9794 return;
9795
9796 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9797
9798 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9799
9800 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9801 tw32(RCVBDI_JUMBO_THRESH, val);
9802
9803 if (tg3_flag(tp, 57765_PLUS))
9804 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9805 }
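
/* Annotation (sketch): the replenish threshold is how far the
 * standard ring's on-chip BD cache may drain before the chip DMAs
 * more descriptors from the host ring.  It is the smaller of half
 * the cache size (capped at rx_std_max_post) and 1/8 of the host
 * ring occupancy, e.g. rx_pending == 200 gives a host-side bound of
 * 200 / 8 == 25 descriptors.
 */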
9806
9807 static inline u32 calc_crc(unsigned char *buf, int len)
9808 {
9809 return ~crc32(~0, buf, len);
9810 }
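
/* Annotation: crc32() here is the kernel's little-endian (reflected)
 * CRC-32; seeding with ~0 and complementing the result makes
 * calc_crc() produce the standard Ethernet FCS value that the MAC
 * multicast hash logic expects (see __tg3_set_rx_mode() below).
 */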
9811
9812 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9813 {
9814 /* accept or reject all multicast frames */
9815 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9816 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9817 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9818 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9819 }
9820
9821 static void __tg3_set_rx_mode(struct net_device *dev)
9822 {
9823 struct tg3 *tp = netdev_priv(dev);
9824 u32 rx_mode;
9825
9826 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9827 RX_MODE_KEEP_VLAN_TAG);
9828
9829 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9830 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9831 * flag clear.
9832 */
9833 if (!tg3_flag(tp, ENABLE_ASF))
9834 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9835 #endif
9836
9837 if (dev->flags & IFF_PROMISC) {
9838 /* Promiscuous mode. */
9839 rx_mode |= RX_MODE_PROMISC;
9840 } else if (dev->flags & IFF_ALLMULTI) {
9841 /* Accept all multicast. */
9842 tg3_set_multi(tp, 1);
9843 } else if (netdev_mc_empty(dev)) {
9844 /* Reject all multicast. */
9845 tg3_set_multi(tp, 0);
9846 } else {
9847 /* Accept one or more multicast(s). */
9848 struct netdev_hw_addr *ha;
9849 u32 mc_filter[4] = { 0, };
9850 u32 regidx;
9851 u32 bit;
9852 u32 crc;
9853
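/* The hash is the low 7 bits of the complemented CRC of the
 * address: bits 6:5 select one of the four 32-bit MAC_HASH_REG_*
 * registers, bits 4:0 select the bit within it.  Worked example:
 * ~crc & 0x7f == 0x6f gives regidx == 3 and bit == 15, i.e.
 * mc_filter[3] |= 1 << 15.
 */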
9854 netdev_for_each_mc_addr(ha, dev) {
9855 crc = calc_crc(ha->addr, ETH_ALEN);
9856 bit = ~crc & 0x7f;
9857 regidx = (bit & 0x60) >> 5;
9858 bit &= 0x1f;
9859 mc_filter[regidx] |= (1 << bit);
9860 }
9861
9862 tw32(MAC_HASH_REG_0, mc_filter[0]);
9863 tw32(MAC_HASH_REG_1, mc_filter[1]);
9864 tw32(MAC_HASH_REG_2, mc_filter[2]);
9865 tw32(MAC_HASH_REG_3, mc_filter[3]);
9866 }
9867
9868 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9869 rx_mode |= RX_MODE_PROMISC;
9870 } else if (!(dev->flags & IFF_PROMISC)) {
9871 /* Add all entries to the MAC addr filter list */
9872 int i = 0;
9873 struct netdev_hw_addr *ha;
9874
9875 netdev_for_each_uc_addr(ha, dev) {
9876 __tg3_set_one_mac_addr(tp, ha->addr,
9877 i + TG3_UCAST_ADDR_IDX(tp));
9878 i++;
9879 }
9880 }
9881
9882 if (rx_mode != tp->rx_mode) {
9883 tp->rx_mode = rx_mode;
9884 tw32_f(MAC_RX_MODE, rx_mode);
9885 udelay(10);
9886 }
9887 }
9888
9889 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9890 {
9891 int i;
9892
9893 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9894 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9895 }
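
/* Annotation: ethtool_rxfh_indir_default(i, qcnt) is simply
 * i % qcnt, so the default table spreads flows round-robin across
 * the available RX queues.
 */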
9896
9897 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9898 {
9899 int i;
9900
9901 if (!tg3_flag(tp, SUPPORT_MSIX))
9902 return;
9903
9904 if (tp->rxq_cnt == 1) {
9905 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9906 return;
9907 }
9908
9909 /* Validate table against current IRQ count */
9910 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9911 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9912 break;
9913 }
9914
9915 if (i != TG3_RSS_INDIR_TBL_SIZE)
9916 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9917 }
9918
9919 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9920 {
9921 int i = 0;
9922 u32 reg = MAC_RSS_INDIR_TBL_0;
9923
9924 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9925 u32 val = tp->rss_ind_tbl[i];
9926 i++;
9927 for (; i % 8; i++) {
9928 val <<= 4;
9929 val |= tp->rss_ind_tbl[i];
9930 }
9931 tw32(reg, val);
9932 reg += 4;
9933 }
9934 }
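
/* Annotation: each 32-bit MAC_RSS_INDIR_TBL_* register packs eight
 * 4-bit queue indices, first entry in the most significant nibble.
 * Worked example: with 4 RX queues the default table
 * 0,1,2,3,0,1,2,3,... is written as 0x01230123 into each register.
 */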
9935
9936 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9937 {
9938 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9939 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9940 else
9941 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9942 }
9943
9944 /* tp->lock is held. */
9945 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9946 {
9947 u32 val, rdmac_mode;
9948 int i, err, limit;
9949 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9950
9951 tg3_disable_ints(tp);
9952
9953 tg3_stop_fw(tp);
9954
9955 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9956
9957 if (tg3_flag(tp, INIT_COMPLETE))
9958 tg3_abort_hw(tp, true);
9959
9960 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9961 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9962 tg3_phy_pull_config(tp);
9963 tg3_eee_pull_config(tp, NULL);
9964 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9965 }
9966
9967 /* Enable MAC control of LPI */
9968 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9969 tg3_setup_eee(tp);
9970
9971 if (reset_phy)
9972 tg3_phy_reset(tp);
9973
9974 err = tg3_chip_reset(tp);
9975 if (err)
9976 return err;
9977
9978 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9979
9980 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9981 val = tr32(TG3_CPMU_CTRL);
9982 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9983 tw32(TG3_CPMU_CTRL, val);
9984
9985 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9986 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9987 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9988 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9989
9990 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9991 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9992 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9993 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9994
9995 val = tr32(TG3_CPMU_HST_ACC);
9996 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9997 val |= CPMU_HST_ACC_MACCLK_6_25;
9998 tw32(TG3_CPMU_HST_ACC, val);
9999 }
10000
10001 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
10002 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
10003 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
10004 PCIE_PWR_MGMT_L1_THRESH_4MS;
10005 tw32(PCIE_PWR_MGMT_THRESH, val);
10006
10007 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
10008 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
10009
10010 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
10011
10012 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
10013 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
10014 }
10015
10016 if (tg3_flag(tp, L1PLLPD_EN)) {
10017 u32 grc_mode = tr32(GRC_MODE);
10018
10019 /* Access the lower 1K of PL PCIE block registers. */
10020 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10021 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10022
10023 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
10024 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
10025 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
10026
10027 tw32(GRC_MODE, grc_mode);
10028 }
10029
10030 if (tg3_flag(tp, 57765_CLASS)) {
10031 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
10032 u32 grc_mode = tr32(GRC_MODE);
10033
10034 /* Access the lower 1K of PL PCIE block registers. */
10035 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10036 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10037
10038 val = tr32(TG3_PCIE_TLDLPL_PORT +
10039 TG3_PCIE_PL_LO_PHYCTL5);
10040 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
10041 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
10042
10043 tw32(GRC_MODE, grc_mode);
10044 }
10045
10046 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
10047 u32 grc_mode;
10048
10049 /* Fix transmit hangs */
10050 val = tr32(TG3_CPMU_PADRNG_CTL);
10051 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
10052 tw32(TG3_CPMU_PADRNG_CTL, val);
10053
10054 grc_mode = tr32(GRC_MODE);
10055
10056 /* Access the lower 1K of DL PCIE block registers. */
10057 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10058 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
10059
10060 val = tr32(TG3_PCIE_TLDLPL_PORT +
10061 TG3_PCIE_DL_LO_FTSMAX);
10062 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
10063 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
10064 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
10065
10066 tw32(GRC_MODE, grc_mode);
10067 }
10068
10069 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
10070 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
10071 val |= CPMU_LSPD_10MB_MACCLK_6_25;
10072 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
10073 }
10074
10075 /* This works around an issue with Athlon chipsets on
10076 * B3 tigon3 silicon. This bit has no effect on any
10077 * other revision. But do not set this on PCI Express
10078 * chips and don't even touch the clocks if the CPMU is present.
10079 */
10080 if (!tg3_flag(tp, CPMU_PRESENT)) {
10081 if (!tg3_flag(tp, PCI_EXPRESS))
10082 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10083 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10084 }
10085
10086 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10087 tg3_flag(tp, PCIX_MODE)) {
10088 val = tr32(TG3PCI_PCISTATE);
10089 val |= PCISTATE_RETRY_SAME_DMA;
10090 tw32(TG3PCI_PCISTATE, val);
10091 }
10092
10093 if (tg3_flag(tp, ENABLE_APE)) {
10094 /* Allow reads and writes to the
10095 * APE register and memory space.
10096 */
10097 val = tr32(TG3PCI_PCISTATE);
10098 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10099 PCISTATE_ALLOW_APE_SHMEM_WR |
10100 PCISTATE_ALLOW_APE_PSPACE_WR;
10101 tw32(TG3PCI_PCISTATE, val);
10102 }
10103
10104 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10105 /* Enable some hw fixes. */
10106 val = tr32(TG3PCI_MSI_DATA);
10107 val |= (1 << 26) | (1 << 28) | (1 << 29);
10108 tw32(TG3PCI_MSI_DATA, val);
10109 }
10110
10111 /* Descriptor ring init may make accesses to the
10112 * NIC SRAM area to set up the TX descriptors, so we
10113 * can only do this after the hardware has been
10114 * successfully reset.
10115 */
10116 err = tg3_init_rings(tp);
10117 if (err)
10118 return err;
10119
10120 if (tg3_flag(tp, 57765_PLUS)) {
10121 val = tr32(TG3PCI_DMA_RW_CTRL) &
10122 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10123 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10124 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10125 if (!tg3_flag(tp, 57765_CLASS) &&
10126 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10127 tg3_asic_rev(tp) != ASIC_REV_5762)
10128 val |= DMA_RWCTRL_TAGGED_STAT_WA;
10129 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10130 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10131 tg3_asic_rev(tp) != ASIC_REV_5761) {
10132 /* This value is determined during the probe-time DMA
10133 * engine test, tg3_test_dma.
10134 */
10135 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10136 }
10137
10138 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10139 GRC_MODE_4X_NIC_SEND_RINGS |
10140 GRC_MODE_NO_TX_PHDR_CSUM |
10141 GRC_MODE_NO_RX_PHDR_CSUM);
10142 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10143
10144 /* Pseudo-header checksum is done by hardware logic and not
10145 * the offload processors, so make the chip do the pseudo-
10146 * header checksums on receive. For transmit it is more
10147 * convenient to do the pseudo-header checksum in software
10148 * as Linux does that on transmit for us in all cases.
10149 */
10150 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10151
10152 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10153 if (tp->rxptpctl)
10154 tw32(TG3_RX_PTP_CTL,
10155 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10156
10157 if (tg3_flag(tp, PTP_CAPABLE))
10158 val |= GRC_MODE_TIME_SYNC_ENABLE;
10159
10160 tw32(GRC_MODE, tp->grc_mode | val);
10161
10162 /* On some AMD platforms, MRRS is restricted to 4000 because of
10163 * a south bridge limitation. As a workaround, the driver sets
10164 * MRRS to 2048 instead of the default 4096.
10165 */
10166 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10167 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10168 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10169 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10170 }
10171
10172 /* Set up the timer prescaler register. The clock is always 66 MHz. */
10173 val = tr32(GRC_MISC_CFG);
10174 val &= ~0xff;
10175 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10176 tw32(GRC_MISC_CFG, val);
10177
10178 /* Initialize MBUF/DESC pool. */
10179 if (tg3_flag(tp, 5750_PLUS)) {
10180 /* Do nothing. */
10181 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10182 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10183 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10184 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10185 else
10186 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10187 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10188 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10189 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10190 int fw_len;
10191
10192 fw_len = tp->fw_len;
10193 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10194 tw32(BUFMGR_MB_POOL_ADDR,
10195 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10196 tw32(BUFMGR_MB_POOL_SIZE,
10197 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10198 }
10199
10200 if (tp->dev->mtu <= ETH_DATA_LEN) {
10201 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10202 tp->bufmgr_config.mbuf_read_dma_low_water);
10203 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10204 tp->bufmgr_config.mbuf_mac_rx_low_water);
10205 tw32(BUFMGR_MB_HIGH_WATER,
10206 tp->bufmgr_config.mbuf_high_water);
10207 } else {
10208 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10209 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10210 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10211 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10212 tw32(BUFMGR_MB_HIGH_WATER,
10213 tp->bufmgr_config.mbuf_high_water_jumbo);
10214 }
10215 tw32(BUFMGR_DMA_LOW_WATER,
10216 tp->bufmgr_config.dma_low_water);
10217 tw32(BUFMGR_DMA_HIGH_WATER,
10218 tp->bufmgr_config.dma_high_water);
10219
10220 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10221 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10222 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10223 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10224 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10225 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10226 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10227 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10228 tw32(BUFMGR_MODE, val);
10229 for (i = 0; i < 2000; i++) {
10230 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10231 break;
10232 udelay(10);
10233 }
10234 if (i >= 2000) {
10235 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10236 return -ENODEV;
10237 }
10238
10239 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10240 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10241
10242 tg3_setup_rxbd_thresholds(tp);
10243
10244 /* Initialize TG3_BDINFO's at:
10245 * RCVDBDI_STD_BD: standard eth size rx ring
10246 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10247 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10248 *
10249 * like so:
10250 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10251 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10252 * ring attribute flags
10253 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10254 *
10255 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10256 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10257 *
10258 * The size of each ring is fixed in the firmware, but the location is
10259 * configurable.
10260 */
10261 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10262 ((u64) tpr->rx_std_mapping >> 32));
10263 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10264 ((u64) tpr->rx_std_mapping & 0xffffffff));
10265 if (!tg3_flag(tp, 5717_PLUS))
10266 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10267 NIC_SRAM_RX_BUFFER_DESC);
10268
10269 /* Disable the mini ring */
10270 if (!tg3_flag(tp, 5705_PLUS))
10271 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10272 BDINFO_FLAGS_DISABLED);
10273
10274 /* Program the jumbo buffer descriptor ring control
10275 * blocks on those devices that have them.
10276 */
10277 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10278 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10279
10280 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10281 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10282 ((u64) tpr->rx_jmb_mapping >> 32));
10283 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10284 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10285 val = TG3_RX_JMB_RING_SIZE(tp) <<
10286 BDINFO_FLAGS_MAXLEN_SHIFT;
10287 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10288 val | BDINFO_FLAGS_USE_EXT_RECV);
10289 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10290 tg3_flag(tp, 57765_CLASS) ||
10291 tg3_asic_rev(tp) == ASIC_REV_5762)
10292 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10293 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10294 } else {
10295 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10296 BDINFO_FLAGS_DISABLED);
10297 }
10298
10299 if (tg3_flag(tp, 57765_PLUS)) {
10300 val = TG3_RX_STD_RING_SIZE(tp);
10301 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10302 val |= (TG3_RX_STD_DMA_SZ << 2);
10303 } else
10304 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10305 } else
10306 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10307
10308 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10309
10310 tpr->rx_std_prod_idx = tp->rx_pending;
10311 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10312
10313 tpr->rx_jmb_prod_idx =
10314 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10315 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10316
10317 tg3_rings_reset(tp);
10318
10319 /* Initialize MAC address and backoff seed. */
10320 __tg3_set_mac_addr(tp, false);
10321
10322 /* MTU + ethernet header + FCS + optional VLAN tag */
10323 tw32(MAC_RX_MTU_SIZE,
10324 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10325
10326 /* The slot time is changed by tg3_setup_phy if we
10327 * run at gigabit with half duplex.
10328 */
10329 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10330 (6 << TX_LENGTHS_IPG_SHIFT) |
10331 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10332
10333 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10334 tg3_asic_rev(tp) == ASIC_REV_5762)
10335 val |= tr32(MAC_TX_LENGTHS) &
10336 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10337 TX_LENGTHS_CNT_DWN_VAL_MSK);
10338
10339 tw32(MAC_TX_LENGTHS, val);
10340
10341 /* Receive rules. */
10342 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10343 tw32(RCVLPC_CONFIG, 0x0181);
10344
10345 /* Calculate RDMAC_MODE setting early, we need it to determine
10346 * the RCVLPC_STATE_ENABLE mask.
10347 */
10348 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10349 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10350 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10351 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10352 RDMAC_MODE_LNGREAD_ENAB);
10353
10354 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10355 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10356
10357 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10358 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10359 tg3_asic_rev(tp) == ASIC_REV_57780)
10360 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10361 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10362 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10363
10364 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10365 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10366 if (tg3_flag(tp, TSO_CAPABLE)) {
10367 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10368 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10369 !tg3_flag(tp, IS_5788)) {
10370 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10371 }
10372 }
10373
10374 if (tg3_flag(tp, PCI_EXPRESS))
10375 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10376
10377 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10378 tp->dma_limit = 0;
10379 if (tp->dev->mtu <= ETH_DATA_LEN) {
10380 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10381 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10382 }
10383 }
10384
10385 if (tg3_flag(tp, HW_TSO_1) ||
10386 tg3_flag(tp, HW_TSO_2) ||
10387 tg3_flag(tp, HW_TSO_3))
10388 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10389
10390 if (tg3_flag(tp, 57765_PLUS) ||
10391 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10392 tg3_asic_rev(tp) == ASIC_REV_57780)
10393 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10394
10395 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10396 tg3_asic_rev(tp) == ASIC_REV_5762)
10397 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10398
10399 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10400 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10401 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10402 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10403 tg3_flag(tp, 57765_PLUS)) {
10404 u32 tgtreg;
10405
10406 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10407 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10408 else
10409 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10410
10411 val = tr32(tgtreg);
10412 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10413 tg3_asic_rev(tp) == ASIC_REV_5762) {
10414 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10415 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10416 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10417 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10418 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10419 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10420 }
10421 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10422 }
10423
10424 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10425 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10426 tg3_asic_rev(tp) == ASIC_REV_5762) {
10427 u32 tgtreg;
10428
10429 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10430 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10431 else
10432 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10433
10434 val = tr32(tgtreg);
10435 tw32(tgtreg, val |
10436 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10437 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10438 }
10439
10440 /* Receive/send statistics. */
10441 if (tg3_flag(tp, 5750_PLUS)) {
10442 val = tr32(RCVLPC_STATS_ENABLE);
10443 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10444 tw32(RCVLPC_STATS_ENABLE, val);
10445 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10446 tg3_flag(tp, TSO_CAPABLE)) {
10447 val = tr32(RCVLPC_STATS_ENABLE);
10448 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10449 tw32(RCVLPC_STATS_ENABLE, val);
10450 } else {
10451 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10452 }
10453 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10454 tw32(SNDDATAI_STATSENAB, 0xffffff);
10455 tw32(SNDDATAI_STATSCTRL,
10456 (SNDDATAI_SCTRL_ENABLE |
10457 SNDDATAI_SCTRL_FASTUPD));
10458
10459 /* Setup host coalescing engine. */
10460 tw32(HOSTCC_MODE, 0);
10461 for (i = 0; i < 2000; i++) {
10462 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10463 break;
10464 udelay(10);
10465 }
10466
10467 __tg3_set_coalesce(tp, &tp->coal);
10468
10469 if (!tg3_flag(tp, 5705_PLUS)) {
10470 /* Status/statistics block address. See tg3_timer,
10471 * the tg3_periodic_fetch_stats call there, and
10472 * tg3_get_stats to see how this works for 5705/5750 chips.
10473 */
10474 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10475 ((u64) tp->stats_mapping >> 32));
10476 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10477 ((u64) tp->stats_mapping & 0xffffffff));
10478 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10479
10480 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10481
10482 /* Clear statistics and status block memory areas */
10483 for (i = NIC_SRAM_STATS_BLK;
10484 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10485 i += sizeof(u32)) {
10486 tg3_write_mem(tp, i, 0);
10487 udelay(40);
10488 }
10489 }
10490
10491 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10492
10493 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10494 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10495 if (!tg3_flag(tp, 5705_PLUS))
10496 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10497
10498 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10499 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10500 /* reset to prevent losing 1st rx packet intermittently */
10501 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10502 udelay(10);
10503 }
10504
10505 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10506 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10507 MAC_MODE_FHDE_ENABLE;
10508 if (tg3_flag(tp, ENABLE_APE))
10509 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10510 if (!tg3_flag(tp, 5705_PLUS) &&
10511 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10512 tg3_asic_rev(tp) != ASIC_REV_5700)
10513 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10514 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10515 udelay(40);
10516
10517 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10518 * If TG3_FLAG_IS_NIC is zero, we should read the
10519 * register to preserve the GPIO settings for LOMs. The GPIOs,
10520 * whether used as inputs or outputs, are set by boot code after
10521 * reset.
10522 */
10523 if (!tg3_flag(tp, IS_NIC)) {
10524 u32 gpio_mask;
10525
10526 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10527 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10528 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10529
10530 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10531 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10532 GRC_LCLCTRL_GPIO_OUTPUT3;
10533
10534 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10535 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10536
10537 tp->grc_local_ctrl &= ~gpio_mask;
10538 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10539
10540 /* GPIO1 must be driven high for eeprom write protect */
10541 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10542 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10543 GRC_LCLCTRL_GPIO_OUTPUT1);
10544 }
10545 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10546 udelay(100);
10547
10548 if (tg3_flag(tp, USING_MSIX)) {
10549 val = tr32(MSGINT_MODE);
10550 val |= MSGINT_MODE_ENABLE;
10551 if (tp->irq_cnt > 1)
10552 val |= MSGINT_MODE_MULTIVEC_EN;
10553 if (!tg3_flag(tp, 1SHOT_MSI))
10554 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10555 tw32(MSGINT_MODE, val);
10556 }
10557
10558 if (!tg3_flag(tp, 5705_PLUS)) {
10559 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10560 udelay(40);
10561 }
10562
10563 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10564 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10565 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10566 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10567 WDMAC_MODE_LNGREAD_ENAB);
10568
10569 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10570 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10571 if (tg3_flag(tp, TSO_CAPABLE) &&
10572 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10573 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10574 /* nothing */
10575 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10576 !tg3_flag(tp, IS_5788)) {
10577 val |= WDMAC_MODE_RX_ACCEL;
10578 }
10579 }
10580
10581 /* Enable host coalescing bug fix */
10582 if (tg3_flag(tp, 5755_PLUS))
10583 val |= WDMAC_MODE_STATUS_TAG_FIX;
10584
10585 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10586 val |= WDMAC_MODE_BURST_ALL_DATA;
10587
10588 tw32_f(WDMAC_MODE, val);
10589 udelay(40);
10590
10591 if (tg3_flag(tp, PCIX_MODE)) {
10592 u16 pcix_cmd;
10593
10594 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10595 &pcix_cmd);
10596 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10597 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10598 pcix_cmd |= PCI_X_CMD_READ_2K;
10599 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10600 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10601 pcix_cmd |= PCI_X_CMD_READ_2K;
10602 }
10603 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10604 pcix_cmd);
10605 }
10606
10607 tw32_f(RDMAC_MODE, rdmac_mode);
10608 udelay(40);
10609
10610 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10611 tg3_asic_rev(tp) == ASIC_REV_5720) {
10612 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10613 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10614 break;
10615 }
10616 if (i < TG3_NUM_RDMA_CHANNELS) {
10617 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10618 val |= tg3_lso_rd_dma_workaround_bit(tp);
10619 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10620 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10621 }
10622 }
10623
10624 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10625 if (!tg3_flag(tp, 5705_PLUS))
10626 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10627
10628 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10629 tw32(SNDDATAC_MODE,
10630 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10631 else
10632 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10633
10634 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10635 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10636 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10637 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10638 val |= RCVDBDI_MODE_LRG_RING_SZ;
10639 tw32(RCVDBDI_MODE, val);
10640 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10641 if (tg3_flag(tp, HW_TSO_1) ||
10642 tg3_flag(tp, HW_TSO_2) ||
10643 tg3_flag(tp, HW_TSO_3))
10644 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10645 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10646 if (tg3_flag(tp, ENABLE_TSS))
10647 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10648 tw32(SNDBDI_MODE, val);
10649 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10650
10651 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10652 err = tg3_load_5701_a0_firmware_fix(tp);
10653 if (err)
10654 return err;
10655 }
10656
10657 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10658 /* Ignore any errors from the firmware download. If the download
10659 * fails, the device will operate with EEE disabled.
10660 */
10661 tg3_load_57766_firmware(tp);
10662 }
10663
10664 if (tg3_flag(tp, TSO_CAPABLE)) {
10665 err = tg3_load_tso_firmware(tp);
10666 if (err)
10667 return err;
10668 }
10669
10670 tp->tx_mode = TX_MODE_ENABLE;
10671
10672 if (tg3_flag(tp, 5755_PLUS) ||
10673 tg3_asic_rev(tp) == ASIC_REV_5906)
10674 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10675
10676 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10677 tg3_asic_rev(tp) == ASIC_REV_5762) {
10678 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10679 tp->tx_mode &= ~val;
10680 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10681 }
10682
10683 tw32_f(MAC_TX_MODE, tp->tx_mode);
10684 udelay(100);
10685
10686 if (tg3_flag(tp, ENABLE_RSS)) {
10687 u32 rss_key[10];
10688
10689 tg3_rss_write_indir_tbl(tp);
10690
10691 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10692
10693 for (i = 0; i < 10 ; i++)
10694 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10695 }
10696
10697 tp->rx_mode = RX_MODE_ENABLE;
10698 if (tg3_flag(tp, 5755_PLUS))
10699 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10700
10701 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10702 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10703
10704 if (tg3_flag(tp, ENABLE_RSS))
10705 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10706 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10707 RX_MODE_RSS_IPV6_HASH_EN |
10708 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10709 RX_MODE_RSS_IPV4_HASH_EN |
10710 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10711
10712 tw32_f(MAC_RX_MODE, tp->rx_mode);
10713 udelay(10);
10714
10715 tw32(MAC_LED_CTRL, tp->led_ctrl);
10716
10717 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10718 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10719 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10720 udelay(10);
10721 }
10722 tw32_f(MAC_RX_MODE, tp->rx_mode);
10723 udelay(10);
10724
10725 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10726 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10727 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10728 /* Set drive transmission level to 1.2V */
10729 /* only if the signal pre-emphasis bit is not set */
10730 val = tr32(MAC_SERDES_CFG);
10731 val &= 0xfffff000;
10732 val |= 0x880;
10733 tw32(MAC_SERDES_CFG, val);
10734 }
10735 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10736 tw32(MAC_SERDES_CFG, 0x616000);
10737 }
10738
10739 /* Prevent chip from dropping frames when flow control
10740 * is enabled.
10741 */
10742 if (tg3_flag(tp, 57765_CLASS))
10743 val = 1;
10744 else
10745 val = 2;
10746 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10747
10748 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10749 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10750 /* Use hardware link auto-negotiation */
10751 tg3_flag_set(tp, HW_AUTONEG);
10752 }
10753
10754 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10755 tg3_asic_rev(tp) == ASIC_REV_5714) {
10756 u32 tmp;
10757
10758 tmp = tr32(SERDES_RX_CTRL);
10759 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10760 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10761 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10762 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10763 }
10764
10765 if (!tg3_flag(tp, USE_PHYLIB)) {
10766 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10767 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10768
10769 err = tg3_setup_phy(tp, false);
10770 if (err)
10771 return err;
10772
10773 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10774 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10775 u32 tmp;
10776
10777 /* Clear CRC stats. */
10778 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10779 tg3_writephy(tp, MII_TG3_TEST1,
10780 tmp | MII_TG3_TEST1_CRC_EN);
10781 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10782 }
10783 }
10784 }
10785
10786 __tg3_set_rx_mode(tp->dev);
10787
10788 /* Initialize receive rules. */
10789 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10790 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10791 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10792 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10793
10794 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10795 limit = 8;
10796 else
10797 limit = 16;
10798 if (tg3_flag(tp, ENABLE_ASF))
10799 limit -= 4;
10800 switch (limit) {
10801 case 16:
10802 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10803 fallthrough;
10804 case 15:
10805 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10806 fallthrough;
10807 case 14:
10808 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10809 fallthrough;
10810 case 13:
10811 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10812 fallthrough;
10813 case 12:
10814 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10815 fallthrough;
10816 case 11:
10817 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10818 fallthrough;
10819 case 10:
10820 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10821 fallthrough;
10822 case 9:
10823 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10824 fallthrough;
10825 case 8:
10826 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10827 fallthrough;
10828 case 7:
10829 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10830 fallthrough;
10831 case 6:
10832 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10833 fallthrough;
10834 case 5:
10835 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10836 fallthrough;
10837 case 4:
10838 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10839 case 3:
10840 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10841 case 2:
10842 case 1:
10843
10844 default:
10845 break;
10846 }
10847
10848 if (tg3_flag(tp, ENABLE_APE))
10849 /* Write our heartbeat update interval to APE. */
10850 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10851 APE_HOST_HEARTBEAT_INT_5SEC);
10852
10853 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10854
10855 return 0;
10856 }
10857
10858 /* Called at device open time to get the chip ready for
10859 * packet processing. Invoked with tp->lock held.
10860 */
10861 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10862 {
10863 /* Chip may have been just powered on. If so, the boot code may still
10864 * be running initialization. Wait for it to finish to avoid races in
10865 * accessing the hardware.
10866 */
10867 tg3_enable_register_access(tp);
10868 tg3_poll_fw(tp);
10869
10870 tg3_switch_clocks(tp);
10871
10872 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10873
10874 return tg3_reset_hw(tp, reset_phy);
10875 }
10876
10877 #ifdef CONFIG_TIGON3_HWMON
10878 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10879 {
10880 u32 off, len = TG3_OCIR_LEN;
10881 int i;
10882
10883 for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10884 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10885
10886 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10887 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10888 memset(ocir, 0, len);
10889 }
10890 }
10891
10892 /* sysfs attributes for hwmon */
10893 static ssize_t tg3_show_temp(struct device *dev,
10894 struct device_attribute *devattr, char *buf)
10895 {
10896 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10897 struct tg3 *tp = dev_get_drvdata(dev);
10898 u32 temperature;
10899
10900 spin_lock_bh(&tp->lock);
10901 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10902 sizeof(temperature));
10903 spin_unlock_bh(&tp->lock);
10904 return sysfs_emit(buf, "%u\n", temperature * 1000);
10905 }
10906
10907
10908 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10909 TG3_TEMP_SENSOR_OFFSET);
10910 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10911 TG3_TEMP_CAUTION_OFFSET);
10912 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10913 TG3_TEMP_MAX_OFFSET);
10914
10915 static struct attribute *tg3_attrs[] = {
10916 &sensor_dev_attr_temp1_input.dev_attr.attr,
10917 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10918 &sensor_dev_attr_temp1_max.dev_attr.attr,
10919 NULL
10920 };
10921 ATTRIBUTE_GROUPS(tg3);
10922
10923 static void tg3_hwmon_close(struct tg3 *tp)
10924 {
10925 if (tp->hwmon_dev) {
10926 hwmon_device_unregister(tp->hwmon_dev);
10927 tp->hwmon_dev = NULL;
10928 }
10929 }
10930
10931 static void tg3_hwmon_open(struct tg3 *tp)
10932 {
10933 int i;
10934 u32 size = 0;
10935 struct pci_dev *pdev = tp->pdev;
10936 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10937
10938 tg3_sd_scan_scratchpad(tp, ocirs);
10939
10940 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10941 if (!ocirs[i].src_data_length)
10942 continue;
10943
10944 size += ocirs[i].src_hdr_length;
10945 size += ocirs[i].src_data_length;
10946 }
10947
10948 if (!size)
10949 return;
10950
10951 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10952 tp, tg3_groups);
10953 if (IS_ERR(tp->hwmon_dev)) {
10954 tp->hwmon_dev = NULL;
10955 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10956 }
10957 }
10958 #else
10959 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10960 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10961 #endif /* CONFIG_TIGON3_HWMON */
10962
10963
10964 #define TG3_STAT_ADD32(PSTAT, REG) \
10965 do { u32 __val = tr32(REG); \
10966 (PSTAT)->low += __val; \
10967 if ((PSTAT)->low < __val) \
10968 (PSTAT)->high += 1; \
10969 } while (0)
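
/* Annotation: the MAC statistics registers are only 32 bits wide,
 * so TG3_STAT_ADD32() accumulates them into 64-bit high/low software
 * counters.  The carry test relies on unsigned wraparound: if the
 * 32-bit add overflowed, the new low word is smaller than the
 * addend.  E.g. low = 0xfffffff0 plus __val = 0x20 leaves
 * low = 0x10 < 0x20, so high is incremented.
 */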
10970
10971 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10972 {
10973 struct tg3_hw_stats *sp = tp->hw_stats;
10974
10975 if (!tp->link_up)
10976 return;
10977
10978 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10979 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10980 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10981 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10982 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10983 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10984 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10985 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10986 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10987 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10988 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10989 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10990 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10991 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10992 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10993 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10994 u32 val;
10995
10996 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10997 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10998 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10999 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
11000 }
11001
11002 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
11003 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
11004 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
11005 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
11006 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
11007 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
11008 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
11009 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
11010 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
11011 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
11012 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
11013 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
11014 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
11015 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
11016
11017 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
11018 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
11019 tg3_asic_rev(tp) != ASIC_REV_5762 &&
11020 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
11021 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
11022 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
11023 } else {
11024 u32 val = tr32(HOSTCC_FLOW_ATTN);
11025 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
11026 if (val) {
11027 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
11028 sp->rx_discards.low += val;
11029 if (sp->rx_discards.low < val)
11030 sp->rx_discards.high += 1;
11031 }
11032 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
11033 }
11034 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
11035 }
11036
11037 static void tg3_chk_missed_msi(struct tg3 *tp)
11038 {
11039 u32 i;
11040
11041 for (i = 0; i < tp->irq_cnt; i++) {
11042 struct tg3_napi *tnapi = &tp->napi[i];
11043
11044 if (tg3_has_work(tnapi)) {
11045 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
11046 tnapi->last_tx_cons == tnapi->tx_cons) {
11047 if (tnapi->chk_msi_cnt < 1) {
11048 tnapi->chk_msi_cnt++;
11049 return;
11050 }
11051 tg3_msi(0, tnapi);
11052 }
11053 }
11054 tnapi->chk_msi_cnt = 0;
11055 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
11056 tnapi->last_tx_cons = tnapi->tx_cons;
11057 }
11058 }
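
/* Annotation: heuristic for lost MSIs.  A vector that reports
 * pending work while neither its RX nor TX consumer index has
 * advanced since the previous timer tick is assumed to have missed
 * its interrupt, and tg3_msi() is invoked by hand; chk_msi_cnt
 * grants one tick of grace before that happens.
 */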
11059
11060 static void tg3_timer(struct timer_list *t)
11061 {
11062 struct tg3 *tp = timer_container_of(tp, t, timer);
11063
11064 spin_lock(&tp->lock);
11065
11066 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
11067 spin_unlock(&tp->lock);
11068 goto restart_timer;
11069 }
11070
11071 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
11072 tg3_flag(tp, 57765_CLASS))
11073 tg3_chk_missed_msi(tp);
11074
11075 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
11076 /* BCM4785: Flush posted writes from GbE to host memory. */
11077 tr32(HOSTCC_MODE);
11078 }
11079
11080 if (!tg3_flag(tp, TAGGED_STATUS)) {
11081 /* All of this garbage is because, when using non-tagged
11082 * IRQ status, the mailbox/status_block protocol the chip
11083 * uses with the cpu is race prone.
11084 */
11085 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11086 tw32(GRC_LOCAL_CTRL,
11087 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11088 } else {
11089 tw32(HOSTCC_MODE, tp->coalesce_mode |
11090 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11091 }
11092
11093 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11094 spin_unlock(&tp->lock);
11095 tg3_reset_task_schedule(tp);
11096 goto restart_timer;
11097 }
11098 }
11099
11100 /* This part only runs once per second. */
11101 if (!--tp->timer_counter) {
11102 if (tg3_flag(tp, 5705_PLUS))
11103 tg3_periodic_fetch_stats(tp);
11104
11105 if (tp->setlpicnt && !--tp->setlpicnt)
11106 tg3_phy_eee_enable(tp);
11107
11108 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11109 u32 mac_stat;
11110 int phy_event;
11111
11112 mac_stat = tr32(MAC_STATUS);
11113
11114 phy_event = 0;
11115 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11116 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11117 phy_event = 1;
11118 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11119 phy_event = 1;
11120
11121 if (phy_event)
11122 tg3_setup_phy(tp, false);
11123 } else if (tg3_flag(tp, POLL_SERDES)) {
11124 u32 mac_stat = tr32(MAC_STATUS);
11125 int need_setup = 0;
11126
11127 if (tp->link_up &&
11128 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11129 need_setup = 1;
11130 }
11131 if (!tp->link_up &&
11132 (mac_stat & (MAC_STATUS_PCS_SYNCED |
11133 MAC_STATUS_SIGNAL_DET))) {
11134 need_setup = 1;
11135 }
11136 if (need_setup) {
11137 if (!tp->serdes_counter) {
11138 tw32_f(MAC_MODE,
11139 (tp->mac_mode &
11140 ~MAC_MODE_PORT_MODE_MASK));
11141 udelay(40);
11142 tw32_f(MAC_MODE, tp->mac_mode);
11143 udelay(40);
11144 }
11145 tg3_setup_phy(tp, false);
11146 }
11147 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11148 tg3_flag(tp, 5780_CLASS)) {
11149 tg3_serdes_parallel_detect(tp);
11150 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11151 u32 cpmu = tr32(TG3_CPMU_STATUS);
11152 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11153 TG3_CPMU_STATUS_LINK_MASK);
11154
11155 if (link_up != tp->link_up)
11156 tg3_setup_phy(tp, false);
11157 }
11158
11159 tp->timer_counter = tp->timer_multiplier;
11160 }
11161
11162 /* Heartbeat is only sent once every 2 seconds.
11163 *
11164 * The heartbeat is to tell the ASF firmware that the host
11165 * driver is still alive. In the event that the OS crashes,
11166 * ASF needs to reset the hardware to free up the FIFO space
11167 * that may be filled with rx packets destined for the host.
11168 * If the FIFO is full, ASF will no longer function properly.
11169 *
11170 * Unintended resets have been reported on real-time kernels
11171 * where the timer doesn't run on time. Netpoll will also have
11172 * the same problem.
11173 *
11174 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11175 * to check the ring condition when the heartbeat is expiring
11176 * before doing the reset. This will prevent most unintended
11177 * resets.
11178 */
11179 if (!--tp->asf_counter) {
11180 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11181 tg3_wait_for_event_ack(tp);
11182
11183 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11184 FWCMD_NICDRV_ALIVE3);
11185 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11186 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11187 TG3_FW_UPDATE_TIMEOUT_SEC);
11188
11189 tg3_generate_fw_event(tp);
11190 }
11191 tp->asf_counter = tp->asf_multiplier;
11192 }
11193
11194 /* Update the APE heartbeat every 5 seconds. */
11195 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11196
11197 spin_unlock(&tp->lock);
11198
11199 restart_timer:
11200 tp->timer.expires = jiffies + tp->timer_offset;
11201 add_timer(&tp->timer);
11202 }
11203
11204 static void tg3_timer_init(struct tg3 *tp)
11205 {
11206 if (tg3_flag(tp, TAGGED_STATUS) &&
11207 tg3_asic_rev(tp) != ASIC_REV_5717 &&
11208 !tg3_flag(tp, 57765_CLASS))
11209 tp->timer_offset = HZ;
11210 else
11211 tp->timer_offset = HZ / 10;
11212
11213 BUG_ON(tp->timer_offset > HZ);
11214
11215 tp->timer_multiplier = (HZ / tp->timer_offset);
11216 tp->asf_multiplier = (HZ / tp->timer_offset) *
11217 TG3_FW_UPDATE_FREQ_SEC;
11218
11219 timer_setup(&tp->timer, tg3_timer, 0);
11220 }
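
/* Annotation (sketch of the arithmetic): with timer_offset == HZ / 10
 * the timer fires every 100 ms and timer_multiplier == 10, so the
 * once-per-second block in tg3_timer() runs every tenth tick;
 * asf_multiplier scales the same tick rate by TG3_FW_UPDATE_FREQ_SEC
 * so the ASF heartbeat fires once per heartbeat period.
 */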
11221
11222 static void tg3_timer_start(struct tg3 *tp)
11223 {
11224 tp->asf_counter = tp->asf_multiplier;
11225 tp->timer_counter = tp->timer_multiplier;
11226
11227 tp->timer.expires = jiffies + tp->timer_offset;
11228 add_timer(&tp->timer);
11229 }
11230
11231 static void tg3_timer_stop(struct tg3 *tp)
11232 {
11233 timer_delete_sync(&tp->timer);
11234 }
11235
11236 /* Restart hardware after configuration changes, self-test, etc.
11237 * Invoked with tp->lock held.
11238 */
11239 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11240 __releases(tp->lock)
11241 __acquires(tp->lock)
11242 __releases(tp->dev->lock)
11243 __acquires(tp->dev->lock)
11244 {
11245 int err;
11246
11247 err = tg3_init_hw(tp, reset_phy);
11248 if (err) {
11249 netdev_err(tp->dev,
11250 "Failed to re-initialize device, aborting\n");
11251 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11252 tg3_full_unlock(tp);
11253 tg3_timer_stop(tp);
11254 tp->irq_sync = 0;
11255 tg3_napi_enable(tp);
11256 netdev_unlock(tp->dev);
11257 dev_close(tp->dev);
11258 netdev_lock(tp->dev);
11259 tg3_full_lock(tp, 0);
11260 }
11261 return err;
11262 }
11263
11264 static void tg3_reset_task(struct work_struct *work)
11265 {
11266 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11267 int err;
11268
11269 rtnl_lock();
11270 tg3_full_lock(tp, 0);
11271
11272 if (tp->pcierr_recovery || !netif_running(tp->dev) ||
11273 tp->pdev->error_state != pci_channel_io_normal) {
11274 tg3_flag_clear(tp, RESET_TASK_PENDING);
11275 tg3_full_unlock(tp);
11276 rtnl_unlock();
11277 return;
11278 }
11279
11280 tg3_full_unlock(tp);
11281
11282 tg3_phy_stop(tp);
11283
11284 tg3_netif_stop(tp);
11285
11286 netdev_lock(tp->dev);
11287 tg3_full_lock(tp, 1);
11288
11289 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11290 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11291 tp->write32_rx_mbox = tg3_write_flush_reg32;
11292 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11293 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11294 }
11295
11296 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11297 err = tg3_init_hw(tp, true);
11298 if (err) {
11299 tg3_full_unlock(tp);
11300 tp->irq_sync = 0;
11301 tg3_napi_enable(tp);
11302 /* Clear this flag so that tg3_reset_task_cancel() will not
11303 * call cancel_work_sync() and wait forever.
11304 */
11305 tg3_flag_clear(tp, RESET_TASK_PENDING);
11306 netdev_unlock(tp->dev);
11307 dev_close(tp->dev);
11308 goto out;
11309 }
11310
11311 tg3_netif_start(tp);
11312 tg3_full_unlock(tp);
11313 netdev_unlock(tp->dev);
11314 tg3_phy_start(tp);
11315 tg3_flag_clear(tp, RESET_TASK_PENDING);
11316 out:
11317 rtnl_unlock();
11318 }
11319
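/* Vector naming used below: a single-vector device registers under the
 * plain netdev name; multi-vector devices get a "-txrx-N", "-tx-N",
 * "-rx-N" or bare "-N" suffix depending on which rings the vector
 * services (so a hypothetical "eth0" might expose "eth0-rx-2").
 */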
11320 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11321 {
11322 irq_handler_t fn;
11323 unsigned long flags;
11324 char *name;
11325 struct tg3_napi *tnapi = &tp->napi[irq_num];
11326
11327 if (tp->irq_cnt == 1)
11328 name = tp->dev->name;
11329 else {
11330 name = &tnapi->irq_lbl[0];
11331 if (tnapi->tx_buffers && tnapi->rx_rcb)
11332 snprintf(name, sizeof(tnapi->irq_lbl),
11333 "%s-txrx-%d", tp->dev->name, irq_num);
11334 else if (tnapi->tx_buffers)
11335 snprintf(name, sizeof(tnapi->irq_lbl),
11336 "%s-tx-%d", tp->dev->name, irq_num);
11337 else if (tnapi->rx_rcb)
11338 snprintf(name, sizeof(tnapi->irq_lbl),
11339 "%s-rx-%d", tp->dev->name, irq_num);
11340 else
11341 snprintf(name, sizeof(tnapi->irq_lbl),
11342 "%s-%d", tp->dev->name, irq_num);
11343 }
11344
11345 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11346 fn = tg3_msi;
11347 if (tg3_flag(tp, 1SHOT_MSI))
11348 fn = tg3_msi_1shot;
11349 flags = 0;
11350 } else {
11351 fn = tg3_interrupt;
11352 if (tg3_flag(tp, TAGGED_STATUS))
11353 fn = tg3_interrupt_tagged;
11354 flags = IRQF_SHARED;
11355 }
11356
11357 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11358 }
11359
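/* Interrupt self-test strategy: temporarily install tg3_test_isr, kick
 * host coalescing with coal_now to force an interrupt, then poll the
 * interrupt mailbox up to five times at 10ms intervals to confirm
 * delivery before restoring the normal handler via tg3_request_irq().
 */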
11360 static int tg3_test_interrupt(struct tg3 *tp)
11361 {
11362 struct tg3_napi *tnapi = &tp->napi[0];
11363 struct net_device *dev = tp->dev;
11364 int err, i, intr_ok = 0;
11365 u32 val;
11366
11367 if (!netif_running(dev))
11368 return -ENODEV;
11369
11370 tg3_disable_ints(tp);
11371
11372 free_irq(tnapi->irq_vec, tnapi);
11373
11374 /*
11375 * Turn off MSI one shot mode. Otherwise this test has no
11376 * observable way to know whether the interrupt was delivered.
11377 */
11378 if (tg3_flag(tp, 57765_PLUS)) {
11379 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11380 tw32(MSGINT_MODE, val);
11381 }
11382
11383 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11384 IRQF_SHARED, dev->name, tnapi);
11385 if (err)
11386 return err;
11387
11388 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11389 tg3_enable_ints(tp);
11390
11391 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11392 tnapi->coal_now);
11393
11394 for (i = 0; i < 5; i++) {
11395 u32 int_mbox, misc_host_ctrl;
11396
11397 int_mbox = tr32_mailbox(tnapi->int_mbox);
11398 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11399
11400 if ((int_mbox != 0) ||
11401 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11402 intr_ok = 1;
11403 break;
11404 }
11405
11406 if (tg3_flag(tp, 57765_PLUS) &&
11407 tnapi->hw_status->status_tag != tnapi->last_tag)
11408 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11409
11410 msleep(10);
11411 }
11412
11413 tg3_disable_ints(tp);
11414
11415 free_irq(tnapi->irq_vec, tnapi);
11416
11417 err = tg3_request_irq(tp, 0);
11418
11419 if (err)
11420 return err;
11421
11422 if (intr_ok) {
11423 /* Reenable MSI one shot mode. */
11424 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11425 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11426 tw32(MSGINT_MODE, val);
11427 }
11428 return 0;
11429 }
11430
11431 return -EIO;
11432 }
11433
11434 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
11435  * INTx mode is successfully restored.
11436 */
11437 static int tg3_test_msi(struct tg3 *tp)
11438 {
11439 int err;
11440 u16 pci_cmd;
11441
11442 if (!tg3_flag(tp, USING_MSI))
11443 return 0;
11444
11445 /* Turn off SERR reporting in case MSI terminates with Master
11446 * Abort.
11447 */
11448 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11449 pci_write_config_word(tp->pdev, PCI_COMMAND,
11450 pci_cmd & ~PCI_COMMAND_SERR);
11451
11452 err = tg3_test_interrupt(tp);
11453
11454 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11455
11456 if (!err)
11457 return 0;
11458
11459 /* other failures */
11460 if (err != -EIO)
11461 return err;
11462
11463 /* MSI test failed, go back to INTx mode */
11464 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11465 "to INTx mode. Please report this failure to the PCI "
11466 "maintainer and include system chipset information\n");
11467
11468 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11469
11470 pci_disable_msi(tp->pdev);
11471
11472 tg3_flag_clear(tp, USING_MSI);
11473 tp->napi[0].irq_vec = tp->pdev->irq;
11474
11475 err = tg3_request_irq(tp, 0);
11476 if (err)
11477 return err;
11478
11479 /* Need to reset the chip because the MSI cycle may have terminated
11480 * with Master Abort.
11481 */
11482 tg3_full_lock(tp, 1);
11483
11484 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11485 err = tg3_init_hw(tp, true);
11486
11487 tg3_full_unlock(tp);
11488
11489 if (err)
11490 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11491
11492 return err;
11493 }
11494
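/* For reference, the header parsed below is roughly (a sketch of the
 * tg3_firmware_hdr layout from tg3.h; all fields big-endian):
 *
 *	__be32 version;
 *	__be32 base_addr;
 *	__be32 len;	- full image length, including BSS
 */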
11495 static int tg3_request_firmware(struct tg3 *tp)
11496 {
11497 const struct tg3_firmware_hdr *fw_hdr;
11498
11499 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11500 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11501 tp->fw_needed);
11502 return -ENOENT;
11503 }
11504
11505 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11506
11507 /* Firmware blob starts with version numbers, followed by
11508 * start address and _full_ length including BSS sections
11509 	 * (which must be longer than the actual data, of course).
11510 */
11511
11512 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11513 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11514 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11515 tp->fw_len, tp->fw_needed);
11516 release_firmware(tp->fw);
11517 tp->fw = NULL;
11518 return -EINVAL;
11519 }
11520
11521 /* We no longer need firmware; we have it. */
11522 tp->fw_needed = NULL;
11523 return 0;
11524 }
11525
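/* Worked example: rxq_cnt = 4, txq_cnt = 1 gives max(4, 1) = 4 rings;
 * since that exceeds one, an extra vector is reserved for link/status
 * interrupts, so min(4 + 1, irq_max) vectors are requested.
 */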
11526 static u32 tg3_irq_count(struct tg3 *tp)
11527 {
11528 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11529
11530 if (irq_cnt > 1) {
11531 		/* We want as many rx rings enabled as there are CPUs.
11532 * In multiqueue MSI-X mode, the first MSI-X vector
11533 * only deals with link interrupts, etc, so we add
11534 * one to the number of vectors we are requesting.
11535 */
11536 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11537 }
11538
11539 return irq_cnt;
11540 }
11541
11542 static bool tg3_enable_msix(struct tg3 *tp)
11543 {
11544 int i, rc;
11545 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11546
11547 tp->txq_cnt = tp->txq_req;
11548 tp->rxq_cnt = tp->rxq_req;
11549 if (!tp->rxq_cnt)
11550 tp->rxq_cnt = netif_get_num_default_rss_queues();
11551 if (tp->rxq_cnt > tp->rxq_max)
11552 tp->rxq_cnt = tp->rxq_max;
11553
11554 /* Disable multiple TX rings by default. Simple round-robin hardware
11555 * scheduling of the TX rings can cause starvation of rings with
11556 * small packets when other rings have TSO or jumbo packets.
11557 */
11558 if (!tp->txq_req)
11559 tp->txq_cnt = 1;
11560
11561 tp->irq_cnt = tg3_irq_count(tp);
11562
11563 for (i = 0; i < tp->irq_max; i++) {
11564 msix_ent[i].entry = i;
11565 msix_ent[i].vector = 0;
11566 }
11567
11568 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11569 if (rc < 0) {
11570 return false;
11571 } else if (rc < tp->irq_cnt) {
11572 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11573 tp->irq_cnt, rc);
11574 tp->irq_cnt = rc;
11575 tp->rxq_cnt = max(rc - 1, 1);
11576 if (tp->txq_cnt)
11577 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11578 }
11579
11580 for (i = 0; i < tp->irq_max; i++)
11581 tp->napi[i].irq_vec = msix_ent[i].vector;
11582
11583 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11584 pci_disable_msix(tp->pdev);
11585 return false;
11586 }
11587
11588 if (tp->irq_cnt == 1)
11589 return true;
11590
11591 tg3_flag_set(tp, ENABLE_RSS);
11592
11593 if (tp->txq_cnt > 1)
11594 tg3_flag_set(tp, ENABLE_TSS);
11595
11596 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11597
11598 return true;
11599 }
11600
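/* Interrupt setup policy: try MSI-X first (multi-queue capable), fall
 * back to single-vector MSI, then to legacy shared INTx.  Any path
 * that ends with a single vector also collapses the device to one TX
 * and one RX queue (see defcfg below).
 */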
11601 static void tg3_ints_init(struct tg3 *tp)
11602 {
11603 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11604 !tg3_flag(tp, TAGGED_STATUS)) {
11605 /* All MSI supporting chips should support tagged
11606 * status. Assert that this is the case.
11607 */
11608 netdev_warn(tp->dev,
11609 "MSI without TAGGED_STATUS? Not using MSI\n");
11610 goto defcfg;
11611 }
11612
11613 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11614 tg3_flag_set(tp, USING_MSIX);
11615 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11616 tg3_flag_set(tp, USING_MSI);
11617
11618 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11619 u32 msi_mode = tr32(MSGINT_MODE);
11620 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11621 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11622 if (!tg3_flag(tp, 1SHOT_MSI))
11623 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11624 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11625 }
11626 defcfg:
11627 if (!tg3_flag(tp, USING_MSIX)) {
11628 tp->irq_cnt = 1;
11629 tp->napi[0].irq_vec = tp->pdev->irq;
11630 }
11631
11632 if (tp->irq_cnt == 1) {
11633 tp->txq_cnt = 1;
11634 tp->rxq_cnt = 1;
11635 netif_set_real_num_tx_queues(tp->dev, 1);
11636 netif_set_real_num_rx_queues(tp->dev, 1);
11637 }
11638 }
11639
11640 static void tg3_ints_fini(struct tg3 *tp)
11641 {
11642 if (tg3_flag(tp, USING_MSIX))
11643 pci_disable_msix(tp->pdev);
11644 else if (tg3_flag(tp, USING_MSI))
11645 pci_disable_msi(tp->pdev);
11646 tg3_flag_clear(tp, USING_MSI);
11647 tg3_flag_clear(tp, USING_MSIX);
11648 tg3_flag_clear(tp, ENABLE_RSS);
11649 tg3_flag_clear(tp, ENABLE_TSS);
11650 }
11651
11652 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11653 bool init)
11654 {
11655 struct net_device *dev = tp->dev;
11656 int i, err;
11657
11658 /*
11659 * Setup interrupts first so we know how
11660 * many NAPI resources to allocate
11661 */
11662 tg3_ints_init(tp);
11663
11664 tg3_rss_check_indir_tbl(tp);
11665
11666 /* The placement of this call is tied
11667 * to the setup and use of Host TX descriptors.
11668 */
11669 err = tg3_alloc_consistent(tp);
11670 if (err)
11671 goto out_ints_fini;
11672
11673 netdev_lock(dev);
11674 tg3_napi_init(tp);
11675
11676 tg3_napi_enable(tp);
11677 netdev_unlock(dev);
11678
11679 for (i = 0; i < tp->irq_cnt; i++) {
11680 err = tg3_request_irq(tp, i);
11681 if (err) {
11682 for (i--; i >= 0; i--) {
11683 struct tg3_napi *tnapi = &tp->napi[i];
11684
11685 free_irq(tnapi->irq_vec, tnapi);
11686 }
11687 goto out_napi_fini;
11688 }
11689 }
11690
11691 tg3_full_lock(tp, 0);
11692
11693 if (init)
11694 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11695
11696 err = tg3_init_hw(tp, reset_phy);
11697 if (err) {
11698 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11699 tg3_free_rings(tp);
11700 }
11701
11702 tg3_full_unlock(tp);
11703
11704 if (err)
11705 goto out_free_irq;
11706
11707 if (test_irq && tg3_flag(tp, USING_MSI)) {
11708 err = tg3_test_msi(tp);
11709
11710 if (err) {
11711 tg3_full_lock(tp, 0);
11712 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11713 tg3_free_rings(tp);
11714 tg3_full_unlock(tp);
11715
11716 goto out_napi_fini;
11717 }
11718
11719 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11720 u32 val = tr32(PCIE_TRANSACTION_CFG);
11721
11722 tw32(PCIE_TRANSACTION_CFG,
11723 val | PCIE_TRANS_CFG_1SHOT_MSI);
11724 }
11725 }
11726
11727 tg3_phy_start(tp);
11728
11729 tg3_hwmon_open(tp);
11730
11731 tg3_full_lock(tp, 0);
11732
11733 tg3_timer_start(tp);
11734 tg3_flag_set(tp, INIT_COMPLETE);
11735 tg3_enable_ints(tp);
11736
11737 tg3_ptp_resume(tp);
11738
11739 tg3_full_unlock(tp);
11740
11741 netif_tx_start_all_queues(dev);
11742
11743 /*
11744 	 * Reset the loopback feature if it was turned on while the device
11745 	 * was down; make sure that it is reinstalled properly now.
11746 */
11747 if (dev->features & NETIF_F_LOOPBACK)
11748 tg3_set_loopback(dev, dev->features);
11749
11750 return 0;
11751
11752 out_free_irq:
11753 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11754 struct tg3_napi *tnapi = &tp->napi[i];
11755 free_irq(tnapi->irq_vec, tnapi);
11756 }
11757
11758 out_napi_fini:
11759 tg3_napi_disable(tp);
11760 tg3_napi_fini(tp);
11761 tg3_free_consistent(tp);
11762
11763 out_ints_fini:
11764 tg3_ints_fini(tp);
11765
11766 return err;
11767 }
11768
11769 static void tg3_stop(struct tg3 *tp)
11770 {
11771 int i;
11772
11773 tg3_reset_task_cancel(tp);
11774 tg3_netif_stop(tp);
11775
11776 tg3_timer_stop(tp);
11777
11778 tg3_hwmon_close(tp);
11779
11780 tg3_phy_stop(tp);
11781
11782 tg3_full_lock(tp, 1);
11783
11784 tg3_disable_ints(tp);
11785
11786 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11787 tg3_free_rings(tp);
11788 tg3_flag_clear(tp, INIT_COMPLETE);
11789
11790 tg3_full_unlock(tp);
11791
11792 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11793 struct tg3_napi *tnapi = &tp->napi[i];
11794 free_irq(tnapi->irq_vec, tnapi);
11795 }
11796
11797 tg3_ints_fini(tp);
11798
11799 tg3_napi_fini(tp);
11800
11801 tg3_free_consistent(tp);
11802 }
11803
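/* ndo_open path: fetch any needed firmware (degrading EEE or TSO
 * instead of failing where the hardware tolerates it), power the chip
 * up, then hand off to tg3_start() with interrupt testing enabled.
 */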
11804 static int tg3_open(struct net_device *dev)
11805 {
11806 struct tg3 *tp = netdev_priv(dev);
11807 int err;
11808
11809 if (tp->pcierr_recovery) {
11810 netdev_err(dev, "Failed to open device. PCI error recovery "
11811 "in progress\n");
11812 return -EAGAIN;
11813 }
11814
11815 if (tp->fw_needed) {
11816 err = tg3_request_firmware(tp);
11817 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11818 if (err) {
11819 netdev_warn(tp->dev, "EEE capability disabled\n");
11820 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11821 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11822 netdev_warn(tp->dev, "EEE capability restored\n");
11823 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11824 }
11825 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11826 if (err)
11827 return err;
11828 } else if (err) {
11829 netdev_warn(tp->dev, "TSO capability disabled\n");
11830 tg3_flag_clear(tp, TSO_CAPABLE);
11831 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11832 netdev_notice(tp->dev, "TSO capability restored\n");
11833 tg3_flag_set(tp, TSO_CAPABLE);
11834 }
11835 }
11836
11837 tg3_carrier_off(tp);
11838
11839 err = tg3_power_up(tp);
11840 if (err)
11841 return err;
11842
11843 tg3_full_lock(tp, 0);
11844
11845 tg3_disable_ints(tp);
11846 tg3_flag_clear(tp, INIT_COMPLETE);
11847
11848 tg3_full_unlock(tp);
11849
11850 err = tg3_start(tp,
11851 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11852 true, true);
11853 if (err) {
11854 tg3_frob_aux_power(tp, false);
11855 pci_set_power_state(tp->pdev, PCI_D3hot);
11856 }
11857
11858 return err;
11859 }
11860
11861 static int tg3_close(struct net_device *dev)
11862 {
11863 struct tg3 *tp = netdev_priv(dev);
11864
11865 if (tp->pcierr_recovery) {
11866 netdev_err(dev, "Failed to close device. PCI error recovery "
11867 "in progress\n");
11868 return -EAGAIN;
11869 }
11870
11871 tg3_stop(tp);
11872
11873 if (pci_device_is_present(tp->pdev)) {
11874 tg3_power_down_prepare(tp);
11875
11876 tg3_carrier_off(tp);
11877 }
11878 return 0;
11879 }
11880
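/* Hardware keeps 64-bit statistics as {high, low} 32-bit pairs;
 * get_stat64() folds a pair into one u64.  For example, high = 0x1,
 * low = 0x2 yields 0x100000002.
 */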
11881 static inline u64 get_stat64(tg3_stat64_t *val)
11882 {
11883 return ((u64)val->high << 32) | ((u64)val->low);
11884 }
11885
11886 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11887 {
11888 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11889
11890 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11891 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11892 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11893 u32 val;
11894
11895 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11896 tg3_writephy(tp, MII_TG3_TEST1,
11897 val | MII_TG3_TEST1_CRC_EN);
11898 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11899 } else
11900 val = 0;
11901
11902 tp->phy_crc_errors += val;
11903
11904 return tp->phy_crc_errors;
11905 }
11906
11907 return get_stat64(&hw_stats->rx_fcs_errors);
11908 }
11909
11910 #define ESTAT_ADD(member) \
11911 estats->member = old_estats->member + \
11912 get_stat64(&hw_stats->member)
11913
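/* ESTAT_ADD() computes each counter as "snapshot preserved from before
 * the last chip reset" (estats_prev) plus the live hardware counter,
 * so totals survive resets that clear the hardware statistics block.
 */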
11914 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11915 {
11916 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11917 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11918
11919 ESTAT_ADD(rx_octets);
11920 ESTAT_ADD(rx_fragments);
11921 ESTAT_ADD(rx_ucast_packets);
11922 ESTAT_ADD(rx_mcast_packets);
11923 ESTAT_ADD(rx_bcast_packets);
11924 ESTAT_ADD(rx_fcs_errors);
11925 ESTAT_ADD(rx_align_errors);
11926 ESTAT_ADD(rx_xon_pause_rcvd);
11927 ESTAT_ADD(rx_xoff_pause_rcvd);
11928 ESTAT_ADD(rx_mac_ctrl_rcvd);
11929 ESTAT_ADD(rx_xoff_entered);
11930 ESTAT_ADD(rx_frame_too_long_errors);
11931 ESTAT_ADD(rx_jabbers);
11932 ESTAT_ADD(rx_undersize_packets);
11933 ESTAT_ADD(rx_in_length_errors);
11934 ESTAT_ADD(rx_out_length_errors);
11935 ESTAT_ADD(rx_64_or_less_octet_packets);
11936 ESTAT_ADD(rx_65_to_127_octet_packets);
11937 ESTAT_ADD(rx_128_to_255_octet_packets);
11938 ESTAT_ADD(rx_256_to_511_octet_packets);
11939 ESTAT_ADD(rx_512_to_1023_octet_packets);
11940 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11941 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11942 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11943 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11944 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11945
11946 ESTAT_ADD(tx_octets);
11947 ESTAT_ADD(tx_collisions);
11948 ESTAT_ADD(tx_xon_sent);
11949 ESTAT_ADD(tx_xoff_sent);
11950 ESTAT_ADD(tx_flow_control);
11951 ESTAT_ADD(tx_mac_errors);
11952 ESTAT_ADD(tx_single_collisions);
11953 ESTAT_ADD(tx_mult_collisions);
11954 ESTAT_ADD(tx_deferred);
11955 ESTAT_ADD(tx_excessive_collisions);
11956 ESTAT_ADD(tx_late_collisions);
11957 ESTAT_ADD(tx_collide_2times);
11958 ESTAT_ADD(tx_collide_3times);
11959 ESTAT_ADD(tx_collide_4times);
11960 ESTAT_ADD(tx_collide_5times);
11961 ESTAT_ADD(tx_collide_6times);
11962 ESTAT_ADD(tx_collide_7times);
11963 ESTAT_ADD(tx_collide_8times);
11964 ESTAT_ADD(tx_collide_9times);
11965 ESTAT_ADD(tx_collide_10times);
11966 ESTAT_ADD(tx_collide_11times);
11967 ESTAT_ADD(tx_collide_12times);
11968 ESTAT_ADD(tx_collide_13times);
11969 ESTAT_ADD(tx_collide_14times);
11970 ESTAT_ADD(tx_collide_15times);
11971 ESTAT_ADD(tx_ucast_packets);
11972 ESTAT_ADD(tx_mcast_packets);
11973 ESTAT_ADD(tx_bcast_packets);
11974 ESTAT_ADD(tx_carrier_sense_errors);
11975 ESTAT_ADD(tx_discards);
11976 ESTAT_ADD(tx_errors);
11977
11978 ESTAT_ADD(dma_writeq_full);
11979 ESTAT_ADD(dma_write_prioq_full);
11980 ESTAT_ADD(rxbds_empty);
11981 ESTAT_ADD(rx_discards);
11982 ESTAT_ADD(rx_errors);
11983 ESTAT_ADD(rx_threshold_hit);
11984
11985 ESTAT_ADD(dma_readq_full);
11986 ESTAT_ADD(dma_read_prioq_full);
11987 ESTAT_ADD(tx_comp_queue_full);
11988
11989 ESTAT_ADD(ring_set_send_prod_index);
11990 ESTAT_ADD(ring_status_update);
11991 ESTAT_ADD(nic_irqs);
11992 ESTAT_ADD(nic_avoided_irqs);
11993 ESTAT_ADD(nic_tx_threshold_hit);
11994
11995 ESTAT_ADD(mbuf_lwm_thresh_hit);
11996 }
11997
11998 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11999 {
12000 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
12001 struct tg3_hw_stats *hw_stats = tp->hw_stats;
12002 unsigned long rx_dropped;
12003 unsigned long tx_dropped;
12004 int i;
12005
12006 stats->rx_packets = old_stats->rx_packets +
12007 get_stat64(&hw_stats->rx_ucast_packets) +
12008 get_stat64(&hw_stats->rx_mcast_packets) +
12009 get_stat64(&hw_stats->rx_bcast_packets);
12010
12011 stats->tx_packets = old_stats->tx_packets +
12012 get_stat64(&hw_stats->tx_ucast_packets) +
12013 get_stat64(&hw_stats->tx_mcast_packets) +
12014 get_stat64(&hw_stats->tx_bcast_packets);
12015
12016 stats->rx_bytes = old_stats->rx_bytes +
12017 get_stat64(&hw_stats->rx_octets);
12018 stats->tx_bytes = old_stats->tx_bytes +
12019 get_stat64(&hw_stats->tx_octets);
12020
12021 stats->rx_errors = old_stats->rx_errors +
12022 get_stat64(&hw_stats->rx_errors);
12023 stats->tx_errors = old_stats->tx_errors +
12024 get_stat64(&hw_stats->tx_errors) +
12025 get_stat64(&hw_stats->tx_mac_errors) +
12026 get_stat64(&hw_stats->tx_carrier_sense_errors) +
12027 get_stat64(&hw_stats->tx_discards);
12028
12029 stats->multicast = old_stats->multicast +
12030 get_stat64(&hw_stats->rx_mcast_packets);
12031 stats->collisions = old_stats->collisions +
12032 get_stat64(&hw_stats->tx_collisions);
12033
12034 stats->rx_length_errors = old_stats->rx_length_errors +
12035 get_stat64(&hw_stats->rx_frame_too_long_errors) +
12036 get_stat64(&hw_stats->rx_undersize_packets);
12037
12038 stats->rx_frame_errors = old_stats->rx_frame_errors +
12039 get_stat64(&hw_stats->rx_align_errors);
12040 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
12041 get_stat64(&hw_stats->tx_discards);
12042 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
12043 get_stat64(&hw_stats->tx_carrier_sense_errors);
12044
12045 stats->rx_crc_errors = old_stats->rx_crc_errors +
12046 tg3_calc_crc_errors(tp);
12047
12048 stats->rx_missed_errors = old_stats->rx_missed_errors +
12049 get_stat64(&hw_stats->rx_discards);
12050
12051 /* Aggregate per-queue counters. The per-queue counters are updated
12052 * by a single writer, race-free. The result computed by this loop
12053 * might not be 100% accurate (counters can be updated in the middle of
12054 * the loop) but the next tg3_get_nstats() will recompute the current
12055 * value so it is acceptable.
12056 *
12057 	 * Note that these counters wrap around at 4G on 32-bit machines.
12058 */
12059 rx_dropped = (unsigned long)(old_stats->rx_dropped);
12060 tx_dropped = (unsigned long)(old_stats->tx_dropped);
12061
12062 for (i = 0; i < tp->irq_cnt; i++) {
12063 struct tg3_napi *tnapi = &tp->napi[i];
12064
12065 rx_dropped += tnapi->rx_dropped;
12066 tx_dropped += tnapi->tx_dropped;
12067 }
12068
12069 stats->rx_dropped = rx_dropped;
12070 stats->tx_dropped = tx_dropped;
12071 }
12072
12073 static int tg3_get_regs_len(struct net_device *dev)
12074 {
12075 return TG3_REG_BLK_SIZE;
12076 }
12077
12078 static void tg3_get_regs(struct net_device *dev,
12079 struct ethtool_regs *regs, void *_p)
12080 {
12081 struct tg3 *tp = netdev_priv(dev);
12082
12083 regs->version = 0;
12084
12085 memset(_p, 0, TG3_REG_BLK_SIZE);
12086
12087 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12088 return;
12089
12090 tg3_full_lock(tp, 0);
12091
12092 tg3_dump_legacy_regs(tp, (u32 *)_p);
12093
12094 tg3_full_unlock(tp);
12095 }
12096
12097 static int tg3_get_eeprom_len(struct net_device *dev)
12098 {
12099 struct tg3 *tp = netdev_priv(dev);
12100
12101 return tp->nvram_size;
12102 }
12103
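/* The read below runs in three phases because NVRAM is accessed as
 * aligned 32-bit words: a leading partial word when the offset is
 * unaligned, the aligned middle section, and a trailing partial word
 * when the length is not a multiple of four.
 */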
12104 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12105 {
12106 struct tg3 *tp = netdev_priv(dev);
12107 int ret, cpmu_restore = 0;
12108 u8 *pd;
12109 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
12110 __be32 val;
12111
12112 if (tg3_flag(tp, NO_NVRAM))
12113 return -EINVAL;
12114
12115 offset = eeprom->offset;
12116 len = eeprom->len;
12117 eeprom->len = 0;
12118
12119 eeprom->magic = TG3_EEPROM_MAGIC;
12120
12121 /* Override clock, link aware and link idle modes */
12122 if (tg3_flag(tp, CPMU_PRESENT)) {
12123 cpmu_val = tr32(TG3_CPMU_CTRL);
12124 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12125 CPMU_CTRL_LINK_IDLE_MODE)) {
12126 tw32(TG3_CPMU_CTRL, cpmu_val &
12127 ~(CPMU_CTRL_LINK_AWARE_MODE |
12128 CPMU_CTRL_LINK_IDLE_MODE));
12129 cpmu_restore = 1;
12130 }
12131 }
12132 tg3_override_clk(tp);
12133
12134 if (offset & 3) {
12135 /* adjustments to start on required 4 byte boundary */
12136 b_offset = offset & 3;
12137 b_count = 4 - b_offset;
12138 if (b_count > len) {
12139 /* i.e. offset=1 len=2 */
12140 b_count = len;
12141 }
12142 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12143 if (ret)
12144 goto eeprom_done;
12145 memcpy(data, ((char *)&val) + b_offset, b_count);
12146 len -= b_count;
12147 offset += b_count;
12148 eeprom->len += b_count;
12149 }
12150
12151 /* read bytes up to the last 4 byte boundary */
12152 pd = &data[eeprom->len];
12153 for (i = 0; i < (len - (len & 3)); i += 4) {
12154 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12155 if (ret) {
12156 if (i)
12157 i -= 4;
12158 eeprom->len += i;
12159 goto eeprom_done;
12160 }
12161 memcpy(pd + i, &val, 4);
12162 if (need_resched()) {
12163 if (signal_pending(current)) {
12164 eeprom->len += i;
12165 ret = -EINTR;
12166 goto eeprom_done;
12167 }
12168 cond_resched();
12169 }
12170 }
12171 eeprom->len += i;
12172
12173 if (len & 3) {
12174 /* read last bytes not ending on 4 byte boundary */
12175 pd = &data[eeprom->len];
12176 b_count = len & 3;
12177 b_offset = offset + len - b_count;
12178 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12179 if (ret)
12180 goto eeprom_done;
12181 memcpy(pd, &val, b_count);
12182 eeprom->len += b_count;
12183 }
12184 ret = 0;
12185
12186 eeprom_done:
12187 /* Restore clock, link aware and link idle modes */
12188 tg3_restore_clk(tp);
12189 if (cpmu_restore)
12190 tw32(TG3_CPMU_CTRL, cpmu_val);
12191
12192 return ret;
12193 }
12194
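/* Writes are widened to 4-byte alignment via read-modify-write: the
 * boundary words are read back from NVRAM first (start/end below) and
 * merged with the caller's data in a temporary bounce buffer.
 */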
12195 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12196 {
12197 struct tg3 *tp = netdev_priv(dev);
12198 int ret;
12199 u32 offset, len, b_offset, odd_len;
12200 u8 *buf;
12201 __be32 start = 0, end;
12202
12203 if (tg3_flag(tp, NO_NVRAM) ||
12204 eeprom->magic != TG3_EEPROM_MAGIC)
12205 return -EINVAL;
12206
12207 offset = eeprom->offset;
12208 len = eeprom->len;
12209
12210 if ((b_offset = (offset & 3))) {
12211 /* adjustments to start on required 4 byte boundary */
12212 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12213 if (ret)
12214 return ret;
12215 len += b_offset;
12216 offset &= ~3;
12217 if (len < 4)
12218 len = 4;
12219 }
12220
12221 odd_len = 0;
12222 if (len & 3) {
12223 /* adjustments to end on required 4 byte boundary */
12224 odd_len = 1;
12225 len = (len + 3) & ~3;
12226 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12227 if (ret)
12228 return ret;
12229 }
12230
12231 buf = data;
12232 if (b_offset || odd_len) {
12233 buf = kmalloc(len, GFP_KERNEL);
12234 if (!buf)
12235 return -ENOMEM;
12236 if (b_offset)
12237 memcpy(buf, &start, 4);
12238 if (odd_len)
12239 memcpy(buf+len-4, &end, 4);
12240 memcpy(buf + b_offset, data, eeprom->len);
12241 }
12242
12243 ret = tg3_nvram_write_block(tp, offset, len, buf);
12244
12245 if (buf != data)
12246 kfree(buf);
12247
12248 return ret;
12249 }
12250
12251 static int tg3_get_link_ksettings(struct net_device *dev,
12252 struct ethtool_link_ksettings *cmd)
12253 {
12254 struct tg3 *tp = netdev_priv(dev);
12255 u32 supported, advertising;
12256
12257 if (tg3_flag(tp, USE_PHYLIB)) {
12258 struct phy_device *phydev;
12259 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12260 return -EAGAIN;
12261 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12262 phy_ethtool_ksettings_get(phydev, cmd);
12263
12264 return 0;
12265 }
12266
12267 supported = (SUPPORTED_Autoneg);
12268
12269 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12270 supported |= (SUPPORTED_1000baseT_Half |
12271 SUPPORTED_1000baseT_Full);
12272
12273 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12274 supported |= (SUPPORTED_100baseT_Half |
12275 SUPPORTED_100baseT_Full |
12276 SUPPORTED_10baseT_Half |
12277 SUPPORTED_10baseT_Full |
12278 SUPPORTED_TP);
12279 cmd->base.port = PORT_TP;
12280 } else {
12281 supported |= SUPPORTED_FIBRE;
12282 cmd->base.port = PORT_FIBRE;
12283 }
12284 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12285 supported);
12286
12287 advertising = tp->link_config.advertising;
12288 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12289 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12290 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12291 advertising |= ADVERTISED_Pause;
12292 } else {
12293 advertising |= ADVERTISED_Pause |
12294 ADVERTISED_Asym_Pause;
12295 }
12296 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12297 advertising |= ADVERTISED_Asym_Pause;
12298 }
12299 }
12300 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12301 advertising);
12302
12303 if (netif_running(dev) && tp->link_up) {
12304 cmd->base.speed = tp->link_config.active_speed;
12305 cmd->base.duplex = tp->link_config.active_duplex;
12306 ethtool_convert_legacy_u32_to_link_mode(
12307 cmd->link_modes.lp_advertising,
12308 tp->link_config.rmt_adv);
12309
12310 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12311 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12312 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12313 else
12314 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12315 }
12316 } else {
12317 cmd->base.speed = SPEED_UNKNOWN;
12318 cmd->base.duplex = DUPLEX_UNKNOWN;
12319 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12320 }
12321 cmd->base.phy_address = tp->phy_addr;
12322 cmd->base.autoneg = tp->link_config.autoneg;
12323 return 0;
12324 }
12325
12326 static int tg3_set_link_ksettings(struct net_device *dev,
12327 const struct ethtool_link_ksettings *cmd)
12328 {
12329 struct tg3 *tp = netdev_priv(dev);
12330 u32 speed = cmd->base.speed;
12331 u32 advertising;
12332
12333 if (tg3_flag(tp, USE_PHYLIB)) {
12334 struct phy_device *phydev;
12335 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12336 return -EAGAIN;
12337 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12338 return phy_ethtool_ksettings_set(phydev, cmd);
12339 }
12340
12341 if (cmd->base.autoneg != AUTONEG_ENABLE &&
12342 cmd->base.autoneg != AUTONEG_DISABLE)
12343 return -EINVAL;
12344
12345 if (cmd->base.autoneg == AUTONEG_DISABLE &&
12346 cmd->base.duplex != DUPLEX_FULL &&
12347 cmd->base.duplex != DUPLEX_HALF)
12348 return -EINVAL;
12349
12350 ethtool_convert_link_mode_to_legacy_u32(&advertising,
12351 cmd->link_modes.advertising);
12352
12353 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12354 u32 mask = ADVERTISED_Autoneg |
12355 ADVERTISED_Pause |
12356 ADVERTISED_Asym_Pause;
12357
12358 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12359 mask |= ADVERTISED_1000baseT_Half |
12360 ADVERTISED_1000baseT_Full;
12361
12362 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12363 mask |= ADVERTISED_100baseT_Half |
12364 ADVERTISED_100baseT_Full |
12365 ADVERTISED_10baseT_Half |
12366 ADVERTISED_10baseT_Full |
12367 ADVERTISED_TP;
12368 else
12369 mask |= ADVERTISED_FIBRE;
12370
12371 if (advertising & ~mask)
12372 return -EINVAL;
12373
12374 mask &= (ADVERTISED_1000baseT_Half |
12375 ADVERTISED_1000baseT_Full |
12376 ADVERTISED_100baseT_Half |
12377 ADVERTISED_100baseT_Full |
12378 ADVERTISED_10baseT_Half |
12379 ADVERTISED_10baseT_Full);
12380
12381 advertising &= mask;
12382 } else {
12383 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12384 if (speed != SPEED_1000)
12385 return -EINVAL;
12386
12387 if (cmd->base.duplex != DUPLEX_FULL)
12388 return -EINVAL;
12389 } else {
12390 if (speed != SPEED_100 &&
12391 speed != SPEED_10)
12392 return -EINVAL;
12393 }
12394 }
12395
12396 tg3_full_lock(tp, 0);
12397
12398 tp->link_config.autoneg = cmd->base.autoneg;
12399 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12400 tp->link_config.advertising = (advertising |
12401 ADVERTISED_Autoneg);
12402 tp->link_config.speed = SPEED_UNKNOWN;
12403 tp->link_config.duplex = DUPLEX_UNKNOWN;
12404 } else {
12405 tp->link_config.advertising = 0;
12406 tp->link_config.speed = speed;
12407 tp->link_config.duplex = cmd->base.duplex;
12408 }
12409
12410 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12411
12412 tg3_warn_mgmt_link_flap(tp);
12413
12414 if (netif_running(dev))
12415 tg3_setup_phy(tp, true);
12416
12417 tg3_full_unlock(tp);
12418
12419 return 0;
12420 }
12421
12422 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12423 {
12424 struct tg3 *tp = netdev_priv(dev);
12425
12426 strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12427 strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12428 strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12429 }
12430
12431 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12432 {
12433 struct tg3 *tp = netdev_priv(dev);
12434
12435 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12436 wol->supported = WAKE_MAGIC;
12437 else
12438 wol->supported = 0;
12439 wol->wolopts = 0;
12440 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12441 wol->wolopts = WAKE_MAGIC;
12442 memset(&wol->sopass, 0, sizeof(wol->sopass));
12443 }
12444
12445 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12446 {
12447 struct tg3 *tp = netdev_priv(dev);
12448 struct device *dp = &tp->pdev->dev;
12449
12450 if (wol->wolopts & ~WAKE_MAGIC)
12451 return -EINVAL;
12452 if ((wol->wolopts & WAKE_MAGIC) &&
12453 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12454 return -EINVAL;
12455
12456 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12457
12458 if (device_may_wakeup(dp))
12459 tg3_flag_set(tp, WOL_ENABLE);
12460 else
12461 tg3_flag_clear(tp, WOL_ENABLE);
12462
12463 return 0;
12464 }
12465
12466 static u32 tg3_get_msglevel(struct net_device *dev)
12467 {
12468 struct tg3 *tp = netdev_priv(dev);
12469 return tp->msg_enable;
12470 }
12471
12472 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12473 {
12474 struct tg3 *tp = netdev_priv(dev);
12475 tp->msg_enable = value;
12476 }
12477
12478 static int tg3_nway_reset(struct net_device *dev)
12479 {
12480 struct tg3 *tp = netdev_priv(dev);
12481 int r;
12482
12483 if (!netif_running(dev))
12484 return -EAGAIN;
12485
12486 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12487 return -EINVAL;
12488
12489 tg3_warn_mgmt_link_flap(tp);
12490
12491 if (tg3_flag(tp, USE_PHYLIB)) {
12492 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12493 return -EAGAIN;
12494 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12495 } else {
12496 u32 bmcr;
12497
12498 spin_lock_bh(&tp->lock);
12499 r = -EINVAL;
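		/* BMCR is deliberately read twice below; the first read is
		 * presumably a dummy to flush a stale value so the second
		 * reflects current state (an assumption -- the rationale
		 * is not documented here).
		 */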
12500 tg3_readphy(tp, MII_BMCR, &bmcr);
12501 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12502 ((bmcr & BMCR_ANENABLE) ||
12503 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12504 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12505 BMCR_ANENABLE);
12506 r = 0;
12507 }
12508 spin_unlock_bh(&tp->lock);
12509 }
12510
12511 return r;
12512 }
12513
12514 static void tg3_get_ringparam(struct net_device *dev,
12515 struct ethtool_ringparam *ering,
12516 struct kernel_ethtool_ringparam *kernel_ering,
12517 struct netlink_ext_ack *extack)
12518 {
12519 struct tg3 *tp = netdev_priv(dev);
12520
12521 ering->rx_max_pending = tp->rx_std_ring_mask;
12522 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12523 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12524 else
12525 ering->rx_jumbo_max_pending = 0;
12526
12527 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12528
12529 ering->rx_pending = tp->rx_pending;
12530 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12531 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12532 else
12533 ering->rx_jumbo_pending = 0;
12534
12535 ering->tx_pending = tp->napi[0].tx_pending;
12536 }
12537
12538 static int tg3_set_ringparam(struct net_device *dev,
12539 struct ethtool_ringparam *ering,
12540 struct kernel_ethtool_ringparam *kernel_ering,
12541 struct netlink_ext_ack *extack)
12542 {
12543 struct tg3 *tp = netdev_priv(dev);
12544 int i, irq_sync = 0, err = 0;
12545 bool reset_phy = false;
12546
12547 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12548 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12549 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12550 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12551 (tg3_flag(tp, TSO_BUG) &&
12552 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12553 return -EINVAL;
12554
12555 if (netif_running(dev)) {
12556 tg3_phy_stop(tp);
12557 tg3_netif_stop(tp);
12558 irq_sync = 1;
12559 }
12560
12561 netdev_lock(dev);
12562 tg3_full_lock(tp, irq_sync);
12563
12564 tp->rx_pending = ering->rx_pending;
12565
12566 if (tg3_flag(tp, MAX_RXPEND_64) &&
12567 tp->rx_pending > 63)
12568 tp->rx_pending = 63;
12569
12570 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12571 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12572
12573 for (i = 0; i < tp->irq_max; i++)
12574 tp->napi[i].tx_pending = ering->tx_pending;
12575
12576 if (netif_running(dev)) {
12577 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12578 /* Reset PHY to avoid PHY lock up */
12579 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12580 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12581 tg3_asic_rev(tp) == ASIC_REV_5720)
12582 reset_phy = true;
12583
12584 err = tg3_restart_hw(tp, reset_phy);
12585 if (!err)
12586 tg3_netif_start(tp);
12587 }
12588
12589 tg3_full_unlock(tp);
12590 netdev_unlock(dev);
12591
12592 if (irq_sync && !err)
12593 tg3_phy_start(tp);
12594
12595 return err;
12596 }
12597
12598 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12599 {
12600 struct tg3 *tp = netdev_priv(dev);
12601
12602 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12603
12604 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12605 epause->rx_pause = 1;
12606 else
12607 epause->rx_pause = 0;
12608
12609 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12610 epause->tx_pause = 1;
12611 else
12612 epause->tx_pause = 0;
12613 }
12614
12615 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12616 {
12617 struct tg3 *tp = netdev_priv(dev);
12618 int err = 0;
12619 bool reset_phy = false;
12620
12621 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12622 tg3_warn_mgmt_link_flap(tp);
12623
12624 if (tg3_flag(tp, USE_PHYLIB)) {
12625 struct phy_device *phydev;
12626
12627 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12628
12629 if (!phy_validate_pause(phydev, epause))
12630 return -EINVAL;
12631
12632 tp->link_config.flowctrl = 0;
12633 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12634 if (epause->rx_pause) {
12635 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12636
12637 if (epause->tx_pause) {
12638 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12639 }
12640 } else if (epause->tx_pause) {
12641 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12642 }
12643
12644 if (epause->autoneg)
12645 tg3_flag_set(tp, PAUSE_AUTONEG);
12646 else
12647 tg3_flag_clear(tp, PAUSE_AUTONEG);
12648
12649 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12650 if (phydev->autoneg) {
12651 /* phy_set_asym_pause() will
12652 * renegotiate the link to inform our
12653 * link partner of our flow control
12654 * settings, even if the flow control
12655 * is forced. Let tg3_adjust_link()
12656 * do the final flow control setup.
12657 */
12658 return 0;
12659 }
12660
12661 if (!epause->autoneg)
12662 tg3_setup_flow_control(tp, 0, 0);
12663 }
12664 } else {
12665 int irq_sync = 0;
12666
12667 if (netif_running(dev)) {
12668 tg3_netif_stop(tp);
12669 irq_sync = 1;
12670 }
12671
12672 netdev_lock(dev);
12673 tg3_full_lock(tp, irq_sync);
12674
12675 if (epause->autoneg)
12676 tg3_flag_set(tp, PAUSE_AUTONEG);
12677 else
12678 tg3_flag_clear(tp, PAUSE_AUTONEG);
12679 if (epause->rx_pause)
12680 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12681 else
12682 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12683 if (epause->tx_pause)
12684 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12685 else
12686 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12687
12688 if (netif_running(dev)) {
12689 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12690 /* Reset PHY to avoid PHY lock up */
12691 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12692 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12693 tg3_asic_rev(tp) == ASIC_REV_5720)
12694 reset_phy = true;
12695
12696 err = tg3_restart_hw(tp, reset_phy);
12697 if (!err)
12698 tg3_netif_start(tp);
12699 }
12700
12701 tg3_full_unlock(tp);
12702 netdev_unlock(dev);
12703 }
12704
12705 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12706
12707 return err;
12708 }
12709
12710 static int tg3_get_sset_count(struct net_device *dev, int sset)
12711 {
12712 switch (sset) {
12713 case ETH_SS_TEST:
12714 return TG3_NUM_TEST;
12715 case ETH_SS_STATS:
12716 return TG3_NUM_STATS;
12717 default:
12718 return -EOPNOTSUPP;
12719 }
12720 }
12721
12722 static u32 tg3_get_rx_ring_count(struct net_device *dev)
12723 {
12724 struct tg3 *tp = netdev_priv(dev);
12725
12726 if (!tg3_flag(tp, SUPPORT_MSIX))
12727 return 1;
12728
12729 if (netif_running(tp->dev))
12730 return tp->rxq_cnt;
12731
12732 return min_t(u32, netif_get_num_default_rss_queues(), tp->rxq_max);
12733 }
12734
12735 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12736 {
12737 u32 size = 0;
12738 struct tg3 *tp = netdev_priv(dev);
12739
12740 if (tg3_flag(tp, SUPPORT_MSIX))
12741 size = TG3_RSS_INDIR_TBL_SIZE;
12742
12743 return size;
12744 }
12745
12746 static int tg3_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh)
12747 {
12748 struct tg3 *tp = netdev_priv(dev);
12749 int i;
12750
12751 rxfh->hfunc = ETH_RSS_HASH_TOP;
12752 if (!rxfh->indir)
12753 return 0;
12754
12755 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12756 rxfh->indir[i] = tp->rss_ind_tbl[i];
12757
12758 return 0;
12759 }
12760
12761 static int tg3_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
12762 struct netlink_ext_ack *extack)
12763 {
12764 struct tg3 *tp = netdev_priv(dev);
12765 size_t i;
12766
12767 /* We require at least one supported parameter to be changed and no
12768 * change in any of the unsupported parameters
12769 */
12770 if (rxfh->key ||
12771 (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
12772 rxfh->hfunc != ETH_RSS_HASH_TOP))
12773 return -EOPNOTSUPP;
12774
12775 if (!rxfh->indir)
12776 return 0;
12777
12778 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12779 tp->rss_ind_tbl[i] = rxfh->indir[i];
12780
12781 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12782 return 0;
12783
12784 /* It is legal to write the indirection
12785 * table while the device is running.
12786 */
12787 tg3_full_lock(tp, 0);
12788 tg3_rss_write_indir_tbl(tp);
12789 tg3_full_unlock(tp);
12790
12791 return 0;
12792 }
12793
12794 static void tg3_get_channels(struct net_device *dev,
12795 struct ethtool_channels *channel)
12796 {
12797 struct tg3 *tp = netdev_priv(dev);
12798 u32 deflt_qs = netif_get_num_default_rss_queues();
12799
12800 channel->max_rx = tp->rxq_max;
12801 channel->max_tx = tp->txq_max;
12802
12803 if (netif_running(dev)) {
12804 channel->rx_count = tp->rxq_cnt;
12805 channel->tx_count = tp->txq_cnt;
12806 } else {
12807 if (tp->rxq_req)
12808 channel->rx_count = tp->rxq_req;
12809 else
12810 channel->rx_count = min(deflt_qs, tp->rxq_max);
12811
12812 if (tp->txq_req)
12813 channel->tx_count = tp->txq_req;
12814 else
12815 channel->tx_count = min(deflt_qs, tp->txq_max);
12816 }
12817 }
12818
12819 static int tg3_set_channels(struct net_device *dev,
12820 struct ethtool_channels *channel)
12821 {
12822 struct tg3 *tp = netdev_priv(dev);
12823
12824 if (!tg3_flag(tp, SUPPORT_MSIX))
12825 return -EOPNOTSUPP;
12826
12827 if (channel->rx_count > tp->rxq_max ||
12828 channel->tx_count > tp->txq_max)
12829 return -EINVAL;
12830
12831 tp->rxq_req = channel->rx_count;
12832 tp->txq_req = channel->tx_count;
12833
12834 if (!netif_running(dev))
12835 return 0;
12836
12837 tg3_stop(tp);
12838
12839 tg3_carrier_off(tp);
12840
12841 tg3_start(tp, true, false, false);
12842
12843 return 0;
12844 }
12845
12846 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12847 {
12848 switch (stringset) {
12849 case ETH_SS_STATS:
12850 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12851 break;
12852 case ETH_SS_TEST:
12853 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12854 break;
12855 default:
12856 WARN_ON(1); /* we need a WARN() */
12857 break;
12858 }
12859 }
12860
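/* ethtool "identify" support: returning 1 for ETHTOOL_ID_ACTIVE asks
 * the ethtool core to run the blink loop itself, calling back with
 * ETHTOOL_ID_ON / ETHTOOL_ID_OFF at the advertised once-per-second
 * cycle.
 */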
12861 static int tg3_set_phys_id(struct net_device *dev,
12862 enum ethtool_phys_id_state state)
12863 {
12864 struct tg3 *tp = netdev_priv(dev);
12865
12866 switch (state) {
12867 case ETHTOOL_ID_ACTIVE:
12868 return 1; /* cycle on/off once per second */
12869
12870 case ETHTOOL_ID_ON:
12871 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12872 LED_CTRL_1000MBPS_ON |
12873 LED_CTRL_100MBPS_ON |
12874 LED_CTRL_10MBPS_ON |
12875 LED_CTRL_TRAFFIC_OVERRIDE |
12876 LED_CTRL_TRAFFIC_BLINK |
12877 LED_CTRL_TRAFFIC_LED);
12878 break;
12879
12880 case ETHTOOL_ID_OFF:
12881 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12882 LED_CTRL_TRAFFIC_OVERRIDE);
12883 break;
12884
12885 case ETHTOOL_ID_INACTIVE:
12886 tw32(MAC_LED_CTRL, tp->led_ctrl);
12887 break;
12888 }
12889
12890 return 0;
12891 }
12892
12893 static void tg3_get_ethtool_stats(struct net_device *dev,
12894 struct ethtool_stats *estats, u64 *tmp_stats)
12895 {
12896 struct tg3 *tp = netdev_priv(dev);
12897
12898 if (tp->hw_stats)
12899 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12900 else
12901 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12902 }
12903
12904 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12905 {
12906 int i;
12907 __be32 *buf;
12908 u32 offset = 0, len = 0;
12909 u32 magic, val;
12910
12911 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12912 return NULL;
12913
12914 if (magic == TG3_EEPROM_MAGIC) {
12915 for (offset = TG3_NVM_DIR_START;
12916 offset < TG3_NVM_DIR_END;
12917 offset += TG3_NVM_DIRENT_SIZE) {
12918 if (tg3_nvram_read(tp, offset, &val))
12919 return NULL;
12920
12921 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12922 TG3_NVM_DIRTYPE_EXTVPD)
12923 break;
12924 }
12925
12926 if (offset != TG3_NVM_DIR_END) {
12927 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12928 if (tg3_nvram_read(tp, offset + 4, &offset))
12929 return NULL;
12930
12931 offset = tg3_nvram_logical_addr(tp, offset);
12932 }
12933
12934 if (!offset || !len) {
12935 offset = TG3_NVM_VPD_OFF;
12936 len = TG3_NVM_VPD_LEN;
12937 }
12938
12939 buf = kmalloc(len, GFP_KERNEL);
12940 if (!buf)
12941 return NULL;
12942
12943 for (i = 0; i < len; i += 4) {
12944 /* The data is in little-endian format in NVRAM.
12945 * Use the big-endian read routines to preserve
12946 * the byte order as it exists in NVRAM.
12947 */
12948 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12949 goto error;
12950 }
12951 *vpdlen = len;
12952 } else {
12953 buf = pci_vpd_alloc(tp->pdev, vpdlen);
12954 if (IS_ERR(buf))
12955 return NULL;
12956 }
12957
12958 return buf;
12959
12960 error:
12961 kfree(buf);
12962 return NULL;
12963 }
12964
12965 #define NVRAM_TEST_SIZE 0x100
12966 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12967 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12968 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12969 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12970 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12971 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12972 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12973 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12974
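/* NVRAM self-test dispatch, keyed off the magic word: legacy EEPROM
 * images get CRC checks (bootstrap csum at 0x10, manufacturing block
 * csum at 0xfc, plus a VPD checksum); selfboot firmware images use a
 * simple 8-bit sum; selfboot hardware-format images carry per-byte
 * parity bits instead.
 */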
12975 static int tg3_test_nvram(struct tg3 *tp)
12976 {
12977 u32 csum, magic;
12978 __be32 *buf;
12979 int i, j, k, err = 0, size;
12980 unsigned int len;
12981
12982 if (tg3_flag(tp, NO_NVRAM))
12983 return 0;
12984
12985 if (tg3_nvram_read(tp, 0, &magic) != 0)
12986 return -EIO;
12987
12988 if (magic == TG3_EEPROM_MAGIC)
12989 size = NVRAM_TEST_SIZE;
12990 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12991 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12992 TG3_EEPROM_SB_FORMAT_1) {
12993 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12994 case TG3_EEPROM_SB_REVISION_0:
12995 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12996 break;
12997 case TG3_EEPROM_SB_REVISION_2:
12998 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12999 break;
13000 case TG3_EEPROM_SB_REVISION_3:
13001 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
13002 break;
13003 case TG3_EEPROM_SB_REVISION_4:
13004 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
13005 break;
13006 case TG3_EEPROM_SB_REVISION_5:
13007 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
13008 break;
13009 case TG3_EEPROM_SB_REVISION_6:
13010 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
13011 break;
13012 default:
13013 return -EIO;
13014 }
13015 } else
13016 return 0;
13017 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13018 size = NVRAM_SELFBOOT_HW_SIZE;
13019 else
13020 return -EIO;
13021
13022 buf = kmalloc(size, GFP_KERNEL);
13023 if (buf == NULL)
13024 return -ENOMEM;
13025
13026 err = -EIO;
13027 for (i = 0, j = 0; i < size; i += 4, j++) {
13028 err = tg3_nvram_read_be32(tp, i, &buf[j]);
13029 if (err)
13030 break;
13031 }
13032 if (i < size)
13033 goto out;
13034
13035 /* Selfboot format */
13036 magic = be32_to_cpu(buf[0]);
13037 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
13038 TG3_EEPROM_MAGIC_FW) {
13039 u8 *buf8 = (u8 *) buf, csum8 = 0;
13040
13041 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
13042 TG3_EEPROM_SB_REVISION_2) {
13043 /* For rev 2, the csum doesn't include the MBA. */
13044 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
13045 csum8 += buf8[i];
13046 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
13047 csum8 += buf8[i];
13048 } else {
13049 for (i = 0; i < size; i++)
13050 csum8 += buf8[i];
13051 }
13052
13053 if (csum8 == 0) {
13054 err = 0;
13055 goto out;
13056 }
13057
13058 err = -EIO;
13059 goto out;
13060 }
13061
13062 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
13063 TG3_EEPROM_MAGIC_HW) {
13064 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
13065 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
13066 u8 *buf8 = (u8 *) buf;
13067
13068 /* Separate the parity bits and the data bytes. */
13069 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
13070 if ((i == 0) || (i == 8)) {
13071 int l;
13072 u8 msk;
13073
13074 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
13075 parity[k++] = buf8[i] & msk;
13076 i++;
13077 } else if (i == 16) {
13078 int l;
13079 u8 msk;
13080
13081 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
13082 parity[k++] = buf8[i] & msk;
13083 i++;
13084
13085 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
13086 parity[k++] = buf8[i] & msk;
13087 i++;
13088 }
13089 data[j++] = buf8[i];
13090 }
13091
13092 err = -EIO;
13093 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
13094 u8 hw8 = hweight8(data[i]);
13095
13096 if ((hw8 & 0x1) && parity[i])
13097 goto out;
13098 else if (!(hw8 & 0x1) && !parity[i])
13099 goto out;
13100 }
13101 err = 0;
13102 goto out;
13103 }
13104
13105 err = -EIO;
13106
13107 /* Bootstrap checksum at offset 0x10 */
13108 csum = calc_crc((unsigned char *) buf, 0x10);
13109
13110 /* The type of buf is __be32 *, but this value is __le32 */
13111 if (csum != le32_to_cpu((__force __le32)buf[0x10 / 4]))
13112 goto out;
13113
13114 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13115 csum = calc_crc((unsigned char *)&buf[0x74 / 4], 0x88);
13116
13117 /* The type of buf is __be32 *, but this value is __le32 */
13118 if (csum != le32_to_cpu((__force __le32)buf[0xfc / 4]))
13119 goto out;
13120
13121 kfree(buf);
13122
13123 buf = tg3_vpd_readblock(tp, &len);
13124 if (!buf)
13125 return -ENOMEM;
13126
13127 err = pci_vpd_check_csum(buf, len);
13128 /* go on if no checksum found */
13129 if (err == 1)
13130 err = 0;
13131 out:
13132 kfree(buf);
13133 return err;
13134 }
13135
13136 #define TG3_SERDES_TIMEOUT_SEC 2
13137 #define TG3_COPPER_TIMEOUT_SEC 6
13138
13139 static int tg3_test_link(struct tg3 *tp)
13140 {
13141 int i, max;
13142
13143 if (!netif_running(tp->dev))
13144 return -ENODEV;
13145
13146 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13147 max = TG3_SERDES_TIMEOUT_SEC;
13148 else
13149 max = TG3_COPPER_TIMEOUT_SEC;
13150
13151 for (i = 0; i < max; i++) {
13152 if (tp->link_up)
13153 return 0;
13154
13155 if (msleep_interruptible(1000))
13156 break;
13157 }
13158
13159 return -EIO;
13160 }
13161
13162 /* Only test the commonly used registers */
13163 static int tg3_test_registers(struct tg3 *tp)
13164 {
13165 int i, is_5705, is_5750;
13166 u32 offset, read_mask, write_mask, val, save_val, read_val;
13167 static struct {
13168 u16 offset;
13169 u16 flags;
13170 #define TG3_FL_5705 0x1
13171 #define TG3_FL_NOT_5705 0x2
13172 #define TG3_FL_NOT_5788 0x4
13173 #define TG3_FL_NOT_5750 0x8
13174 u32 read_mask;
13175 u32 write_mask;
13176 } reg_tbl[] = {
13177 /* MAC Control Registers */
13178 { MAC_MODE, TG3_FL_NOT_5705,
13179 0x00000000, 0x00ef6f8c },
13180 { MAC_MODE, TG3_FL_5705,
13181 0x00000000, 0x01ef6b8c },
13182 { MAC_STATUS, TG3_FL_NOT_5705,
13183 0x03800107, 0x00000000 },
13184 { MAC_STATUS, TG3_FL_5705,
13185 0x03800100, 0x00000000 },
13186 { MAC_ADDR_0_HIGH, 0x0000,
13187 0x00000000, 0x0000ffff },
13188 { MAC_ADDR_0_LOW, 0x0000,
13189 0x00000000, 0xffffffff },
13190 { MAC_RX_MTU_SIZE, 0x0000,
13191 0x00000000, 0x0000ffff },
13192 { MAC_TX_MODE, 0x0000,
13193 0x00000000, 0x00000070 },
13194 { MAC_TX_LENGTHS, 0x0000,
13195 0x00000000, 0x00003fff },
13196 { MAC_RX_MODE, TG3_FL_NOT_5705,
13197 0x00000000, 0x000007fc },
13198 { MAC_RX_MODE, TG3_FL_5705,
13199 0x00000000, 0x000007dc },
13200 { MAC_HASH_REG_0, 0x0000,
13201 0x00000000, 0xffffffff },
13202 { MAC_HASH_REG_1, 0x0000,
13203 0x00000000, 0xffffffff },
13204 { MAC_HASH_REG_2, 0x0000,
13205 0x00000000, 0xffffffff },
13206 { MAC_HASH_REG_3, 0x0000,
13207 0x00000000, 0xffffffff },
13208
13209 /* Receive Data and Receive BD Initiator Control Registers. */
13210 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13211 0x00000000, 0xffffffff },
13212 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13213 0x00000000, 0xffffffff },
13214 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13215 0x00000000, 0x00000003 },
13216 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13217 0x00000000, 0xffffffff },
13218 { RCVDBDI_STD_BD+0, 0x0000,
13219 0x00000000, 0xffffffff },
13220 { RCVDBDI_STD_BD+4, 0x0000,
13221 0x00000000, 0xffffffff },
13222 { RCVDBDI_STD_BD+8, 0x0000,
13223 0x00000000, 0xffff0002 },
13224 { RCVDBDI_STD_BD+0xc, 0x0000,
13225 0x00000000, 0xffffffff },
13226
13227 /* Receive BD Initiator Control Registers. */
13228 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13229 0x00000000, 0xffffffff },
13230 { RCVBDI_STD_THRESH, TG3_FL_5705,
13231 0x00000000, 0x000003ff },
13232 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13233 0x00000000, 0xffffffff },
13234
13235 /* Host Coalescing Control Registers. */
13236 { HOSTCC_MODE, TG3_FL_NOT_5705,
13237 0x00000000, 0x00000004 },
13238 { HOSTCC_MODE, TG3_FL_5705,
13239 0x00000000, 0x000000f6 },
13240 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13241 0x00000000, 0xffffffff },
13242 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13243 0x00000000, 0x000003ff },
13244 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13245 0x00000000, 0xffffffff },
13246 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13247 0x00000000, 0x000003ff },
13248 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13249 0x00000000, 0xffffffff },
13250 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13251 0x00000000, 0x000000ff },
13252 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13253 0x00000000, 0xffffffff },
13254 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13255 0x00000000, 0x000000ff },
13256 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13257 0x00000000, 0xffffffff },
13258 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13259 0x00000000, 0xffffffff },
13260 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13261 0x00000000, 0xffffffff },
13262 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13263 0x00000000, 0x000000ff },
13264 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13265 0x00000000, 0xffffffff },
13266 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13267 0x00000000, 0x000000ff },
13268 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13269 0x00000000, 0xffffffff },
13270 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13271 0x00000000, 0xffffffff },
13272 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13273 0x00000000, 0xffffffff },
13274 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13275 0x00000000, 0xffffffff },
13276 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13277 0x00000000, 0xffffffff },
13278 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13279 0xffffffff, 0x00000000 },
13280 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13281 0xffffffff, 0x00000000 },
13282
13283 /* Buffer Manager Control Registers. */
13284 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13285 0x00000000, 0x007fff80 },
13286 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13287 0x00000000, 0x007fffff },
13288 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13289 0x00000000, 0x0000003f },
13290 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13291 0x00000000, 0x000001ff },
13292 { BUFMGR_MB_HIGH_WATER, 0x0000,
13293 0x00000000, 0x000001ff },
13294 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13295 0xffffffff, 0x00000000 },
13296 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13297 0xffffffff, 0x00000000 },
13298
13299 /* Mailbox Registers */
13300 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13301 0x00000000, 0x000001ff },
13302 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13303 0x00000000, 0x000001ff },
13304 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13305 0x00000000, 0x000007ff },
13306 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13307 0x00000000, 0x000001ff },
13308
13309 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13310 };
13311
13312 is_5705 = is_5750 = 0;
13313 if (tg3_flag(tp, 5705_PLUS)) {
13314 is_5705 = 1;
13315 if (tg3_flag(tp, 5750_PLUS))
13316 is_5750 = 1;
13317 }
13318
13319 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13320 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13321 continue;
13322
13323 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13324 continue;
13325
13326 if (tg3_flag(tp, IS_5788) &&
13327 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13328 continue;
13329
13330 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13331 continue;
13332
13333 offset = (u32) reg_tbl[i].offset;
13334 read_mask = reg_tbl[i].read_mask;
13335 write_mask = reg_tbl[i].write_mask;
13336
13337 /* Save the original register content */
13338 save_val = tr32(offset);
13339
13340 /* Determine the read-only value. */
13341 read_val = save_val & read_mask;
13342
13343 /* Write zero to the register, then make sure the read-only bits
13344 * are not changed and the read/write bits are all zeros.
13345 */
13346 tw32(offset, 0);
13347
13348 val = tr32(offset);
13349
13350 /* Test the read-only and read/write bits. */
13351 if (((val & read_mask) != read_val) || (val & write_mask))
13352 goto out;
13353
13354 /* Write ones to all the bits covered by read_mask and write_mask,
13355 * then make sure the read-only bits are not changed and the
13356 * read/write bits are all ones.
13357 */
13358 tw32(offset, read_mask | write_mask);
13359
13360 val = tr32(offset);
13361
13362 /* Test the read-only bits. */
13363 if ((val & read_mask) != read_val)
13364 goto out;
13365
13366 /* Test the read/write bits. */
13367 if ((val & write_mask) != write_mask)
13368 goto out;
13369
13370 tw32(offset, save_val);
13371 }
13372
13373 return 0;
13374
13375 out:
13376 if (netif_msg_hw(tp))
13377 netdev_err(tp->dev,
13378 "Register test failed at offset %x\n", offset);
13379 tw32(offset, save_val);
13380 return -EIO;
13381 }
13382
13383 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13384 {
13385 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13386 int i;
13387 u32 j;
13388
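/* Sweep the window once per pattern, writing and then reading back each
 * 32-bit word; any mismatch fails the whole region.
 */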
13389 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13390 for (j = 0; j < len; j += 4) {
13391 u32 val;
13392
13393 tg3_write_mem(tp, offset + j, test_pattern[i]);
13394 tg3_read_mem(tp, offset + j, &val);
13395 if (val != test_pattern[i])
13396 return -EIO;
13397 }
13398 }
13399 return 0;
13400 }
13401
13402 static int tg3_test_memory(struct tg3 *tp)
13403 {
13404 static struct mem_entry {
13405 u32 offset;
13406 u32 len;
13407 } mem_tbl_570x[] = {
13408 { 0x00000000, 0x00b50},
13409 { 0x00002000, 0x1c000},
13410 { 0xffffffff, 0x00000}
13411 }, mem_tbl_5705[] = {
13412 { 0x00000100, 0x0000c},
13413 { 0x00000200, 0x00008},
13414 { 0x00004000, 0x00800},
13415 { 0x00006000, 0x01000},
13416 { 0x00008000, 0x02000},
13417 { 0x00010000, 0x0e000},
13418 { 0xffffffff, 0x00000}
13419 }, mem_tbl_5755[] = {
13420 { 0x00000200, 0x00008},
13421 { 0x00004000, 0x00800},
13422 { 0x00006000, 0x00800},
13423 { 0x00008000, 0x02000},
13424 { 0x00010000, 0x0c000},
13425 { 0xffffffff, 0x00000}
13426 }, mem_tbl_5906[] = {
13427 { 0x00000200, 0x00008},
13428 { 0x00004000, 0x00400},
13429 { 0x00006000, 0x00400},
13430 { 0x00008000, 0x01000},
13431 { 0x00010000, 0x01000},
13432 { 0xffffffff, 0x00000}
13433 }, mem_tbl_5717[] = {
13434 { 0x00000200, 0x00008},
13435 { 0x00010000, 0x0a000},
13436 { 0x00020000, 0x13c00},
13437 { 0xffffffff, 0x00000}
13438 }, mem_tbl_57765[] = {
13439 { 0x00000200, 0x00008},
13440 { 0x00004000, 0x00800},
13441 { 0x00006000, 0x09800},
13442 { 0x00010000, 0x0a000},
13443 { 0xffffffff, 0x00000}
13444 };
13445 struct mem_entry *mem_tbl;
13446 int err = 0;
13447 int i;
13448
13449 if (tg3_flag(tp, 5717_PLUS))
13450 mem_tbl = mem_tbl_5717;
13451 else if (tg3_flag(tp, 57765_CLASS) ||
13452 tg3_asic_rev(tp) == ASIC_REV_5762)
13453 mem_tbl = mem_tbl_57765;
13454 else if (tg3_flag(tp, 5755_PLUS))
13455 mem_tbl = mem_tbl_5755;
13456 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13457 mem_tbl = mem_tbl_5906;
13458 else if (tg3_flag(tp, 5705_PLUS))
13459 mem_tbl = mem_tbl_5705;
13460 else
13461 mem_tbl = mem_tbl_570x;
13462
13463 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13464 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13465 if (err)
13466 break;
13467 }
13468
13469 return err;
13470 }
13471
13472 #define TG3_TSO_MSS 500
13473
13474 #define TG3_TSO_IP_HDR_LEN 20
13475 #define TG3_TSO_TCP_HDR_LEN 20
13476 #define TG3_TSO_TCP_OPT_LEN 12
13477
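/* Frame template for the TSO loopback test: Ethertype 0x0800 followed by
 * a 20-byte IPv4 header (10.0.0.1 -> 10.0.0.2, protocol TCP) and a
 * 32-byte TCP header whose final 12 bytes are timestamp options.  The IP
 * total length is filled in at transmit time, and the TCP checksum is
 * cleared when the hardware TSO engine computes it.
 */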
13478 static const u8 tg3_tso_header[] = {
13479 0x08, 0x00,
13480 0x45, 0x00, 0x00, 0x00,
13481 0x00, 0x00, 0x40, 0x00,
13482 0x40, 0x06, 0x00, 0x00,
13483 0x0a, 0x00, 0x00, 0x01,
13484 0x0a, 0x00, 0x00, 0x02,
13485 0x0d, 0x00, 0xe0, 0x00,
13486 0x00, 0x00, 0x01, 0x00,
13487 0x00, 0x00, 0x02, 0x00,
13488 0x80, 0x10, 0x10, 0x00,
13489 0x14, 0x09, 0x00, 0x00,
13490 0x01, 0x01, 0x08, 0x0a,
13491 0x11, 0x11, 0x11, 0x11,
13492 0x11, 0x11, 0x11, 0x11,
13493 };
13494
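/* Build one test frame, push it through the transmit ring, then poll the
 * status block until the tx consumer and rx producer indices advance, and
 * finally compare the received payload byte for byte against what was
 * sent.  Returns 0 on success, -ENOMEM or -EIO on failure.
 */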
13495 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13496 {
13497 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13498 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13499 u32 budget;
13500 struct sk_buff *skb;
13501 u8 *tx_data, *rx_data;
13502 dma_addr_t map;
13503 int num_pkts, tx_len, rx_len, i, err;
13504 struct tg3_rx_buffer_desc *desc;
13505 struct tg3_napi *tnapi, *rnapi;
13506 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13507
13508 tnapi = &tp->napi[0];
13509 rnapi = &tp->napi[0];
13510 if (tp->irq_cnt > 1) {
13511 if (tg3_flag(tp, ENABLE_RSS))
13512 rnapi = &tp->napi[1];
13513 if (tg3_flag(tp, ENABLE_TSS))
13514 tnapi = &tp->napi[1];
13515 }
13516 coal_now = tnapi->coal_now | rnapi->coal_now;
13517
13518 err = -EIO;
13519
13520 tx_len = pktsz;
13521 skb = netdev_alloc_skb(tp->dev, tx_len);
13522 if (!skb)
13523 return -ENOMEM;
13524
13525 tx_data = skb_put(skb, tx_len);
13526 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13527 memset(tx_data + ETH_ALEN, 0x0, 8);
13528
13529 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13530
13531 if (tso_loopback) {
13532 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13533
13534 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13535 TG3_TSO_TCP_OPT_LEN;
13536
13537 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13538 sizeof(tg3_tso_header));
13539 mss = TG3_TSO_MSS;
13540
13541 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13542 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13543
13544 /* Set the total length field in the IP header */
13545 iph->tot_len = htons((u16)(mss + hdr_len));
13546
13547 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13548 TXD_FLAG_CPU_POST_DMA);
13549
13550 if (tg3_flag(tp, HW_TSO_1) ||
13551 tg3_flag(tp, HW_TSO_2) ||
13552 tg3_flag(tp, HW_TSO_3)) {
13553 struct tcphdr *th;
13554 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13555 th = (struct tcphdr *)&tx_data[val];
13556 th->check = 0;
13557 } else
13558 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13559
13560 if (tg3_flag(tp, HW_TSO_3)) {
13561 mss |= (hdr_len & 0xc) << 12;
13562 if (hdr_len & 0x10)
13563 base_flags |= 0x00000010;
13564 base_flags |= (hdr_len & 0x3e0) << 5;
13565 } else if (tg3_flag(tp, HW_TSO_2))
13566 mss |= hdr_len << 9;
13567 else if (tg3_flag(tp, HW_TSO_1) ||
13568 tg3_asic_rev(tp) == ASIC_REV_5705) {
13569 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13570 } else {
13571 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13572 }
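/* Each hardware TSO generation above stows the header length in different
 * spare bits: HW_TSO_3 scatters hdr_len across unused mss and base_flags
 * bits, HW_TSO_2 packs it into mss above the MSS value, and the older
 * engines encode only the TCP option length.
 */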
13573
13574 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13575 } else {
13576 num_pkts = 1;
13577 data_off = ETH_HLEN;
13578
13579 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13580 tx_len > VLAN_ETH_FRAME_LEN)
13581 base_flags |= TXD_FLAG_JMB_PKT;
13582 }
13583
13584 for (i = data_off; i < tx_len; i++)
13585 tx_data[i] = (u8) (i & 0xff);
13586
13587 map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13588 if (dma_mapping_error(&tp->pdev->dev, map)) {
13589 dev_kfree_skb(skb);
13590 return -EIO;
13591 }
13592
13593 val = tnapi->tx_prod;
13594 tnapi->tx_buffers[val].skb = skb;
13595 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13596
13597 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13598 rnapi->coal_now);
13599
13600 udelay(10);
13601
13602 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13603
13604 budget = tg3_tx_avail(tnapi);
13605 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13606 base_flags | TXD_FLAG_END, mss, 0)) {
13607 tnapi->tx_buffers[val].skb = NULL;
13608 dev_kfree_skb(skb);
13609 return -EIO;
13610 }
13611
13612 tnapi->tx_prod++;
13613
13614 /* Sync BD data before updating mailbox */
13615 wmb();
13616
13617 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13618 tr32_mailbox(tnapi->prodmbox);
13619
13620 udelay(10);
13621
13622 /* Poll for up to 350 usec (35 x 10 usec) to allow enough time on some 10/100 Mbps devices. */
13623 for (i = 0; i < 35; i++) {
13624 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13625 coal_now);
13626
13627 udelay(10);
13628
13629 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13630 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13631 if ((tx_idx == tnapi->tx_prod) &&
13632 (rx_idx == (rx_start_idx + num_pkts)))
13633 break;
13634 }
13635
13636 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13637 dev_kfree_skb(skb);
13638
13639 if (tx_idx != tnapi->tx_prod)
13640 goto out;
13641
13642 if (rx_idx != rx_start_idx + num_pkts)
13643 goto out;
13644
13645 val = data_off;
13646 while (rx_idx != rx_start_idx) {
13647 desc = &rnapi->rx_rcb[rx_start_idx++];
13648 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13649 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13650
13651 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13652 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13653 goto out;
13654
13655 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13656 - ETH_FCS_LEN;
13657
13658 if (!tso_loopback) {
13659 if (rx_len != tx_len)
13660 goto out;
13661
13662 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13663 if (opaque_key != RXD_OPAQUE_RING_STD)
13664 goto out;
13665 } else {
13666 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13667 goto out;
13668 }
13669 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13670 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13671 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13672 goto out;
13673 }
13674
13675 if (opaque_key == RXD_OPAQUE_RING_STD) {
13676 rx_data = tpr->rx_std_buffers[desc_idx].data;
13677 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13678 mapping);
13679 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13680 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13681 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13682 mapping);
13683 } else
13684 goto out;
13685
13686 dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
13687 DMA_FROM_DEVICE);
13688
13689 rx_data += TG3_RX_OFFSET(tp);
13690 for (i = data_off; i < rx_len; i++, val++) {
13691 if (*(rx_data + i) != (u8) (val & 0xff))
13692 goto out;
13693 }
13694 }
13695
13696 err = 0;
13697
13698 /* tg3_free_rings will unmap and free the rx_data */
13699 out:
13700 return err;
13701 }
13702
13703 #define TG3_STD_LOOPBACK_FAILED 1
13704 #define TG3_JMB_LOOPBACK_FAILED 2
13705 #define TG3_TSO_LOOPBACK_FAILED 4
13706 #define TG3_LOOPBACK_FAILED \
13707 (TG3_STD_LOOPBACK_FAILED | \
13708 TG3_JMB_LOOPBACK_FAILED | \
13709 TG3_TSO_LOOPBACK_FAILED)
13710
13711 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13712 {
13713 int err = -EIO;
13714 u32 eee_cap;
13715 u32 jmb_pkt_sz = 9000;
13716
13717 if (tp->dma_limit)
13718 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13719
13720 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13721 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13722
13723 if (!netif_running(tp->dev)) {
13724 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13725 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13726 if (do_extlpbk)
13727 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13728 goto done;
13729 }
13730
13731 err = tg3_reset_hw(tp, true);
13732 if (err) {
13733 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13734 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13735 if (do_extlpbk)
13736 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13737 goto done;
13738 }
13739
13740 if (tg3_flag(tp, ENABLE_RSS)) {
13741 int i;
13742
13743 /* Reroute all rx packets to the 1st queue */
13744 for (i = MAC_RSS_INDIR_TBL_0;
13745 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13746 tw32(i, 0x0);
13747 }
13748
13749 /* HW errata - MAC loopback fails in some cases on 5780.
13750 * Normal traffic and PHY loopback are not affected by the
13751 * errata.  Also, the MAC loopback test is deprecated for
13752 * all newer ASIC revisions.
13753 */
13754 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13755 !tg3_flag(tp, CPMU_PRESENT)) {
13756 tg3_mac_loopback(tp, true);
13757
13758 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13759 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13760
13761 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13762 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13763 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13764
13765 tg3_mac_loopback(tp, false);
13766 }
13767
13768 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13769 !tg3_flag(tp, USE_PHYLIB)) {
13770 int i;
13771
13772 tg3_phy_lpbk_set(tp, 0, false);
13773
13774 /* Wait for link */
13775 for (i = 0; i < 100; i++) {
13776 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13777 break;
13778 mdelay(1);
13779 }
13780
13781 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13782 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13783 if (tg3_flag(tp, TSO_CAPABLE) &&
13784 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13785 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13786 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13787 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13788 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13789
13790 if (do_extlpbk) {
13791 tg3_phy_lpbk_set(tp, 0, true);
13792
13793 /* All link indications report up, but the hardware
13794 * isn't really ready for about 20 msec. Double it
13795 * to be sure.
13796 */
13797 mdelay(40);
13798
13799 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13800 data[TG3_EXT_LOOPB_TEST] |=
13801 TG3_STD_LOOPBACK_FAILED;
13802 if (tg3_flag(tp, TSO_CAPABLE) &&
13803 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13804 data[TG3_EXT_LOOPB_TEST] |=
13805 TG3_TSO_LOOPBACK_FAILED;
13806 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13807 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13808 data[TG3_EXT_LOOPB_TEST] |=
13809 TG3_JMB_LOOPBACK_FAILED;
13810 }
13811
13812 /* Re-enable gphy autopowerdown. */
13813 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13814 tg3_phy_toggle_apd(tp, true);
13815 }
13816
13817 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13818 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13819
13820 done:
13821 tp->phy_flags |= eee_cap;
13822
13823 return err;
13824 }
13825
13826 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13827 u64 *data)
13828 {
13829 struct tg3 *tp = netdev_priv(dev);
13830 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13831
13832 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13833 if (tg3_power_up(tp)) {
13834 etest->flags |= ETH_TEST_FL_FAILED;
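/* Any nonzero value marks a test as failed; the byte fill below leaves
 * every u64 slot nonzero.
 */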
13835 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13836 return;
13837 }
13838 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13839 }
13840
13841 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13842
13843 if (tg3_test_nvram(tp) != 0) {
13844 etest->flags |= ETH_TEST_FL_FAILED;
13845 data[TG3_NVRAM_TEST] = 1;
13846 }
13847 if (!doextlpbk && tg3_test_link(tp)) {
13848 etest->flags |= ETH_TEST_FL_FAILED;
13849 data[TG3_LINK_TEST] = 1;
13850 }
13851 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13852 int err, err2 = 0, irq_sync = 0;
13853
13854 if (netif_running(dev)) {
13855 tg3_phy_stop(tp);
13856 tg3_netif_stop(tp);
13857 irq_sync = 1;
13858 }
13859
13860 tg3_full_lock(tp, irq_sync);
13861 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13862 err = tg3_nvram_lock(tp);
13863 tg3_halt_cpu(tp, RX_CPU_BASE);
13864 if (!tg3_flag(tp, 5705_PLUS))
13865 tg3_halt_cpu(tp, TX_CPU_BASE);
13866 if (!err)
13867 tg3_nvram_unlock(tp);
13868
13869 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13870 tg3_phy_reset(tp);
13871
13872 if (tg3_test_registers(tp) != 0) {
13873 etest->flags |= ETH_TEST_FL_FAILED;
13874 data[TG3_REGISTER_TEST] = 1;
13875 }
13876
13877 if (tg3_test_memory(tp) != 0) {
13878 etest->flags |= ETH_TEST_FL_FAILED;
13879 data[TG3_MEMORY_TEST] = 1;
13880 }
13881
13882 if (doextlpbk)
13883 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13884
13885 if (tg3_test_loopback(tp, data, doextlpbk))
13886 etest->flags |= ETH_TEST_FL_FAILED;
13887
13888 tg3_full_unlock(tp);
13889
13890 if (tg3_test_interrupt(tp) != 0) {
13891 etest->flags |= ETH_TEST_FL_FAILED;
13892 data[TG3_INTERRUPT_TEST] = 1;
13893 }
13894
13895 netdev_lock(dev);
13896 tg3_full_lock(tp, 0);
13897
13898 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13899 if (netif_running(dev)) {
13900 tg3_flag_set(tp, INIT_COMPLETE);
13901 err2 = tg3_restart_hw(tp, true);
13902 if (!err2)
13903 tg3_netif_start(tp);
13904 }
13905
13906 tg3_full_unlock(tp);
13907 netdev_unlock(dev);
13908
13909 if (irq_sync && !err2)
13910 tg3_phy_start(tp);
13911 }
13912 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13913 tg3_power_down_prepare(tp);
13914
13915 }
13916
13917 static int tg3_hwtstamp_set(struct net_device *dev,
13918 struct kernel_hwtstamp_config *stmpconf,
13919 struct netlink_ext_ack *extack)
13920 {
13921 struct tg3 *tp = netdev_priv(dev);
13922
13923 if (!tg3_flag(tp, PTP_CAPABLE))
13924 return -EOPNOTSUPP;
13925
13926 if (stmpconf->tx_type != HWTSTAMP_TX_ON &&
13927 stmpconf->tx_type != HWTSTAMP_TX_OFF)
13928 return -ERANGE;
13929
13930 switch (stmpconf->rx_filter) {
13931 case HWTSTAMP_FILTER_NONE:
13932 tp->rxptpctl = 0;
13933 break;
13934 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13935 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13936 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13937 break;
13938 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13939 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13940 TG3_RX_PTP_CTL_SYNC_EVNT;
13941 break;
13942 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13943 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13944 TG3_RX_PTP_CTL_DELAY_REQ;
13945 break;
13946 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13947 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13948 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13949 break;
13950 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13951 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13952 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13953 break;
13954 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13955 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13956 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13957 break;
13958 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13959 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13960 TG3_RX_PTP_CTL_SYNC_EVNT;
13961 break;
13962 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13963 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13964 TG3_RX_PTP_CTL_SYNC_EVNT;
13965 break;
13966 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13967 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13968 TG3_RX_PTP_CTL_SYNC_EVNT;
13969 break;
13970 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13971 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13972 TG3_RX_PTP_CTL_DELAY_REQ;
13973 break;
13974 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13975 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13976 TG3_RX_PTP_CTL_DELAY_REQ;
13977 break;
13978 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13979 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13980 TG3_RX_PTP_CTL_DELAY_REQ;
13981 break;
13982 default:
13983 return -ERANGE;
13984 }
13985
13986 if (netif_running(dev) && tp->rxptpctl)
13987 tw32(TG3_RX_PTP_CTL,
13988 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13989
13990 if (stmpconf->tx_type == HWTSTAMP_TX_ON)
13991 tg3_flag_set(tp, TX_TSTAMP_EN);
13992 else
13993 tg3_flag_clear(tp, TX_TSTAMP_EN);
13994
13995 return 0;
13996 }
13997
13998 static int tg3_hwtstamp_get(struct net_device *dev,
13999 struct kernel_hwtstamp_config *stmpconf)
14000 {
14001 struct tg3 *tp = netdev_priv(dev);
14002
14003 if (!tg3_flag(tp, PTP_CAPABLE))
14004 return -EOPNOTSUPP;
14005
14006 stmpconf->flags = 0;
14007 stmpconf->tx_type = tg3_flag(tp, TX_TSTAMP_EN) ?
14008 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
14009
14010 switch (tp->rxptpctl) {
14011 case 0:
14012 stmpconf->rx_filter = HWTSTAMP_FILTER_NONE;
14013 break;
14014 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
14015 stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
14016 break;
14017 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14018 stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
14019 break;
14020 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14021 stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
14022 break;
14023 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14024 stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
14025 break;
14026 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14027 stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
14028 break;
14029 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14030 stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
14031 break;
14032 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14033 stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
14034 break;
14035 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14036 stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
14037 break;
14038 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14039 stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
14040 break;
14041 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14042 stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
14043 break;
14044 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14045 stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
14046 break;
14047 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14048 stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
14049 break;
14050 default:
14051 WARN_ON_ONCE(1);
14052 return -ERANGE;
14053 }
14054
14055 return 0;
14056 }
14057
14058 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
14059 {
14060 struct mii_ioctl_data *data = if_mii(ifr);
14061 struct tg3 *tp = netdev_priv(dev);
14062 int err;
14063
14064 if (tg3_flag(tp, USE_PHYLIB)) {
14065 struct phy_device *phydev;
14066 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
14067 return -EAGAIN;
14068 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
14069 return phy_mii_ioctl(phydev, ifr, cmd);
14070 }
14071
14072 switch (cmd) {
14073 case SIOCGMIIPHY:
14074 data->phy_id = tp->phy_addr;
14075
14076 fallthrough;
14077 case SIOCGMIIREG: {
14078 u32 mii_regval;
14079
14080 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14081 break; /* We have no PHY */
14082
14083 if (!netif_running(dev))
14084 return -EAGAIN;
14085
14086 spin_lock_bh(&tp->lock);
14087 err = __tg3_readphy(tp, data->phy_id & 0x1f,
14088 data->reg_num & 0x1f, &mii_regval);
14089 spin_unlock_bh(&tp->lock);
14090
14091 data->val_out = mii_regval;
14092
14093 return err;
14094 }
14095
14096 case SIOCSMIIREG:
14097 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14098 break; /* We have no PHY */
14099
14100 if (!netif_running(dev))
14101 return -EAGAIN;
14102
14103 spin_lock_bh(&tp->lock);
14104 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14105 data->reg_num & 0x1f, data->val_in);
14106 spin_unlock_bh(&tp->lock);
14107
14108 return err;
14109
14110 default:
14111 /* do nothing */
14112 break;
14113 }
14114 return -EOPNOTSUPP;
14115 }
14116
14117 static int tg3_get_coalesce(struct net_device *dev,
14118 struct ethtool_coalesce *ec,
14119 struct kernel_ethtool_coalesce *kernel_coal,
14120 struct netlink_ext_ack *extack)
14121 {
14122 struct tg3 *tp = netdev_priv(dev);
14123
14124 memcpy(ec, &tp->coal, sizeof(*ec));
14125 return 0;
14126 }
14127
14128 static int tg3_set_coalesce(struct net_device *dev,
14129 struct ethtool_coalesce *ec,
14130 struct kernel_ethtool_coalesce *kernel_coal,
14131 struct netlink_ext_ack *extack)
14132 {
14133 struct tg3 *tp = netdev_priv(dev);
14134 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14135 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14136
14137 if (!tg3_flag(tp, 5705_PLUS)) {
14138 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14139 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14140 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14141 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14142 }
14143
14144 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14145 (!ec->rx_coalesce_usecs) ||
14146 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14147 (!ec->tx_coalesce_usecs) ||
14148 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14149 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14150 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14151 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14152 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14153 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14154 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14155 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14156 return -EINVAL;
14157
14158 /* Only copy relevant parameters, ignore all others. */
14159 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14160 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14161 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14162 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14163 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14164 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14165 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14166 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14167 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14168
14169 if (netif_running(dev)) {
14170 tg3_full_lock(tp, 0);
14171 __tg3_set_coalesce(tp, &tp->coal);
14172 tg3_full_unlock(tp);
14173 }
14174 return 0;
14175 }
14176
14177 static int tg3_set_eee(struct net_device *dev, struct ethtool_keee *edata)
14178 {
14179 struct tg3 *tp = netdev_priv(dev);
14180
14181 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14182 netdev_warn(tp->dev, "Board does not support EEE!\n");
14183 return -EOPNOTSUPP;
14184 }
14185
14186 if (!linkmode_equal(edata->advertised, tp->eee.advertised)) {
14187 netdev_warn(tp->dev,
14188 "Direct manipulation of EEE advertisement is not supported\n");
14189 return -EINVAL;
14190 }
14191
14192 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14193 netdev_warn(tp->dev,
14194 "Maximal Tx Lpi timer supported is %#x(u)\n",
14195 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14196 return -EINVAL;
14197 }
14198
14199 tp->eee.eee_enabled = edata->eee_enabled;
14200 tp->eee.tx_lpi_enabled = edata->tx_lpi_enabled;
14201 tp->eee.tx_lpi_timer = edata->tx_lpi_timer;
14202
14203 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14204 tg3_warn_mgmt_link_flap(tp);
14205
14206 if (netif_running(tp->dev)) {
14207 tg3_full_lock(tp, 0);
14208 tg3_setup_eee(tp);
14209 tg3_phy_reset(tp);
14210 tg3_full_unlock(tp);
14211 }
14212
14213 return 0;
14214 }
14215
14216 static int tg3_get_eee(struct net_device *dev, struct ethtool_keee *edata)
14217 {
14218 struct tg3 *tp = netdev_priv(dev);
14219
14220 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14221 netdev_warn(tp->dev,
14222 "Board does not support EEE!\n");
14223 return -EOPNOTSUPP;
14224 }
14225
14226 *edata = tp->eee;
14227 return 0;
14228 }
14229
14230 static const struct ethtool_ops tg3_ethtool_ops = {
14231 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14232 ETHTOOL_COALESCE_MAX_FRAMES |
14233 ETHTOOL_COALESCE_USECS_IRQ |
14234 ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14235 ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14236 .get_drvinfo = tg3_get_drvinfo,
14237 .get_regs_len = tg3_get_regs_len,
14238 .get_regs = tg3_get_regs,
14239 .get_wol = tg3_get_wol,
14240 .set_wol = tg3_set_wol,
14241 .get_msglevel = tg3_get_msglevel,
14242 .set_msglevel = tg3_set_msglevel,
14243 .nway_reset = tg3_nway_reset,
14244 .get_link = ethtool_op_get_link,
14245 .get_eeprom_len = tg3_get_eeprom_len,
14246 .get_eeprom = tg3_get_eeprom,
14247 .set_eeprom = tg3_set_eeprom,
14248 .get_ringparam = tg3_get_ringparam,
14249 .set_ringparam = tg3_set_ringparam,
14250 .get_pauseparam = tg3_get_pauseparam,
14251 .set_pauseparam = tg3_set_pauseparam,
14252 .self_test = tg3_self_test,
14253 .get_strings = tg3_get_strings,
14254 .set_phys_id = tg3_set_phys_id,
14255 .get_ethtool_stats = tg3_get_ethtool_stats,
14256 .get_coalesce = tg3_get_coalesce,
14257 .set_coalesce = tg3_set_coalesce,
14258 .get_sset_count = tg3_get_sset_count,
14259 .get_rx_ring_count = tg3_get_rx_ring_count,
14260 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14261 .get_rxfh = tg3_get_rxfh,
14262 .set_rxfh = tg3_set_rxfh,
14263 .get_channels = tg3_get_channels,
14264 .set_channels = tg3_set_channels,
14265 .get_ts_info = tg3_get_ts_info,
14266 .get_eee = tg3_get_eee,
14267 .set_eee = tg3_set_eee,
14268 .get_link_ksettings = tg3_get_link_ksettings,
14269 .set_link_ksettings = tg3_set_link_ksettings,
14270 };
14271
14272 static void tg3_get_stats64(struct net_device *dev,
14273 struct rtnl_link_stats64 *stats)
14274 {
14275 struct tg3 *tp = netdev_priv(dev);
14276
14277 spin_lock_bh(&tp->lock);
14278 if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14279 *stats = tp->net_stats_prev;
14280 spin_unlock_bh(&tp->lock);
14281 return;
14282 }
14283
14284 tg3_get_nstats(tp, stats);
14285 spin_unlock_bh(&tp->lock);
14286 }
14287
14288 static void tg3_set_rx_mode(struct net_device *dev)
14289 {
14290 struct tg3 *tp = netdev_priv(dev);
14291
14292 if (!netif_running(dev))
14293 return;
14294
14295 tg3_full_lock(tp, 0);
14296 __tg3_set_rx_mode(dev);
14297 tg3_full_unlock(tp);
14298 }
14299
14300 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14301 int new_mtu)
14302 {
14303 WRITE_ONCE(dev->mtu, new_mtu);
14304
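/* 5780-class chips cannot run TSO and jumbo frames at the same time, so
 * TSO capability is toggled as the MTU crosses ETH_DATA_LEN; all other
 * chips simply flip the jumbo ring flag.
 */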
14305 if (new_mtu > ETH_DATA_LEN) {
14306 if (tg3_flag(tp, 5780_CLASS)) {
14307 netdev_update_features(dev);
14308 tg3_flag_clear(tp, TSO_CAPABLE);
14309 } else {
14310 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14311 }
14312 } else {
14313 if (tg3_flag(tp, 5780_CLASS)) {
14314 tg3_flag_set(tp, TSO_CAPABLE);
14315 netdev_update_features(dev);
14316 }
14317 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14318 }
14319 }
14320
14321 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14322 {
14323 struct tg3 *tp = netdev_priv(dev);
14324 int err;
14325 bool reset_phy = false;
14326
14327 if (!netif_running(dev)) {
14328 /* We'll just catch it later when the
14329 * device is brought up.
14330 */
14331 tg3_set_mtu(dev, tp, new_mtu);
14332 return 0;
14333 }
14334
14335 tg3_phy_stop(tp);
14336
14337 tg3_netif_stop(tp);
14338
14339 tg3_set_mtu(dev, tp, new_mtu);
14340
14341 netdev_lock(dev);
14342 tg3_full_lock(tp, 1);
14343
14344 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14345
14346 /* Reset the PHY, otherwise the read DMA engine will be left in a
14347 * mode that limits all requests to 256 bytes.
14348 */
14349 if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14350 tg3_asic_rev(tp) == ASIC_REV_5717 ||
14351 tg3_asic_rev(tp) == ASIC_REV_5719 ||
14352 tg3_asic_rev(tp) == ASIC_REV_5720)
14353 reset_phy = true;
14354
14355 err = tg3_restart_hw(tp, reset_phy);
14356
14357 if (!err)
14358 tg3_netif_start(tp);
14359
14360 tg3_full_unlock(tp);
14361 netdev_unlock(dev);
14362
14363 if (!err)
14364 tg3_phy_start(tp);
14365
14366 return err;
14367 }
14368
14369 static const struct net_device_ops tg3_netdev_ops = {
14370 .ndo_open = tg3_open,
14371 .ndo_stop = tg3_close,
14372 .ndo_start_xmit = tg3_start_xmit,
14373 .ndo_get_stats64 = tg3_get_stats64,
14374 .ndo_validate_addr = eth_validate_addr,
14375 .ndo_set_rx_mode = tg3_set_rx_mode,
14376 .ndo_set_mac_address = tg3_set_mac_addr,
14377 .ndo_eth_ioctl = tg3_ioctl,
14378 .ndo_tx_timeout = tg3_tx_timeout,
14379 .ndo_change_mtu = tg3_change_mtu,
14380 .ndo_fix_features = tg3_fix_features,
14381 .ndo_set_features = tg3_set_features,
14382 #ifdef CONFIG_NET_POLL_CONTROLLER
14383 .ndo_poll_controller = tg3_poll_controller,
14384 #endif
14385 .ndo_hwtstamp_get = tg3_hwtstamp_get,
14386 .ndo_hwtstamp_set = tg3_hwtstamp_set,
14387 };
14388
14389 static void tg3_get_eeprom_size(struct tg3 *tp)
14390 {
14391 u32 cursize, val, magic;
14392
14393 tp->nvram_size = EEPROM_CHIP_SIZE;
14394
14395 if (tg3_nvram_read(tp, 0, &magic) != 0)
14396 return;
14397
14398 if ((magic != TG3_EEPROM_MAGIC) &&
14399 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14400 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14401 return;
14402
14403 /*
14404 * Size the chip by reading offsets at increasing powers of two.
14405 * When we encounter our validation signature, we know the addressing
14406 * has wrapped around, and thus have our chip size.
14407 */
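/* As an illustration (sizes hypothetical): a 4 KB part aliases offset
 * 0x1000 back to 0, so the read at cursize == 0x1000 returns the magic
 * word and the loop exits with cursize as the chip size.
 */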
14408 cursize = 0x10;
14409
14410 while (cursize < tp->nvram_size) {
14411 if (tg3_nvram_read(tp, cursize, &val) != 0)
14412 return;
14413
14414 if (val == magic)
14415 break;
14416
14417 cursize <<= 1;
14418 }
14419
14420 tp->nvram_size = cursize;
14421 }
14422
14423 static void tg3_get_nvram_size(struct tg3 *tp)
14424 {
14425 u32 val;
14426
14427 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14428 return;
14429
14430 /* Selfboot format */
14431 if (val != TG3_EEPROM_MAGIC) {
14432 tg3_get_eeprom_size(tp);
14433 return;
14434 }
14435
14436 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14437 if (val != 0) {
14438 /* This is confusing. We want to operate on the
14439 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14440 * call will read from NVRAM and byteswap the data
14441 * according to the byteswapping settings for all
14442 * other register accesses. This ensures the data we
14443 * want will always reside in the lower 16-bits.
14444 * However, the data in NVRAM is in LE format, which
14445 * means the data from the NVRAM read will always be
14446 * opposite the endianness of the CPU. The 16-bit
14447 * byteswap then brings the data to CPU endianness.
14448 */
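/* The 16-bit value is the size in KB, hence the multiply by 1024 below. */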
14449 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14450 return;
14451 }
14452 }
14453 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14454 }
14455
14456 static void tg3_get_nvram_info(struct tg3 *tp)
14457 {
14458 u32 nvcfg1;
14459
14460 nvcfg1 = tr32(NVRAM_CFG1);
14461 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14462 tg3_flag_set(tp, FLASH);
14463 } else {
14464 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14465 tw32(NVRAM_CFG1, nvcfg1);
14466 }
14467
14468 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14469 tg3_flag(tp, 5780_CLASS)) {
14470 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14471 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14472 tp->nvram_jedecnum = JEDEC_ATMEL;
14473 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14474 tg3_flag_set(tp, NVRAM_BUFFERED);
14475 break;
14476 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14477 tp->nvram_jedecnum = JEDEC_ATMEL;
14478 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14479 break;
14480 case FLASH_VENDOR_ATMEL_EEPROM:
14481 tp->nvram_jedecnum = JEDEC_ATMEL;
14482 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14483 tg3_flag_set(tp, NVRAM_BUFFERED);
14484 break;
14485 case FLASH_VENDOR_ST:
14486 tp->nvram_jedecnum = JEDEC_ST;
14487 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14488 tg3_flag_set(tp, NVRAM_BUFFERED);
14489 break;
14490 case FLASH_VENDOR_SAIFUN:
14491 tp->nvram_jedecnum = JEDEC_SAIFUN;
14492 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14493 break;
14494 case FLASH_VENDOR_SST_SMALL:
14495 case FLASH_VENDOR_SST_LARGE:
14496 tp->nvram_jedecnum = JEDEC_SST;
14497 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14498 break;
14499 }
14500 } else {
14501 tp->nvram_jedecnum = JEDEC_ATMEL;
14502 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14503 tg3_flag_set(tp, NVRAM_BUFFERED);
14504 }
14505 }
14506
14507 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14508 {
14509 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14510 case FLASH_5752PAGE_SIZE_256:
14511 tp->nvram_pagesize = 256;
14512 break;
14513 case FLASH_5752PAGE_SIZE_512:
14514 tp->nvram_pagesize = 512;
14515 break;
14516 case FLASH_5752PAGE_SIZE_1K:
14517 tp->nvram_pagesize = 1024;
14518 break;
14519 case FLASH_5752PAGE_SIZE_2K:
14520 tp->nvram_pagesize = 2048;
14521 break;
14522 case FLASH_5752PAGE_SIZE_4K:
14523 tp->nvram_pagesize = 4096;
14524 break;
14525 case FLASH_5752PAGE_SIZE_264:
14526 tp->nvram_pagesize = 264;
14527 break;
14528 case FLASH_5752PAGE_SIZE_528:
14529 tp->nvram_pagesize = 528;
14530 break;
14531 }
14532 }
14533
14534 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14535 {
14536 u32 nvcfg1;
14537
14538 nvcfg1 = tr32(NVRAM_CFG1);
14539
14540 /* NVRAM protection for TPM */
14541 if (nvcfg1 & (1 << 27))
14542 tg3_flag_set(tp, PROTECTED_NVRAM);
14543
14544 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14545 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14546 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14547 tp->nvram_jedecnum = JEDEC_ATMEL;
14548 tg3_flag_set(tp, NVRAM_BUFFERED);
14549 break;
14550 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14551 tp->nvram_jedecnum = JEDEC_ATMEL;
14552 tg3_flag_set(tp, NVRAM_BUFFERED);
14553 tg3_flag_set(tp, FLASH);
14554 break;
14555 case FLASH_5752VENDOR_ST_M45PE10:
14556 case FLASH_5752VENDOR_ST_M45PE20:
14557 case FLASH_5752VENDOR_ST_M45PE40:
14558 tp->nvram_jedecnum = JEDEC_ST;
14559 tg3_flag_set(tp, NVRAM_BUFFERED);
14560 tg3_flag_set(tp, FLASH);
14561 break;
14562 }
14563
14564 if (tg3_flag(tp, FLASH)) {
14565 tg3_nvram_get_pagesize(tp, nvcfg1);
14566 } else {
14567 /* For EEPROMs, set pagesize to the maximum EEPROM size */
14568 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14569
14570 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14571 tw32(NVRAM_CFG1, nvcfg1);
14572 }
14573 }
14574
14575 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14576 {
14577 u32 nvcfg1, protect = 0;
14578
14579 nvcfg1 = tr32(NVRAM_CFG1);
14580
14581 /* NVRAM protection for TPM */
14582 if (nvcfg1 & (1 << 27)) {
14583 tg3_flag_set(tp, PROTECTED_NVRAM);
14584 protect = 1;
14585 }
14586
14587 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14588 switch (nvcfg1) {
14589 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14590 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14591 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14592 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14593 tp->nvram_jedecnum = JEDEC_ATMEL;
14594 tg3_flag_set(tp, NVRAM_BUFFERED);
14595 tg3_flag_set(tp, FLASH);
14596 tp->nvram_pagesize = 264;
14597 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14598 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14599 tp->nvram_size = (protect ? 0x3e200 :
14600 TG3_NVRAM_SIZE_512KB);
14601 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14602 tp->nvram_size = (protect ? 0x1f200 :
14603 TG3_NVRAM_SIZE_256KB);
14604 else
14605 tp->nvram_size = (protect ? 0x1f200 :
14606 TG3_NVRAM_SIZE_128KB);
14607 break;
14608 case FLASH_5752VENDOR_ST_M45PE10:
14609 case FLASH_5752VENDOR_ST_M45PE20:
14610 case FLASH_5752VENDOR_ST_M45PE40:
14611 tp->nvram_jedecnum = JEDEC_ST;
14612 tg3_flag_set(tp, NVRAM_BUFFERED);
14613 tg3_flag_set(tp, FLASH);
14614 tp->nvram_pagesize = 256;
14615 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14616 tp->nvram_size = (protect ?
14617 TG3_NVRAM_SIZE_64KB :
14618 TG3_NVRAM_SIZE_128KB);
14619 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14620 tp->nvram_size = (protect ?
14621 TG3_NVRAM_SIZE_64KB :
14622 TG3_NVRAM_SIZE_256KB);
14623 else
14624 tp->nvram_size = (protect ?
14625 TG3_NVRAM_SIZE_128KB :
14626 TG3_NVRAM_SIZE_512KB);
14627 break;
14628 }
14629 }
14630
14631 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14632 {
14633 u32 nvcfg1;
14634
14635 nvcfg1 = tr32(NVRAM_CFG1);
14636
14637 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14638 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14639 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14640 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14641 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14642 tp->nvram_jedecnum = JEDEC_ATMEL;
14643 tg3_flag_set(tp, NVRAM_BUFFERED);
14644 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14645
14646 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14647 tw32(NVRAM_CFG1, nvcfg1);
14648 break;
14649 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14650 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14651 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14652 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14653 tp->nvram_jedecnum = JEDEC_ATMEL;
14654 tg3_flag_set(tp, NVRAM_BUFFERED);
14655 tg3_flag_set(tp, FLASH);
14656 tp->nvram_pagesize = 264;
14657 break;
14658 case FLASH_5752VENDOR_ST_M45PE10:
14659 case FLASH_5752VENDOR_ST_M45PE20:
14660 case FLASH_5752VENDOR_ST_M45PE40:
14661 tp->nvram_jedecnum = JEDEC_ST;
14662 tg3_flag_set(tp, NVRAM_BUFFERED);
14663 tg3_flag_set(tp, FLASH);
14664 tp->nvram_pagesize = 256;
14665 break;
14666 }
14667 }
14668
14669 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14670 {
14671 u32 nvcfg1, protect = 0;
14672
14673 nvcfg1 = tr32(NVRAM_CFG1);
14674
14675 /* NVRAM protection for TPM */
14676 if (nvcfg1 & (1 << 27)) {
14677 tg3_flag_set(tp, PROTECTED_NVRAM);
14678 protect = 1;
14679 }
14680
14681 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14682 switch (nvcfg1) {
14683 case FLASH_5761VENDOR_ATMEL_ADB021D:
14684 case FLASH_5761VENDOR_ATMEL_ADB041D:
14685 case FLASH_5761VENDOR_ATMEL_ADB081D:
14686 case FLASH_5761VENDOR_ATMEL_ADB161D:
14687 case FLASH_5761VENDOR_ATMEL_MDB021D:
14688 case FLASH_5761VENDOR_ATMEL_MDB041D:
14689 case FLASH_5761VENDOR_ATMEL_MDB081D:
14690 case FLASH_5761VENDOR_ATMEL_MDB161D:
14691 tp->nvram_jedecnum = JEDEC_ATMEL;
14692 tg3_flag_set(tp, NVRAM_BUFFERED);
14693 tg3_flag_set(tp, FLASH);
14694 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14695 tp->nvram_pagesize = 256;
14696 break;
14697 case FLASH_5761VENDOR_ST_A_M45PE20:
14698 case FLASH_5761VENDOR_ST_A_M45PE40:
14699 case FLASH_5761VENDOR_ST_A_M45PE80:
14700 case FLASH_5761VENDOR_ST_A_M45PE16:
14701 case FLASH_5761VENDOR_ST_M_M45PE20:
14702 case FLASH_5761VENDOR_ST_M_M45PE40:
14703 case FLASH_5761VENDOR_ST_M_M45PE80:
14704 case FLASH_5761VENDOR_ST_M_M45PE16:
14705 tp->nvram_jedecnum = JEDEC_ST;
14706 tg3_flag_set(tp, NVRAM_BUFFERED);
14707 tg3_flag_set(tp, FLASH);
14708 tp->nvram_pagesize = 256;
14709 break;
14710 }
14711
14712 if (protect) {
14713 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14714 } else {
14715 switch (nvcfg1) {
14716 case FLASH_5761VENDOR_ATMEL_ADB161D:
14717 case FLASH_5761VENDOR_ATMEL_MDB161D:
14718 case FLASH_5761VENDOR_ST_A_M45PE16:
14719 case FLASH_5761VENDOR_ST_M_M45PE16:
14720 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14721 break;
14722 case FLASH_5761VENDOR_ATMEL_ADB081D:
14723 case FLASH_5761VENDOR_ATMEL_MDB081D:
14724 case FLASH_5761VENDOR_ST_A_M45PE80:
14725 case FLASH_5761VENDOR_ST_M_M45PE80:
14726 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14727 break;
14728 case FLASH_5761VENDOR_ATMEL_ADB041D:
14729 case FLASH_5761VENDOR_ATMEL_MDB041D:
14730 case FLASH_5761VENDOR_ST_A_M45PE40:
14731 case FLASH_5761VENDOR_ST_M_M45PE40:
14732 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14733 break;
14734 case FLASH_5761VENDOR_ATMEL_ADB021D:
14735 case FLASH_5761VENDOR_ATMEL_MDB021D:
14736 case FLASH_5761VENDOR_ST_A_M45PE20:
14737 case FLASH_5761VENDOR_ST_M_M45PE20:
14738 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14739 break;
14740 }
14741 }
14742 }
14743
14744 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14745 {
14746 tp->nvram_jedecnum = JEDEC_ATMEL;
14747 tg3_flag_set(tp, NVRAM_BUFFERED);
14748 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14749 }
14750
14751 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14752 {
14753 u32 nvcfg1;
14754
14755 nvcfg1 = tr32(NVRAM_CFG1);
14756
14757 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14758 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14759 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14760 tp->nvram_jedecnum = JEDEC_ATMEL;
14761 tg3_flag_set(tp, NVRAM_BUFFERED);
14762 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14763
14764 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14765 tw32(NVRAM_CFG1, nvcfg1);
14766 return;
14767 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14768 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14769 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14770 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14771 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14772 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14773 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14774 tp->nvram_jedecnum = JEDEC_ATMEL;
14775 tg3_flag_set(tp, NVRAM_BUFFERED);
14776 tg3_flag_set(tp, FLASH);
14777
14778 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14779 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14780 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14781 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14782 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14783 break;
14784 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14785 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14786 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14787 break;
14788 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14789 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14790 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14791 break;
14792 }
14793 break;
14794 case FLASH_5752VENDOR_ST_M45PE10:
14795 case FLASH_5752VENDOR_ST_M45PE20:
14796 case FLASH_5752VENDOR_ST_M45PE40:
14797 tp->nvram_jedecnum = JEDEC_ST;
14798 tg3_flag_set(tp, NVRAM_BUFFERED);
14799 tg3_flag_set(tp, FLASH);
14800
14801 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14802 case FLASH_5752VENDOR_ST_M45PE10:
14803 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14804 break;
14805 case FLASH_5752VENDOR_ST_M45PE20:
14806 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14807 break;
14808 case FLASH_5752VENDOR_ST_M45PE40:
14809 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14810 break;
14811 }
14812 break;
14813 default:
14814 tg3_flag_set(tp, NO_NVRAM);
14815 return;
14816 }
14817
14818 tg3_nvram_get_pagesize(tp, nvcfg1);
14819 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14820 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14821 }
14822
14823
14824 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14825 {
14826 u32 nvcfg1;
14827
14828 nvcfg1 = tr32(NVRAM_CFG1);
14829
14830 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14831 case FLASH_5717VENDOR_ATMEL_EEPROM:
14832 case FLASH_5717VENDOR_MICRO_EEPROM:
14833 tp->nvram_jedecnum = JEDEC_ATMEL;
14834 tg3_flag_set(tp, NVRAM_BUFFERED);
14835 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14836
14837 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14838 tw32(NVRAM_CFG1, nvcfg1);
14839 return;
14840 case FLASH_5717VENDOR_ATMEL_MDB011D:
14841 case FLASH_5717VENDOR_ATMEL_ADB011B:
14842 case FLASH_5717VENDOR_ATMEL_ADB011D:
14843 case FLASH_5717VENDOR_ATMEL_MDB021D:
14844 case FLASH_5717VENDOR_ATMEL_ADB021B:
14845 case FLASH_5717VENDOR_ATMEL_ADB021D:
14846 case FLASH_5717VENDOR_ATMEL_45USPT:
14847 tp->nvram_jedecnum = JEDEC_ATMEL;
14848 tg3_flag_set(tp, NVRAM_BUFFERED);
14849 tg3_flag_set(tp, FLASH);
14850
14851 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14852 case FLASH_5717VENDOR_ATMEL_MDB021D:
14853 /* Detect size with tg3_get_nvram_size() */
14854 break;
14855 case FLASH_5717VENDOR_ATMEL_ADB021B:
14856 case FLASH_5717VENDOR_ATMEL_ADB021D:
14857 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14858 break;
14859 default:
14860 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14861 break;
14862 }
14863 break;
14864 case FLASH_5717VENDOR_ST_M_M25PE10:
14865 case FLASH_5717VENDOR_ST_A_M25PE10:
14866 case FLASH_5717VENDOR_ST_M_M45PE10:
14867 case FLASH_5717VENDOR_ST_A_M45PE10:
14868 case FLASH_5717VENDOR_ST_M_M25PE20:
14869 case FLASH_5717VENDOR_ST_A_M25PE20:
14870 case FLASH_5717VENDOR_ST_M_M45PE20:
14871 case FLASH_5717VENDOR_ST_A_M45PE20:
14872 case FLASH_5717VENDOR_ST_25USPT:
14873 case FLASH_5717VENDOR_ST_45USPT:
14874 tp->nvram_jedecnum = JEDEC_ST;
14875 tg3_flag_set(tp, NVRAM_BUFFERED);
14876 tg3_flag_set(tp, FLASH);
14877
14878 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14879 case FLASH_5717VENDOR_ST_M_M25PE20:
14880 case FLASH_5717VENDOR_ST_M_M45PE20:
14881 /* Detect size with tg3_get_nvram_size() */
14882 break;
14883 case FLASH_5717VENDOR_ST_A_M25PE20:
14884 case FLASH_5717VENDOR_ST_A_M45PE20:
14885 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14886 break;
14887 default:
14888 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14889 break;
14890 }
14891 break;
14892 default:
14893 tg3_flag_set(tp, NO_NVRAM);
14894 return;
14895 }
14896
14897 tg3_nvram_get_pagesize(tp, nvcfg1);
14898 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14899 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14900 }
14901
14902 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14903 {
14904 u32 nvcfg1, nvmpinstrp, nv_status;
14905
14906 nvcfg1 = tr32(NVRAM_CFG1);
14907 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14908
14909 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14910 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14911 tg3_flag_set(tp, NO_NVRAM);
14912 return;
14913 }
14914
14915 switch (nvmpinstrp) {
14916 case FLASH_5762_MX25L_100:
14917 case FLASH_5762_MX25L_200:
14918 case FLASH_5762_MX25L_400:
14919 case FLASH_5762_MX25L_800:
14920 case FLASH_5762_MX25L_160_320:
14921 tp->nvram_pagesize = 4096;
14922 tp->nvram_jedecnum = JEDEC_MACRONIX;
14923 tg3_flag_set(tp, NVRAM_BUFFERED);
14924 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14925 tg3_flag_set(tp, FLASH);
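/* The flash part auto-senses its size: the DEVID field appears to hold
 * log2 of the size in MB, which the shifts below turn into a byte count
 * (an inference from the AUTOSENSE_SIZE_IN_MB shift).
 */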
14926 nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14927 tp->nvram_size =
14928 (1 << (nv_status >> AUTOSENSE_DEVID &
14929 AUTOSENSE_DEVID_MASK)
14930 << AUTOSENSE_SIZE_IN_MB);
14931 return;
14932
14933 case FLASH_5762_EEPROM_HD:
14934 nvmpinstrp = FLASH_5720_EEPROM_HD;
14935 break;
14936 case FLASH_5762_EEPROM_LD:
14937 nvmpinstrp = FLASH_5720_EEPROM_LD;
14938 break;
14939 case FLASH_5720VENDOR_M_ST_M45PE20:
14940 /* This pinstrap supports multiple sizes, so force it
14941 * to read the actual size from location 0xf0.
14942 */
14943 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14944 break;
14945 }
14946 }
14947
14948 switch (nvmpinstrp) {
14949 case FLASH_5720_EEPROM_HD:
14950 case FLASH_5720_EEPROM_LD:
14951 tp->nvram_jedecnum = JEDEC_ATMEL;
14952 tg3_flag_set(tp, NVRAM_BUFFERED);
14953
14954 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14955 tw32(NVRAM_CFG1, nvcfg1);
14956 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14957 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14958 else
14959 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14960 return;
14961 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14962 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14963 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14964 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14965 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14966 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14967 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14968 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14969 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14970 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14971 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14972 case FLASH_5720VENDOR_ATMEL_45USPT:
14973 tp->nvram_jedecnum = JEDEC_ATMEL;
14974 tg3_flag_set(tp, NVRAM_BUFFERED);
14975 tg3_flag_set(tp, FLASH);
14976
14977 switch (nvmpinstrp) {
14978 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14979 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14980 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14981 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14982 break;
14983 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14984 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14985 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14986 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14987 break;
14988 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14989 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14990 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14991 break;
14992 default:
14993 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14994 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14995 break;
14996 }
14997 break;
14998 case FLASH_5720VENDOR_M_ST_M25PE10:
14999 case FLASH_5720VENDOR_M_ST_M45PE10:
15000 case FLASH_5720VENDOR_A_ST_M25PE10:
15001 case FLASH_5720VENDOR_A_ST_M45PE10:
15002 case FLASH_5720VENDOR_M_ST_M25PE20:
15003 case FLASH_5720VENDOR_M_ST_M45PE20:
15004 case FLASH_5720VENDOR_A_ST_M25PE20:
15005 case FLASH_5720VENDOR_A_ST_M45PE20:
15006 case FLASH_5720VENDOR_M_ST_M25PE40:
15007 case FLASH_5720VENDOR_M_ST_M45PE40:
15008 case FLASH_5720VENDOR_A_ST_M25PE40:
15009 case FLASH_5720VENDOR_A_ST_M45PE40:
15010 case FLASH_5720VENDOR_M_ST_M25PE80:
15011 case FLASH_5720VENDOR_M_ST_M45PE80:
15012 case FLASH_5720VENDOR_A_ST_M25PE80:
15013 case FLASH_5720VENDOR_A_ST_M45PE80:
15014 case FLASH_5720VENDOR_ST_25USPT:
15015 case FLASH_5720VENDOR_ST_45USPT:
15016 tp->nvram_jedecnum = JEDEC_ST;
15017 tg3_flag_set(tp, NVRAM_BUFFERED);
15018 tg3_flag_set(tp, FLASH);
15019
15020 switch (nvmpinstrp) {
15021 case FLASH_5720VENDOR_M_ST_M25PE20:
15022 case FLASH_5720VENDOR_M_ST_M45PE20:
15023 case FLASH_5720VENDOR_A_ST_M25PE20:
15024 case FLASH_5720VENDOR_A_ST_M45PE20:
15025 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
15026 break;
15027 case FLASH_5720VENDOR_M_ST_M25PE40:
15028 case FLASH_5720VENDOR_M_ST_M45PE40:
15029 case FLASH_5720VENDOR_A_ST_M25PE40:
15030 case FLASH_5720VENDOR_A_ST_M45PE40:
15031 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
15032 break;
15033 case FLASH_5720VENDOR_M_ST_M25PE80:
15034 case FLASH_5720VENDOR_M_ST_M45PE80:
15035 case FLASH_5720VENDOR_A_ST_M25PE80:
15036 case FLASH_5720VENDOR_A_ST_M45PE80:
15037 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
15038 break;
15039 default:
15040 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15041 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
15042 break;
15043 }
15044 break;
15045 default:
15046 tg3_flag_set(tp, NO_NVRAM);
15047 return;
15048 }
15049
15050 tg3_nvram_get_pagesize(tp, nvcfg1);
15051 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
15052 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
15053
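/* On the 5762, additionally require a valid image signature in
 * word 0; otherwise treat the NVRAM as absent.
 */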
15054 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
15055 u32 val;
15056
15057 if (tg3_nvram_read(tp, 0, &val))
15058 return;
15059
15060 if (val != TG3_EEPROM_MAGIC &&
15061 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
15062 tg3_flag_set(tp, NO_NVRAM);
15063 }
15064 }
15065
15066 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
15067 static void tg3_nvram_init(struct tg3 *tp)
15068 {
15069 if (tg3_flag(tp, IS_SSB_CORE)) {
15070 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
15071 tg3_flag_clear(tp, NVRAM);
15072 tg3_flag_clear(tp, NVRAM_BUFFERED);
15073 tg3_flag_set(tp, NO_NVRAM);
15074 return;
15075 }
15076
15077 tw32_f(GRC_EEPROM_ADDR,
15078 (EEPROM_ADDR_FSM_RESET |
15079 (EEPROM_DEFAULT_CLOCK_PERIOD <<
15080 EEPROM_ADDR_CLKPERD_SHIFT)));
15081
15082 msleep(1);
15083
15084 /* Enable seeprom accesses. */
15085 tw32_f(GRC_LOCAL_CTRL,
15086 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15087 udelay(100);
15088
15089 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15090 tg3_asic_rev(tp) != ASIC_REV_5701) {
15091 tg3_flag_set(tp, NVRAM);
15092
15093 if (tg3_nvram_lock(tp)) {
15094 netdev_warn(tp->dev,
15095 "Cannot get nvram lock, %s failed\n",
15096 __func__);
15097 return;
15098 }
15099 tg3_enable_nvram_access(tp);
15100
15101 tp->nvram_size = 0;
15102
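/* Dispatch to the ASIC-specific NVRAM info routine. Each one fills
 * in the JEDEC vendor, page size and, where the pinstraps allow it,
 * the NVRAM size; a size left at zero is probed below.
 */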
15103 if (tg3_asic_rev(tp) == ASIC_REV_5752)
15104 tg3_get_5752_nvram_info(tp);
15105 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15106 tg3_get_5755_nvram_info(tp);
15107 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15108 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15109 tg3_asic_rev(tp) == ASIC_REV_5785)
15110 tg3_get_5787_nvram_info(tp);
15111 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15112 tg3_get_5761_nvram_info(tp);
15113 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15114 tg3_get_5906_nvram_info(tp);
15115 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15116 tg3_flag(tp, 57765_CLASS))
15117 tg3_get_57780_nvram_info(tp);
15118 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15119 tg3_asic_rev(tp) == ASIC_REV_5719)
15120 tg3_get_5717_nvram_info(tp);
15121 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15122 tg3_asic_rev(tp) == ASIC_REV_5762)
15123 tg3_get_5720_nvram_info(tp);
15124 else
15125 tg3_get_nvram_info(tp);
15126
15127 if (tp->nvram_size == 0)
15128 tg3_get_nvram_size(tp);
15129
15130 tg3_disable_nvram_access(tp);
15131 tg3_nvram_unlock(tp);
15132
15133 } else {
15134 tg3_flag_clear(tp, NVRAM);
15135 tg3_flag_clear(tp, NVRAM_BUFFERED);
15136
15137 tg3_get_eeprom_size(tp);
15138 }
15139 }
15140
15141 struct subsys_tbl_ent {
15142 u16 subsys_vendor, subsys_devid;
15143 u32 phy_id;
15144 };
15145
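/* Map subsystem vendor/device IDs to known PHY IDs for boards whose
 * NVRAM carries no PHY ID. A phy_id of 0 makes tg3_phy_probe() treat
 * the board as a serdes device.
 */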
15146 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15147 /* Broadcom boards. */
15148 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15149 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15150 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15151 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15152 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15153 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15154 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15155 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15156 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15157 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15158 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15159 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15160 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15161 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15162 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15163 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15164 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15165 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15166 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15167 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15168 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15169 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15170
15171 /* 3com boards. */
15172 { TG3PCI_SUBVENDOR_ID_3COM,
15173 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15174 { TG3PCI_SUBVENDOR_ID_3COM,
15175 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15176 { TG3PCI_SUBVENDOR_ID_3COM,
15177 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15178 { TG3PCI_SUBVENDOR_ID_3COM,
15179 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15180 { TG3PCI_SUBVENDOR_ID_3COM,
15181 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15182
15183 /* DELL boards. */
15184 { TG3PCI_SUBVENDOR_ID_DELL,
15185 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15186 { TG3PCI_SUBVENDOR_ID_DELL,
15187 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15188 { TG3PCI_SUBVENDOR_ID_DELL,
15189 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15190 { TG3PCI_SUBVENDOR_ID_DELL,
15191 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15192
15193 /* Compaq boards. */
15194 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15195 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15196 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15197 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15198 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15199 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15200 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15201 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15202 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15203 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15204
15205 /* IBM boards. */
15206 { TG3PCI_SUBVENDOR_ID_IBM,
15207 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15208 };
15209
15210 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15211 {
15212 int i;
15213
15214 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15215 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15216 tp->pdev->subsystem_vendor) &&
15217 (subsys_id_to_phy_id[i].subsys_devid ==
15218 tp->pdev->subsystem_device))
15219 return &subsys_id_to_phy_id[i];
15220 }
15221 return NULL;
15222 }
15223
15224 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15225 {
15226 u32 val;
15227
15228 tp->phy_id = TG3_PHY_ID_INVALID;
15229 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15230
15231 /* Assume an onboard device and WOL capable by default. */
15232 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15233 tg3_flag_set(tp, WOL_CAP);
15234
15235 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15236 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15237 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15238 tg3_flag_set(tp, IS_NIC);
15239 }
15240 val = tr32(VCPU_CFGSHDW);
15241 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15242 tg3_flag_set(tp, ASPM_WORKAROUND);
15243 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15244 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15245 tg3_flag_set(tp, WOL_ENABLE);
15246 device_set_wakeup_enable(&tp->pdev->dev, true);
15247 }
15248 goto done;
15249 }
15250
15251 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15252 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15253 u32 nic_cfg, led_cfg;
15254 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15255 u32 nic_phy_id, ver, eeprom_phy_id;
15256 int eeprom_phy_serdes = 0;
15257
15258 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15259 tp->nic_sram_data_cfg = nic_cfg;
15260
15261 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15262 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15263 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15264 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15265 tg3_asic_rev(tp) != ASIC_REV_5703 &&
15266 (ver > 0) && (ver < 0x100))
15267 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15268
15269 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15270 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15271
15272 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15273 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15274 tg3_asic_rev(tp) == ASIC_REV_5720)
15275 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15276
15277 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15278 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15279 eeprom_phy_serdes = 1;
15280
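/* Unpack the NVRAM PHY ID into the driver's internal layout:
 * bits 25:10 come from the high half of id1, bits 31:26 and 9:0
 * from id2. The same packing is used for the live MII registers
 * in tg3_phy_probe().
 */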
15281 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15282 if (nic_phy_id != 0) {
15283 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15284 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15285
15286 eeprom_phy_id = (id1 >> 16) << 10;
15287 eeprom_phy_id |= (id2 & 0xfc00) << 16;
15288 eeprom_phy_id |= (id2 & 0x03ff) << 0;
15289 } else
15290 eeprom_phy_id = 0;
15291
15292 tp->phy_id = eeprom_phy_id;
15293 if (eeprom_phy_serdes) {
15294 if (!tg3_flag(tp, 5705_PLUS))
15295 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15296 else
15297 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15298 }
15299
15300 if (tg3_flag(tp, 5750_PLUS))
15301 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15302 SHASTA_EXT_LED_MODE_MASK);
15303 else
15304 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15305
15306 switch (led_cfg) {
15307 default:
15308 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15309 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15310 break;
15311
15312 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15313 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15314 break;
15315
15316 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15317 tp->led_ctrl = LED_CTRL_MODE_MAC;
15318
15319 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15320 * read on some older 5700/5701 bootcode.
15321 */
15322 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15323 tg3_asic_rev(tp) == ASIC_REV_5701)
15324 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15325
15326 break;
15327
15328 case SHASTA_EXT_LED_SHARED:
15329 tp->led_ctrl = LED_CTRL_MODE_SHARED;
15330 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15331 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15332 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15333 LED_CTRL_MODE_PHY_2);
15334
15335 if (tg3_flag(tp, 5717_PLUS) ||
15336 tg3_asic_rev(tp) == ASIC_REV_5762)
15337 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15338 LED_CTRL_BLINK_RATE_MASK;
15339
15340 break;
15341
15342 case SHASTA_EXT_LED_MAC:
15343 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15344 break;
15345
15346 case SHASTA_EXT_LED_COMBO:
15347 tp->led_ctrl = LED_CTRL_MODE_COMBO;
15348 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15349 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15350 LED_CTRL_MODE_PHY_2);
15351 break;
15352
15353 }
15354
15355 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15356 tg3_asic_rev(tp) == ASIC_REV_5701) &&
15357 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15358 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15359
15360 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15361 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15362
15363 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15364 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15365 if ((tp->pdev->subsystem_vendor ==
15366 PCI_VENDOR_ID_ARIMA) &&
15367 (tp->pdev->subsystem_device == 0x205a ||
15368 tp->pdev->subsystem_device == 0x2063))
15369 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15370 } else {
15371 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15372 tg3_flag_set(tp, IS_NIC);
15373 }
15374
15375 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15376 tg3_flag_set(tp, ENABLE_ASF);
15377 if (tg3_flag(tp, 5750_PLUS))
15378 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15379 }
15380
15381 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15382 tg3_flag(tp, 5750_PLUS))
15383 tg3_flag_set(tp, ENABLE_APE);
15384
15385 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15386 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15387 tg3_flag_clear(tp, WOL_CAP);
15388
15389 if (tg3_flag(tp, WOL_CAP) &&
15390 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15391 tg3_flag_set(tp, WOL_ENABLE);
15392 device_set_wakeup_enable(&tp->pdev->dev, true);
15393 }
15394
15395 if (cfg2 & (1 << 17))
15396 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15397
15398 /* Serdes signal pre-emphasis in register 0x590 is set by the
15399 * bootcode if bit 18 is set. */
15400 if (cfg2 & (1 << 18))
15401 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15402
15403 if ((tg3_flag(tp, 57765_PLUS) ||
15404 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15405 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15406 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15407 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15408
15409 if (tg3_flag(tp, PCI_EXPRESS)) {
15410 u32 cfg3;
15411
15412 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15413 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15414 !tg3_flag(tp, 57765_PLUS) &&
15415 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15416 tg3_flag_set(tp, ASPM_WORKAROUND);
15417 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15418 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15419 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15420 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15421 }
15422
15423 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15424 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15425 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15426 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15427 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15428 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15429
15430 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15431 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15432 }
15433 done:
15434 if (tg3_flag(tp, WOL_CAP))
15435 device_set_wakeup_enable(&tp->pdev->dev,
15436 tg3_flag(tp, WOL_ENABLE));
15437 else
15438 device_set_wakeup_capable(&tp->pdev->dev, false);
15439 }
15440
15441 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15442 {
15443 int i, err;
15444 u32 val2, off = offset * 8;
15445
15446 err = tg3_nvram_lock(tp);
15447 if (err)
15448 return err;
15449
15450 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15451 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15452 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15453 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15454 udelay(10);
15455
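/* Poll for completion for up to 1 ms (100 iterations x 10 us). */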
15456 for (i = 0; i < 100; i++) {
15457 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15458 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15459 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15460 break;
15461 }
15462 udelay(10);
15463 }
15464
15465 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15466
15467 tg3_nvram_unlock(tp);
15468 if (val2 & APE_OTP_STATUS_CMD_DONE)
15469 return 0;
15470
15471 return -EBUSY;
15472 }
15473
15474 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15475 {
15476 int i;
15477 u32 val;
15478
15479 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15480 tw32(OTP_CTRL, cmd);
15481
15482 /* Wait for up to 1 ms for command to execute. */
15483 for (i = 0; i < 100; i++) {
15484 val = tr32(OTP_STATUS);
15485 if (val & OTP_STATUS_CMD_DONE)
15486 break;
15487 udelay(10);
15488 }
15489
15490 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15491 }
15492
15493 /* Read the gphy configuration from the OTP region of the chip. The gphy
15494 * configuration is a 32-bit value that straddles the alignment boundary.
15495 * We do two 32-bit reads and then shift and merge the results.
15496 */
15497 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15498 {
15499 u32 bhalf_otp, thalf_otp;
15500
15501 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15502
15503 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15504 return 0;
15505
15506 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15507
15508 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15509 return 0;
15510
15511 thalf_otp = tr32(OTP_READ_DATA);
15512
15513 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15514
15515 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15516 return 0;
15517
15518 bhalf_otp = tr32(OTP_READ_DATA);
15519
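/* Merge the two halves: the low 16 bits of the top-half word become
 * bits 31:16 of the result, and the high 16 bits of the bottom-half
 * word become bits 15:0.
 */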
15520 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15521 }
15522
15523 static void tg3_phy_init_link_config(struct tg3 *tp)
15524 {
15525 u32 adv = ADVERTISED_Autoneg;
15526
15527 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15528 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15529 adv |= ADVERTISED_1000baseT_Half;
15530 adv |= ADVERTISED_1000baseT_Full;
15531 }
15532
15533 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15534 adv |= ADVERTISED_100baseT_Half |
15535 ADVERTISED_100baseT_Full |
15536 ADVERTISED_10baseT_Half |
15537 ADVERTISED_10baseT_Full |
15538 ADVERTISED_TP;
15539 else
15540 adv |= ADVERTISED_FIBRE;
15541
15542 tp->link_config.advertising = adv;
15543 tp->link_config.speed = SPEED_UNKNOWN;
15544 tp->link_config.duplex = DUPLEX_UNKNOWN;
15545 tp->link_config.autoneg = AUTONEG_ENABLE;
15546 tp->link_config.active_speed = SPEED_UNKNOWN;
15547 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15548
15549 tp->old_link = -1;
15550 }
15551
15552 static int tg3_phy_probe(struct tg3 *tp)
15553 {
15554 u32 hw_phy_id_1, hw_phy_id_2;
15555 u32 hw_phy_id, hw_phy_id_masked;
15556 int err;
15557
15558 /* flow control autonegotiation is default behavior */
15559 tg3_flag_set(tp, PAUSE_AUTONEG);
15560 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15561
15562 if (tg3_flag(tp, ENABLE_APE)) {
15563 switch (tp->pci_fn) {
15564 case 0:
15565 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15566 break;
15567 case 1:
15568 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15569 break;
15570 case 2:
15571 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15572 break;
15573 case 3:
15574 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15575 break;
15576 }
15577 }
15578
15579 if (!tg3_flag(tp, ENABLE_ASF) &&
15580 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15581 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15582 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15583 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15584
15585 if (tg3_flag(tp, USE_PHYLIB))
15586 return tg3_phy_init(tp);
15587
15588 /* Reading the PHY ID register can conflict with ASF
15589 * firmware access to the PHY hardware.
15590 */
15591 err = 0;
15592 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15593 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15594 } else {
15595 /* Now read the physical PHY_ID from the chip and verify
15596 * that it is sane. If it doesn't look good, we fall back
15597 * first to the value found in the eeprom area and, failing
15598 * that, to the hard-coded subsystem-ID table.
15599 */
15600 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15601 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15602
15603 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15604 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15605 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15606
15607 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15608 }
15609
15610 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15611 tp->phy_id = hw_phy_id;
15612 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15613 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15614 else
15615 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15616 } else {
15617 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15618 /* Do nothing, phy ID already set up in
15619 * tg3_get_eeprom_hw_cfg().
15620 */
15621 } else {
15622 struct subsys_tbl_ent *p;
15623
15624 /* No eeprom signature? Try the hardcoded
15625 * subsys device table.
15626 */
15627 p = tg3_lookup_by_subsys(tp);
15628 if (p) {
15629 tp->phy_id = p->phy_id;
15630 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15631 /* So far we have seen the IDs 0xbc050cd0,
15632 * 0xbc050f80 and 0xbc050c30 on devices
15633 * connected to a BCM4785, and there are
15634 * probably more. For now, just assume that
15635 * the phy is supported when it is connected
15636 * to an SSB core.
15637 */
15638 return -ENODEV;
15639 }
15640
15641 if (!tp->phy_id ||
15642 tp->phy_id == TG3_PHY_ID_BCM8002)
15643 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15644 }
15645 }
15646
15647 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15648 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15649 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15650 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15651 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15652 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15653 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15654 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15655 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15656 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15657
15658 linkmode_zero(tp->eee.supported);
15659 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
15660 tp->eee.supported);
15661 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
15662 tp->eee.supported);
15663 linkmode_copy(tp->eee.advertised, tp->eee.supported);
15664
15665 tp->eee.eee_enabled = 1;
15666 tp->eee.tx_lpi_enabled = 1;
15667 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15668 }
15669
15670 tg3_phy_init_link_config(tp);
15671
15672 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15673 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15674 !tg3_flag(tp, ENABLE_APE) &&
15675 !tg3_flag(tp, ENABLE_ASF)) {
15676 u32 bmsr, dummy;
15677
15678 tg3_readphy(tp, MII_BMSR, &bmsr);
15679 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15680 (bmsr & BMSR_LSTATUS))
15681 goto skip_phy_reset;
15682
15683 err = tg3_phy_reset(tp);
15684 if (err)
15685 return err;
15686
15687 tg3_phy_set_wirespeed(tp);
15688
15689 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15690 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15691 tp->link_config.flowctrl);
15692
15693 tg3_writephy(tp, MII_BMCR,
15694 BMCR_ANENABLE | BMCR_ANRESTART);
15695 }
15696 }
15697
15698 skip_phy_reset:
15699 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15700 err = tg3_init_5401phy_dsp(tp);
15701 if (err)
15702 return err;
15703
15704 err = tg3_init_5401phy_dsp(tp);
15705 }
15706
15707 return err;
15708 }
15709
15710 static void tg3_read_vpd(struct tg3 *tp)
15711 {
15712 u8 *vpd_data;
15713 unsigned int len, vpdlen;
15714 int i;
15715
15716 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15717 if (!vpd_data)
15718 goto out_no_vpd;
15719
15720 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15721 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
15722 if (i < 0)
15723 goto partno;
15724
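/* "1028" is Dell's PCI vendor ID (0x1028) in ASCII; the bootcode
 * version is read from the VENDOR0 VPD keyword only when the
 * manufacturer ID matches.
 */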
15725 if (len != 4 || memcmp(vpd_data + i, "1028", 4))
15726 goto partno;
15727
15728 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15729 PCI_VPD_RO_KEYWORD_VENDOR0, &len);
15730 if (i < 0)
15731 goto partno;
15732
15733 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15734 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);
15735
15736 partno:
15737 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15738 PCI_VPD_RO_KEYWORD_PARTNO, &len);
15739 if (i < 0)
15740 goto out_not_found;
15741
15742 if (len > TG3_BPN_SIZE)
15743 goto out_not_found;
15744
15745 memcpy(tp->board_part_number, &vpd_data[i], len);
15746
15747 out_not_found:
15748 kfree(vpd_data);
15749 if (tp->board_part_number[0])
15750 return;
15751
15752 out_no_vpd:
15753 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15754 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15755 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15756 strcpy(tp->board_part_number, "BCM5717");
15757 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15758 strcpy(tp->board_part_number, "BCM5718");
15759 else
15760 goto nomatch;
15761 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15762 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15763 strcpy(tp->board_part_number, "BCM57780");
15764 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15765 strcpy(tp->board_part_number, "BCM57760");
15766 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15767 strcpy(tp->board_part_number, "BCM57790");
15768 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15769 strcpy(tp->board_part_number, "BCM57788");
15770 else
15771 goto nomatch;
15772 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15773 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15774 strcpy(tp->board_part_number, "BCM57761");
15775 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15776 strcpy(tp->board_part_number, "BCM57765");
15777 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15778 strcpy(tp->board_part_number, "BCM57781");
15779 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15780 strcpy(tp->board_part_number, "BCM57785");
15781 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15782 strcpy(tp->board_part_number, "BCM57791");
15783 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15784 strcpy(tp->board_part_number, "BCM57795");
15785 else
15786 goto nomatch;
15787 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15788 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15789 strcpy(tp->board_part_number, "BCM57762");
15790 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15791 strcpy(tp->board_part_number, "BCM57766");
15792 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15793 strcpy(tp->board_part_number, "BCM57782");
15794 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15795 strcpy(tp->board_part_number, "BCM57786");
15796 else
15797 goto nomatch;
15798 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15799 strcpy(tp->board_part_number, "BCM95906");
15800 } else {
15801 nomatch:
15802 strcpy(tp->board_part_number, "none");
15803 }
15804 }
15805
15806 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15807 {
15808 u32 val;
15809
15810 if (tg3_nvram_read(tp, offset, &val) ||
15811 (val & 0xfc000000) != 0x0c000000 ||
15812 tg3_nvram_read(tp, offset + 4, &val) ||
15813 val != 0)
15814 return 0;
15815
15816 return 1;
15817 }
15818
15819 static void tg3_read_bc_ver(struct tg3 *tp)
15820 {
15821 u32 val, offset, start, ver_offset;
15822 int i, dst_off;
15823 bool newver = false;
15824
15825 if (tg3_nvram_read(tp, 0xc, &offset) ||
15826 tg3_nvram_read(tp, 0x4, &start))
15827 return;
15828
15829 offset = tg3_nvram_logical_addr(tp, offset);
15830
15831 if (tg3_nvram_read(tp, offset, &val))
15832 return;
15833
15834 if ((val & 0xfc000000) == 0x0c000000) {
15835 if (tg3_nvram_read(tp, offset + 4, &val))
15836 return;
15837
15838 if (val == 0)
15839 newver = true;
15840 }
15841
15842 dst_off = strlen(tp->fw_ver);
15843
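/* New-format images carry a 16-byte ASCII version string whose
 * NVRAM offset is stored at image offset 8; older images keep a
 * packed major/minor pair at the fixed TG3_NVM_PTREV_BCVER
 * location instead.
 */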
15844 if (newver) {
15845 if (TG3_VER_SIZE - dst_off < 16 ||
15846 tg3_nvram_read(tp, offset + 8, &ver_offset))
15847 return;
15848
15849 offset = offset + ver_offset - start;
15850 for (i = 0; i < 16; i += 4) {
15851 __be32 v;
15852 if (tg3_nvram_read_be32(tp, offset + i, &v))
15853 return;
15854
15855 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15856 }
15857 } else {
15858 u32 major, minor;
15859
15860 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15861 return;
15862
15863 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15864 TG3_NVM_BCVER_MAJSFT;
15865 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15866 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15867 "v%d.%02d", major, minor);
15868 }
15869 }
15870
15871 static void tg3_read_hwsb_ver(struct tg3 *tp)
15872 {
15873 u32 val, major, minor;
15874
15875 /* Use native endian representation */
15876 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15877 return;
15878
15879 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15880 TG3_NVM_HWSB_CFG1_MAJSFT;
15881 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15882 TG3_NVM_HWSB_CFG1_MINSFT;
15883
15884 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15885 }
15886
15887 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15888 {
15889 u32 offset, major, minor, build;
15890
15891 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15892
15893 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15894 return;
15895
15896 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15897 case TG3_EEPROM_SB_REVISION_0:
15898 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15899 break;
15900 case TG3_EEPROM_SB_REVISION_2:
15901 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15902 break;
15903 case TG3_EEPROM_SB_REVISION_3:
15904 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15905 break;
15906 case TG3_EEPROM_SB_REVISION_4:
15907 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15908 break;
15909 case TG3_EEPROM_SB_REVISION_5:
15910 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15911 break;
15912 case TG3_EEPROM_SB_REVISION_6:
15913 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15914 break;
15915 default:
15916 return;
15917 }
15918
15919 if (tg3_nvram_read(tp, offset, &val))
15920 return;
15921
15922 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15923 TG3_EEPROM_SB_EDH_BLD_SHFT;
15924 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15925 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15926 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15927
15928 if (minor > 99 || build > 26)
15929 return;
15930
15931 offset = strlen(tp->fw_ver);
15932 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15933 " v%d.%02d", major, minor);
15934
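/* A non-zero build number becomes a suffix letter, 'a' for build 1
 * up to 'z' for build 26 (hence the build > 26 check above).
 */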
15935 if (build > 0) {
15936 offset = strlen(tp->fw_ver);
15937 if (offset < TG3_VER_SIZE - 1)
15938 tp->fw_ver[offset] = 'a' + build - 1;
15939 }
15940 }
15941
15942 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15943 {
15944 u32 val, offset, start;
15945 int i, vlen;
15946
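/* Walk the NVRAM directory looking for the ASF init-code entry;
 * the entry type lives in the high bits of each directory word.
 */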
15947 for (offset = TG3_NVM_DIR_START;
15948 offset < TG3_NVM_DIR_END;
15949 offset += TG3_NVM_DIRENT_SIZE) {
15950 if (tg3_nvram_read(tp, offset, &val))
15951 return;
15952
15953 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15954 break;
15955 }
15956
15957 if (offset == TG3_NVM_DIR_END)
15958 return;
15959
15960 if (!tg3_flag(tp, 5705_PLUS))
15961 start = 0x08000000;
15962 else if (tg3_nvram_read(tp, offset - 4, &start))
15963 return;
15964
15965 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15966 !tg3_fw_img_is_valid(tp, offset) ||
15967 tg3_nvram_read(tp, offset + 8, &val))
15968 return;
15969
15970 offset += val - start;
15971
15972 vlen = strlen(tp->fw_ver);
15973
15974 tp->fw_ver[vlen++] = ',';
15975 tp->fw_ver[vlen++] = ' ';
15976
15977 for (i = 0; i < 4; i++) {
15978 __be32 v;
15979 if (tg3_nvram_read_be32(tp, offset, &v))
15980 return;
15981
15982 offset += sizeof(v);
15983
15984 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15985 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15986 break;
15987 }
15988
15989 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15990 vlen += sizeof(v);
15991 }
15992 }
15993
15994 static void tg3_probe_ncsi(struct tg3 *tp)
15995 {
15996 u32 apedata;
15997
15998 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15999 if (apedata != APE_SEG_SIG_MAGIC)
16000 return;
16001
16002 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
16003 if (!(apedata & APE_FW_STATUS_READY))
16004 return;
16005
16006 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
16007 tg3_flag_set(tp, APE_HAS_NCSI);
16008 }
16009
16010 static void tg3_read_dash_ver(struct tg3 *tp)
16011 {
16012 int vlen;
16013 u32 apedata;
16014 char *fwtype;
16015
16016 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
16017
16018 if (tg3_flag(tp, APE_HAS_NCSI))
16019 fwtype = "NCSI";
16020 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
16021 fwtype = "SMASH";
16022 else
16023 fwtype = "DASH";
16024
16025 vlen = strlen(tp->fw_ver);
16026
16027 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
16028 fwtype,
16029 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
16030 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
16031 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
16032 (apedata & APE_FW_VERSION_BLDMSK));
16033 }
16034
16035 static void tg3_read_otp_ver(struct tg3 *tp)
16036 {
16037 u32 val, val2;
16038
16039 if (tg3_asic_rev(tp) != ASIC_REV_5762)
16040 return;
16041
16042 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
16043 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
16044 TG3_OTP_MAGIC0_VALID(val)) {
16045 u64 val64 = (u64) val << 32 | val2;
16046 u32 ver = 0;
16047 int i, vlen;
16048
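/* The version appears to be stored as a packed byte string in
 * val64; walk the low-order bytes until a zero byte and keep the
 * last non-zero byte as the version number.
 */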
16049 for (i = 0; i < 7; i++) {
16050 if ((val64 & 0xff) == 0)
16051 break;
16052 ver = val64 & 0xff;
16053 val64 >>= 8;
16054 }
16055 vlen = strlen(tp->fw_ver);
16056 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
16057 }
16058 }
16059
16060 static void tg3_read_fw_ver(struct tg3 *tp)
16061 {
16062 u32 val;
16063 bool vpd_vers = false;
16064
16065 if (tp->fw_ver[0] != 0)
16066 vpd_vers = true;
16067
16068 if (tg3_flag(tp, NO_NVRAM)) {
16069 strcat(tp->fw_ver, "sb");
16070 tg3_read_otp_ver(tp);
16071 return;
16072 }
16073
16074 if (tg3_nvram_read(tp, 0, &val))
16075 return;
16076
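/* Word 0 of the NVRAM identifies the image type: full bootcode,
 * firmware self-boot, or hardware self-boot.
 */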
16077 if (val == TG3_EEPROM_MAGIC)
16078 tg3_read_bc_ver(tp);
16079 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16080 tg3_read_sb_ver(tp, val);
16081 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16082 tg3_read_hwsb_ver(tp);
16083
16084 if (tg3_flag(tp, ENABLE_ASF)) {
16085 if (tg3_flag(tp, ENABLE_APE)) {
16086 tg3_probe_ncsi(tp);
16087 if (!vpd_vers)
16088 tg3_read_dash_ver(tp);
16089 } else if (!vpd_vers) {
16090 tg3_read_mgmtfw_ver(tp);
16091 }
16092 }
16093
16094 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16095 }
16096
16097 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16098 {
16099 if (tg3_flag(tp, LRG_PROD_RING_CAP))
16100 return TG3_RX_RET_MAX_SIZE_5717;
16101 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16102 return TG3_RX_RET_MAX_SIZE_5700;
16103 else
16104 return TG3_RX_RET_MAX_SIZE_5705;
16105 }
16106
16107 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16108 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16109 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16110 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16111 { },
16112 };
16113
16114 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16115 {
16116 struct pci_dev *peer;
16117 unsigned int func, devnr = tp->pdev->devfn & ~7;
16118
16119 for (func = 0; func < 8; func++) {
16120 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16121 if (peer && peer != tp->pdev)
16122 break;
16123 pci_dev_put(peer);
16124 }
16125 /* 5704 can be configured in single-port mode, set peer to
16126 * tp->pdev in that case.
16127 */
16128 if (!peer) {
16129 peer = tp->pdev;
16130 return peer;
16131 }
16132
16133 /*
16134 * We don't need to keep the refcount elevated; there's no way
16135 * to remove one half of this device without removing the other.
16136 */
16137 pci_dev_put(peer);
16138
16139 return peer;
16140 }
16141
16142 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16143 {
16144 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16145 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16146 u32 reg;
16147
16148 /* All devices that use the alternate
16149 * ASIC REV location have a CPMU.
16150 */
16151 tg3_flag_set(tp, CPMU_PRESENT);
16152
16153 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16154 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16155 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16156 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16157 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16158 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16159 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16160 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16161 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16162 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16163 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16164 reg = TG3PCI_GEN2_PRODID_ASICREV;
16165 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16166 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16167 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16168 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16169 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16170 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16171 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16172 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16173 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16174 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16175 reg = TG3PCI_GEN15_PRODID_ASICREV;
16176 else
16177 reg = TG3PCI_PRODID_ASICREV;
16178
16179 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16180 }
16181
16182 /* Wrong chip ID in 5752 A0. This code can be removed later
16183 * as A0 is not in production.
16184 */
16185 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16186 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16187
16188 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16189 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16190
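/* The generation flags set below are cumulative: 5717_PLUS and
 * 57765_CLASS imply 57765_PLUS, which implies 5755_PLUS, which in
 * turn implies 5750_PLUS and finally 5705_PLUS.
 */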
16191 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16192 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16193 tg3_asic_rev(tp) == ASIC_REV_5720)
16194 tg3_flag_set(tp, 5717_PLUS);
16195
16196 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16197 tg3_asic_rev(tp) == ASIC_REV_57766)
16198 tg3_flag_set(tp, 57765_CLASS);
16199
16200 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16201 tg3_asic_rev(tp) == ASIC_REV_5762)
16202 tg3_flag_set(tp, 57765_PLUS);
16203
16204 /* Intentionally exclude ASIC_REV_5906 */
16205 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16206 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16207 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16208 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16209 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16210 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16211 tg3_flag(tp, 57765_PLUS))
16212 tg3_flag_set(tp, 5755_PLUS);
16213
16214 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16215 tg3_asic_rev(tp) == ASIC_REV_5714)
16216 tg3_flag_set(tp, 5780_CLASS);
16217
16218 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16219 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16220 tg3_asic_rev(tp) == ASIC_REV_5906 ||
16221 tg3_flag(tp, 5755_PLUS) ||
16222 tg3_flag(tp, 5780_CLASS))
16223 tg3_flag_set(tp, 5750_PLUS);
16224
16225 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16226 tg3_flag(tp, 5750_PLUS))
16227 tg3_flag_set(tp, 5705_PLUS);
16228 }
16229
16230 static bool tg3_10_100_only_device(struct tg3 *tp,
16231 const struct pci_device_id *ent)
16232 {
16233 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16234
16235 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16236 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16237 (tp->phy_flags & TG3_PHYFLG_IS_FET))
16238 return true;
16239
16240 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16241 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16242 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16243 return true;
16244 } else {
16245 return true;
16246 }
16247 }
16248
16249 return false;
16250 }
16251
16252 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16253 {
16254 u32 misc_ctrl_reg;
16255 u32 pci_state_reg, grc_misc_cfg;
16256 u32 val;
16257 u16 pci_cmd;
16258 int err;
16259
16260 /* Force memory write invalidate off. If we leave it on,
16261 * then on 5700_BX chips we have to enable a workaround.
16262 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16263 * to match the cacheline size. The Broadcom driver has this
16264 * workaround but turns MWI off all the time and so never
16265 * uses it. This suggests that the workaround is insufficient.
16266 */
16267 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16268 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16269 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16270
16271 /* Important! -- Make sure register accesses are byteswapped
16272 * correctly. Also, for those chips that require it, make
16273 * sure that indirect register accesses are enabled before
16274 * the first operation.
16275 */
16276 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16277 &misc_ctrl_reg);
16278 tp->misc_host_ctrl |= (misc_ctrl_reg &
16279 MISC_HOST_CTRL_CHIPREV);
16280 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16281 tp->misc_host_ctrl);
16282
16283 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16284
16285 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16286 * we need to disable memory and use config. cycles
16287 * only to access all registers. The 5702/03 chips
16288 * can mistakenly decode the special cycles from the
16289 * ICH chipsets as memory write cycles, causing corruption
16290 * of register and memory space. Only certain ICH bridges
16291 * will drive special cycles with non-zero data during the
16292 * address phase which can fall within the 5703's address
16293 * range. This is not an ICH bug as the PCI spec allows
16294 * non-zero address during special cycles. However, only
16295 * these ICH bridges are known to drive non-zero addresses
16296 * during special cycles.
16297 *
16298 * Since special cycles do not cross PCI bridges, we only
16299 * enable this workaround if the 5703 is on the secondary
16300 * bus of these ICH bridges.
16301 */
16302 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16303 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16304 static struct tg3_dev_id {
16305 u32 vendor;
16306 u32 device;
16307 u32 rev;
16308 } ich_chipsets[] = {
16309 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16310 PCI_ANY_ID },
16311 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16312 PCI_ANY_ID },
16313 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16314 0xa },
16315 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16316 PCI_ANY_ID },
16317 { },
16318 };
16319 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16320 struct pci_dev *bridge = NULL;
16321
16322 while (pci_id->vendor != 0) {
16323 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16324 bridge);
16325 if (!bridge) {
16326 pci_id++;
16327 continue;
16328 }
16329 if (pci_id->rev != PCI_ANY_ID) {
16330 if (bridge->revision > pci_id->rev)
16331 continue;
16332 }
16333 if (bridge->subordinate &&
16334 (bridge->subordinate->number ==
16335 tp->pdev->bus->number)) {
16336 tg3_flag_set(tp, ICH_WORKAROUND);
16337 pci_dev_put(bridge);
16338 break;
16339 }
16340 }
16341 }
16342
16343 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16344 static struct tg3_dev_id {
16345 u32 vendor;
16346 u32 device;
16347 } bridge_chipsets[] = {
16348 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16349 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16350 { },
16351 };
16352 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16353 struct pci_dev *bridge = NULL;
16354
16355 while (pci_id->vendor != 0) {
16356 bridge = pci_get_device(pci_id->vendor,
16357 pci_id->device,
16358 bridge);
16359 if (!bridge) {
16360 pci_id++;
16361 continue;
16362 }
16363 if (bridge->subordinate &&
16364 (bridge->subordinate->number <=
16365 tp->pdev->bus->number) &&
16366 (bridge->subordinate->busn_res.end >=
16367 tp->pdev->bus->number)) {
16368 tg3_flag_set(tp, 5701_DMA_BUG);
16369 pci_dev_put(bridge);
16370 break;
16371 }
16372 }
16373 }
16374
16375 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16376 * DMA addresses > 40-bit. This bridge may have additional
16377 * 57xx devices behind it, for example in some 4-port NIC designs.
16378 * Any tg3 device found behind the bridge will also need the 40-bit
16379 * DMA workaround.
16380 */
16381 if (tg3_flag(tp, 5780_CLASS)) {
16382 tg3_flag_set(tp, 40BIT_DMA_BUG);
16383 tp->msi_cap = tp->pdev->msi_cap;
16384 } else {
16385 struct pci_dev *bridge = NULL;
16386
16387 do {
16388 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16389 PCI_DEVICE_ID_SERVERWORKS_EPB,
16390 bridge);
16391 if (bridge && bridge->subordinate &&
16392 (bridge->subordinate->number <=
16393 tp->pdev->bus->number) &&
16394 (bridge->subordinate->busn_res.end >=
16395 tp->pdev->bus->number)) {
16396 tg3_flag_set(tp, 40BIT_DMA_BUG);
16397 pci_dev_put(bridge);
16398 break;
16399 }
16400 } while (bridge);
16401 }
16402
16403 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16404 tg3_asic_rev(tp) == ASIC_REV_5714)
16405 tp->pdev_peer = tg3_find_peer(tp);
16406
16407 /* Determine TSO capabilities */
16408 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16409 ; /* Do nothing. HW bug. */
16410 else if (tg3_flag(tp, 57765_PLUS))
16411 tg3_flag_set(tp, HW_TSO_3);
16412 else if (tg3_flag(tp, 5755_PLUS) ||
16413 tg3_asic_rev(tp) == ASIC_REV_5906)
16414 tg3_flag_set(tp, HW_TSO_2);
16415 else if (tg3_flag(tp, 5750_PLUS)) {
16416 tg3_flag_set(tp, HW_TSO_1);
16417 tg3_flag_set(tp, TSO_BUG);
16418 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16419 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16420 tg3_flag_clear(tp, TSO_BUG);
16421 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16422 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16423 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16424 tg3_flag_set(tp, FW_TSO);
16425 tg3_flag_set(tp, TSO_BUG);
16426 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16427 tp->fw_needed = FIRMWARE_TG3TSO5;
16428 else
16429 tp->fw_needed = FIRMWARE_TG3TSO;
16430 }
16431
16432 /* Selectively allow TSO based on operating conditions */
16433 if (tg3_flag(tp, HW_TSO_1) ||
16434 tg3_flag(tp, HW_TSO_2) ||
16435 tg3_flag(tp, HW_TSO_3) ||
16436 tg3_flag(tp, FW_TSO)) {
16437 /* For firmware TSO, assume ASF is disabled.
16438 * We'll disable TSO later if we discover ASF
16439 * is enabled in tg3_get_eeprom_hw_cfg().
16440 */
16441 tg3_flag_set(tp, TSO_CAPABLE);
16442 } else {
16443 tg3_flag_clear(tp, TSO_CAPABLE);
16444 tg3_flag_clear(tp, TSO_BUG);
16445 tp->fw_needed = NULL;
16446 }
16447
16448 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16449 tp->fw_needed = FIRMWARE_TG3;
16450
16451 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16452 tp->fw_needed = FIRMWARE_TG357766;
16453
16454 tp->irq_max = 1;
16455
16456 if (tg3_flag(tp, 5750_PLUS)) {
16457 tg3_flag_set(tp, SUPPORT_MSI);
16458 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16459 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16460 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16461 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16462 tp->pdev_peer == tp->pdev))
16463 tg3_flag_clear(tp, SUPPORT_MSI);
16464
16465 if (tg3_flag(tp, 5755_PLUS) ||
16466 tg3_asic_rev(tp) == ASIC_REV_5906) {
16467 tg3_flag_set(tp, 1SHOT_MSI);
16468 }
16469
16470 if (tg3_flag(tp, 57765_PLUS)) {
16471 tg3_flag_set(tp, SUPPORT_MSIX);
16472 tp->irq_max = TG3_IRQ_MAX_VECS;
16473 }
16474 }
16475
16476 tp->txq_max = 1;
16477 tp->rxq_max = 1;
16478 if (tp->irq_max > 1) {
16479 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16480 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16481
16482 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16483 tg3_asic_rev(tp) == ASIC_REV_5720)
16484 tp->txq_max = tp->irq_max - 1;
16485 }
16486
16487 if (tg3_flag(tp, 5755_PLUS) ||
16488 tg3_asic_rev(tp) == ASIC_REV_5906)
16489 tg3_flag_set(tp, SHORT_DMA_BUG);
16490
16491 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16492 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16493
16494 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16495 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16496 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16497 tg3_asic_rev(tp) == ASIC_REV_5762)
16498 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16499
16500 if (tg3_flag(tp, 57765_PLUS) &&
16501 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16502 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16503
16504 if (!tg3_flag(tp, 5705_PLUS) ||
16505 tg3_flag(tp, 5780_CLASS) ||
16506 tg3_flag(tp, USE_JUMBO_BDFLAG))
16507 tg3_flag_set(tp, JUMBO_CAPABLE);
16508
16509 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16510 &pci_state_reg);
16511
16512 if (pci_is_pcie(tp->pdev)) {
16513 u16 lnkctl;
16514
16515 tg3_flag_set(tp, PCI_EXPRESS);
16516
16517 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16518 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16519 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16520 tg3_flag_clear(tp, HW_TSO_2);
16521 tg3_flag_clear(tp, TSO_CAPABLE);
16522 }
16523 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16524 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16525 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16526 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16527 tg3_flag_set(tp, CLKREQ_BUG);
16528 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16529 tg3_flag_set(tp, L1PLLPD_EN);
16530 }
16531 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16532 /* BCM5785 devices are effectively PCIe devices, and should
16533 * follow PCIe codepaths, but do not have a PCIe capabilities
16534 * section.
16535 */
16536 tg3_flag_set(tp, PCI_EXPRESS);
16537 } else if (!tg3_flag(tp, 5705_PLUS) ||
16538 tg3_flag(tp, 5780_CLASS)) {
16539 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16540 if (!tp->pcix_cap) {
16541 dev_err(&tp->pdev->dev,
16542 "Cannot find PCI-X capability, aborting\n");
16543 return -EIO;
16544 }
16545
16546 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16547 tg3_flag_set(tp, PCIX_MODE);
16548 }
16549
16550 /* If we have an AMD 762 or VIA K8T800 chipset, write
16551 * reordering to the mailbox registers done by the host
16552 * controller can cause major trouble. We read back from
16553 * every mailbox register write to force the writes to be
16554 * posted to the chip in order.
16555 */
16556 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16557 !tg3_flag(tp, PCI_EXPRESS))
16558 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16559
16560 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16561 &tp->pci_cacheline_sz);
16562 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16563 &tp->pci_lat_timer);
16564 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16565 tp->pci_lat_timer < 64) {
16566 tp->pci_lat_timer = 64;
16567 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16568 tp->pci_lat_timer);
16569 }
16570
16571 /* Important! -- It is critical that the PCI-X hw workaround
16572 * situation is decided before the first MMIO register access.
16573 */
16574 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16575 /* 5700 BX chips need to have their TX producer index
16576 * mailboxes written twice to workaround a bug.
16577 */
16578 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16579
16580 /* If we are in PCI-X mode, enable register write workaround.
16581 *
16582 * The workaround is to use indirect register accesses
16583 * for all chip writes not to mailbox registers.
16584 */
16585 if (tg3_flag(tp, PCIX_MODE)) {
16586 u32 pm_reg;
16587
16588 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16589
16590 /* The chip can have its power management PCI config
16591 * space registers clobbered due to this bug.
16592 * So explicitly force the chip into D0 here.
16593 */
16594 pci_read_config_dword(tp->pdev,
16595 tp->pdev->pm_cap + PCI_PM_CTRL,
16596 &pm_reg);
16597 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16598 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16599 pci_write_config_dword(tp->pdev,
16600 tp->pdev->pm_cap + PCI_PM_CTRL,
16601 pm_reg);
16602
16603 /* Also, force SERR#/PERR# in PCI command. */
16604 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16605 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16606 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16607 }
16608 }
16609
16610 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16611 tg3_flag_set(tp, PCI_HIGH_SPEED);
16612 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16613 tg3_flag_set(tp, PCI_32BIT);
16614
16615 /* Chip-specific fixup from Broadcom driver */
16616 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16617 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16618 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16619 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16620 }
16621
16622 /* Default fast path register access methods */
16623 tp->read32 = tg3_read32;
16624 tp->write32 = tg3_write32;
16625 tp->read32_mbox = tg3_read32;
16626 tp->write32_mbox = tg3_write32;
16627 tp->write32_tx_mbox = tg3_write32;
16628 tp->write32_rx_mbox = tg3_write32;
16629
16630 /* Various workaround register access methods */
16631 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16632 tp->write32 = tg3_write_indirect_reg32;
16633 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16634 (tg3_flag(tp, PCI_EXPRESS) &&
16635 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16636 /*
16637 * Back to back register writes can cause problems on these
16638 * chips, the workaround is to read back all reg writes
16639 * except those to mailbox regs.
16640 *
16641 * See tg3_write_indirect_reg32().
16642 */
16643 tp->write32 = tg3_write_flush_reg32;
16644 }
16645
16646 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16647 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16648 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16649 tp->write32_rx_mbox = tg3_write_flush_reg32;
16650 }
16651
16652 if (tg3_flag(tp, ICH_WORKAROUND)) {
16653 tp->read32 = tg3_read_indirect_reg32;
16654 tp->write32 = tg3_write_indirect_reg32;
16655 tp->read32_mbox = tg3_read_indirect_mbox;
16656 tp->write32_mbox = tg3_write_indirect_mbox;
16657 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16658 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16659
16660 iounmap(tp->regs);
16661 tp->regs = NULL;
16662
16663 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16664 pci_cmd &= ~PCI_COMMAND_MEMORY;
16665 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16666 }
16667 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16668 tp->read32_mbox = tg3_read32_mbox_5906;
16669 tp->write32_mbox = tg3_write32_mbox_5906;
16670 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16671 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16672 }
16673
16674 if (tp->write32 == tg3_write_indirect_reg32 ||
16675 (tg3_flag(tp, PCIX_MODE) &&
16676 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16677 tg3_asic_rev(tp) == ASIC_REV_5701)))
16678 tg3_flag_set(tp, SRAM_USE_CONFIG);
16679
16680 /* The memory arbiter has to be enabled in order for SRAM accesses
16681 * to succeed. Normally on powerup the tg3 chip firmware will make
16682 * sure it is enabled, but other entities such as system netboot
16683 * code might disable it.
16684 */
16685 val = tr32(MEMARB_MODE);
16686 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16687
16688 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16689 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16690 tg3_flag(tp, 5780_CLASS)) {
16691 if (tg3_flag(tp, PCIX_MODE)) {
16692 pci_read_config_dword(tp->pdev,
16693 tp->pcix_cap + PCI_X_STATUS,
16694 &val);
16695 tp->pci_fn = val & 0x7;
16696 }
16697 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16698 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16699 tg3_asic_rev(tp) == ASIC_REV_5720) {
16700 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16701 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16702 val = tr32(TG3_CPMU_STATUS);
16703
16704 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16705 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16706 else
16707 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16708 TG3_CPMU_STATUS_FSHFT_5719;
16709 }
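
/* Worked example of the 5719/5720 decode above (made-up readout, and
 * assuming the function field sits in the top bits as implied by
 * TG3_CPMU_STATUS_FMSK_5719/TG3_CPMU_STATUS_FSHFT_5719):
 *
 *	u32 val = 0x80000000;	// hypothetical TG3_CPMU_STATUS value
 *	u32 fn  = (val & TG3_CPMU_STATUS_FMSK_5719) >>
 *		  TG3_CPMU_STATUS_FSHFT_5719;	// fn == 2 here
 */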
16710
16711 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16712 tp->write32_tx_mbox = tg3_write_flush_reg32;
16713 tp->write32_rx_mbox = tg3_write_flush_reg32;
16714 }
16715
16716 /* Get eeprom hw config before calling tg3_set_power_state().
16717 * In particular, the TG3_FLAG_IS_NIC flag must be
16718 * determined before calling tg3_set_power_state() so that
16719 * we know whether or not to switch out of Vaux power.
16720 * When the flag is set, it means that GPIO1 is used for eeprom
16721 * write protect and also implies that it is a LOM where GPIOs
16722 * are not used to switch power.
16723 */
16724 tg3_get_eeprom_hw_cfg(tp);
16725
16726 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16727 tg3_flag_clear(tp, TSO_CAPABLE);
16728 tg3_flag_clear(tp, TSO_BUG);
16729 tp->fw_needed = NULL;
16730 }
16731
16732 if (tg3_flag(tp, ENABLE_APE)) {
16733 /* Allow reads and writes to the
16734 * APE register and memory space.
16735 */
16736 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16737 PCISTATE_ALLOW_APE_SHMEM_WR |
16738 PCISTATE_ALLOW_APE_PSPACE_WR;
16739 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16740 pci_state_reg);
16741
16742 tg3_ape_lock_init(tp);
16743 tp->ape_hb_interval =
16744 msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16745 }
16746
16747 /* Set up tp->grc_local_ctrl before calling
16748 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16749 * will bring 5700's external PHY out of reset.
16750 * It is also used as eeprom write protect on LOMs.
16751 */
16752 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16753 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16754 tg3_flag(tp, EEPROM_WRITE_PROT))
16755 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16756 GRC_LCLCTRL_GPIO_OUTPUT1);
16757 /* Unused GPIO3 must be driven as output on 5752 because there
16758 * are no pull-up resistors on unused GPIO pins.
16759 */
16760 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16761 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16762
16763 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16764 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16765 tg3_flag(tp, 57765_CLASS))
16766 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16767
16768 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16769 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16770 /* Turn off the debug UART. */
16771 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16772 if (tg3_flag(tp, IS_NIC))
16773 /* Keep VMain power. */
16774 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16775 GRC_LCLCTRL_GPIO_OUTPUT0;
16776 }
16777
16778 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16779 tp->grc_local_ctrl |=
16780 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16781
16782 /* Switch out of Vaux if it is a NIC */
16783 tg3_pwrsrc_switch_to_vmain(tp);
16784
16785 /* Derive initial jumbo mode from MTU assigned in
16786 * ether_setup() via the alloc_etherdev() call
16787 */
16788 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16789 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16790
16791 /* Determine WakeOnLan speed to use. */
16792 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16793 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16794 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16795 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16796 tg3_flag_clear(tp, WOL_SPEED_100MB);
16797 } else {
16798 tg3_flag_set(tp, WOL_SPEED_100MB);
16799 }
16800
16801 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16802 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16803
16804 /* A few boards don't want the Ethernet@WireSpeed phy feature */
16805 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16806 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16807 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16808 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16809 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16810 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16811 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16812
16813 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16814 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16815 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16816 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16817 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16818
16819 if (tg3_flag(tp, 5705_PLUS) &&
16820 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16821 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16822 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16823 !tg3_flag(tp, 57765_PLUS)) {
16824 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16825 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16826 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16827 tg3_asic_rev(tp) == ASIC_REV_5761) {
16828 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16829 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16830 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16831 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16832 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16833 } else
16834 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16835 }
16836
16837 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16838 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16839 tp->phy_otp = tg3_read_otp_phycfg(tp);
16840 if (tp->phy_otp == 0)
16841 tp->phy_otp = TG3_OTP_DEFAULT;
16842 }
16843
16844 if (tg3_flag(tp, CPMU_PRESENT))
16845 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16846 else
16847 tp->mi_mode = MAC_MI_MODE_BASE;
16848
16849 tp->coalesce_mode = 0;
16850 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16851 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16852 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16853
16854 /* Set these bits to enable statistics workaround. */
16855 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16856 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16857 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16858 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16859 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16860 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16861 }
16862
16863 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16864 tg3_asic_rev(tp) == ASIC_REV_57780)
16865 tg3_flag_set(tp, USE_PHYLIB);
16866
16867 err = tg3_mdio_init(tp);
16868 if (err)
16869 return err;
16870
16871 /* Initialize data/descriptor byte/word swapping. */
16872 val = tr32(GRC_MODE);
16873 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16874 tg3_asic_rev(tp) == ASIC_REV_5762)
16875 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16876 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16877 GRC_MODE_B2HRX_ENABLE |
16878 GRC_MODE_HTX2B_ENABLE |
16879 GRC_MODE_HOST_STACKUP);
16880 else
16881 val &= GRC_MODE_HOST_STACKUP;
16882
16883 tw32(GRC_MODE, val | tp->grc_mode);
16884
16885 tg3_switch_clocks(tp);
16886
16887 /* Clear this out for sanity. */
16888 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16889
16890 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16891 tw32(TG3PCI_REG_BASE_ADDR, 0);
16892
16893 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16894 &pci_state_reg);
16895 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16896 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16897 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16898 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16899 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16900 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16901 void __iomem *sram_base;
16902
16903 /* Write some dummy words into the SRAM status block
16904 * area and see if it reads back correctly. If the return
16905 * value is bad, force enable the PCIX workaround.
16906 */
16907 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16908
16909 writel(0x00000000, sram_base);
16910 writel(0x00000000, sram_base + 4);
16911 writel(0xffffffff, sram_base + 4);
16912 if (readl(sram_base) != 0x00000000)
16913 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16914 }
16915 }
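
/* The probe above is a plain write/read-back check. The same idea as
 * a self-contained sketch (hypothetical helper, not part of tg3):
 *
 *	static bool sram_word_sticks(void __iomem *p)
 *	{
 *		writel(0x00000000, p);
 *		writel(0xffffffff, p + 4);	// disturb the next word
 *		return readl(p) == 0;		// false => flaky target
 *	}
 *
 * A false return here corresponds to setting PCIX_TARGET_HWBUG.
 */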
16916
16917 udelay(50);
16918 tg3_nvram_init(tp);
16919
16920 /* If the device has an NVRAM, no need to load patch firmware */
16921 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16922 !tg3_flag(tp, NO_NVRAM))
16923 tp->fw_needed = NULL;
16924
16925 grc_misc_cfg = tr32(GRC_MISC_CFG);
16926 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16927
16928 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16929 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16930 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16931 tg3_flag_set(tp, IS_5788);
16932
16933 if (!tg3_flag(tp, IS_5788) &&
16934 tg3_asic_rev(tp) != ASIC_REV_5700)
16935 tg3_flag_set(tp, TAGGED_STATUS);
16936 if (tg3_flag(tp, TAGGED_STATUS)) {
16937 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16938 HOSTCC_MODE_CLRTICK_TXBD);
16939
16940 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16941 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16942 tp->misc_host_ctrl);
16943 }
16944
16945 /* Preserve the APE MAC_MODE bits */
16946 if (tg3_flag(tp, ENABLE_APE))
16947 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16948 else
16949 tp->mac_mode = 0;
16950
16951 if (tg3_10_100_only_device(tp, ent))
16952 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16953
16954 err = tg3_phy_probe(tp);
16955 if (err) {
16956 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16957 /* ... but do not return immediately ... */
16958 tg3_mdio_fini(tp);
16959 }
16960
16961 tg3_read_vpd(tp);
16962 tg3_read_fw_ver(tp);
16963
16964 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16965 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16966 } else {
16967 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16968 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16969 else
16970 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16971 }
16972
16973 /* 5700 {AX,BX} chips have a broken status block link
16974 * change bit implementation, so we must use the
16975 * status register in those cases.
16976 */
16977 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16978 tg3_flag_set(tp, USE_LINKCHG_REG);
16979 else
16980 tg3_flag_clear(tp, USE_LINKCHG_REG);
16981
16982 /* The led_ctrl is set during tg3_phy_probe; here we might
16983 * have to force the link status polling mechanism based
16984 * upon subsystem IDs.
16985 */
16986 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16987 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16988 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16989 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16990 tg3_flag_set(tp, USE_LINKCHG_REG);
16991 }
16992
16993 /* For all SERDES we poll the MAC status register. */
16994 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16995 tg3_flag_set(tp, POLL_SERDES);
16996 else
16997 tg3_flag_clear(tp, POLL_SERDES);
16998
16999 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
17000 tg3_flag_set(tp, POLL_CPMU_LINK);
17001
17002 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
17003 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
17004 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
17005 tg3_flag(tp, PCIX_MODE)) {
17006 tp->rx_offset = NET_SKB_PAD;
17007 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
17008 tp->rx_copy_thresh = ~(u16)0;
17009 #endif
17010 }
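
/* Why the extra NET_IP_ALIGN matters (illustrative arithmetic): with
 * NET_IP_ALIGN == 2 on most architectures, the 14-byte Ethernet
 * header ends on a 4-byte boundary, so the IP header that follows is
 * naturally aligned:
 *
 *	ip_offset = NET_SKB_PAD + NET_IP_ALIGN + ETH_HLEN
 *	          = NET_SKB_PAD + 2 + 14	// multiple of 4
 *
 * The 5701-in-PCI-X case above cannot DMA to the +2 offset, so on
 * architectures without efficient unaligned access rx_copy_thresh is
 * maxed out and every packet is copied to an aligned buffer instead.
 */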
17011
17012 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
17013 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
17014 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
17015
17016 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
17017
17018 /* Increment the rx prod index on the rx std ring by at most
17019 * 8 for these chips to work around hw errata.
17020 */
17021 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
17022 tg3_asic_rev(tp) == ASIC_REV_5752 ||
17023 tg3_asic_rev(tp) == ASIC_REV_5755)
17024 tp->rx_std_max_post = 8;
17025
17026 if (tg3_flag(tp, ASPM_WORKAROUND))
17027 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
17028 PCIE_PWR_MGMT_L1_THRESH_MSK;
17029
17030 return err;
17031 }
17032
17033 static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
17034 {
17035 u32 hi, lo, mac_offset;
17036 int addr_ok = 0;
17037 int err;
17038
17039 if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
17040 return 0;
17041
17042 if (tg3_flag(tp, IS_SSB_CORE)) {
17043 err = ssb_gige_get_macaddr(tp->pdev, addr);
17044 if (!err && is_valid_ether_addr(addr))
17045 return 0;
17046 }
17047
17048 mac_offset = 0x7c;
17049 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17050 tg3_flag(tp, 5780_CLASS)) {
17051 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17052 mac_offset = 0xcc;
17053 if (tg3_nvram_lock(tp))
17054 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17055 else
17056 tg3_nvram_unlock(tp);
17057 } else if (tg3_flag(tp, 5717_PLUS)) {
17058 if (tp->pci_fn & 1)
17059 mac_offset = 0xcc;
17060 if (tp->pci_fn > 1)
17061 mac_offset += 0x18c;
17062 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17063 mac_offset = 0x10;
17064
17065 /* First try to get it from MAC address mailbox. */
17066 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
17067 if ((hi >> 16) == 0x484b) {
17068 addr[0] = (hi >> 8) & 0xff;
17069 addr[1] = (hi >> 0) & 0xff;
17070
17071 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17072 addr[2] = (lo >> 24) & 0xff;
17073 addr[3] = (lo >> 16) & 0xff;
17074 addr[4] = (lo >> 8) & 0xff;
17075 addr[5] = (lo >> 0) & 0xff;
17076
17077 /* Some old bootcode may report a 0 MAC address in SRAM */
17078 addr_ok = is_valid_ether_addr(addr);
17079 }
17080 if (!addr_ok) {
17081 __be32 be_hi, be_lo;
17082
17083 /* Next, try NVRAM. */
17084 if (!tg3_flag(tp, NO_NVRAM) &&
17085 !tg3_nvram_read_be32(tp, mac_offset + 0, &be_hi) &&
17086 !tg3_nvram_read_be32(tp, mac_offset + 4, &be_lo)) {
17087 memcpy(&addr[0], ((char *)&be_hi) + 2, 2);
17088 memcpy(&addr[2], (char *)&be_lo, sizeof(be_lo));
17089 }
17090 /* Finally just fetch it out of the MAC control regs. */
17091 else {
17092 hi = tr32(MAC_ADDR_0_HIGH);
17093 lo = tr32(MAC_ADDR_0_LOW);
17094
17095 addr[5] = lo & 0xff;
17096 addr[4] = (lo >> 8) & 0xff;
17097 addr[3] = (lo >> 16) & 0xff;
17098 addr[2] = (lo >> 24) & 0xff;
17099 addr[1] = hi & 0xff;
17100 addr[0] = (hi >> 8) & 0xff;
17101 }
17102 }
17103
17104 if (!is_valid_ether_addr(addr))
17105 return -EINVAL;
17106 return 0;
17107 }
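
/* Worked example (made-up values): if the SRAM mailbox holds
 * hi = 0x484b0a1b and lo = 0x2c3d4e5f, the 0x484b ("HK") signature in
 * the upper half of hi matches and the bytes unpack to the address
 * 0a:1b:2c:3d:4e:5f:
 *
 *	addr[0] = (hi >> 8) & 0xff;	// 0x0a
 *	addr[1] = (hi >> 0) & 0xff;	// 0x1b
 *	addr[2] = (lo >> 24) & 0xff;	// 0x2c, ... down to
 *	addr[5] = (lo >> 0) & 0xff;	// 0x5f
 */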
17108
17109 #define BOUNDARY_SINGLE_CACHELINE 1
17110 #define BOUNDARY_MULTI_CACHELINE 2
17111
17112 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17113 {
17114 int cacheline_size;
17115 u8 byte;
17116 int goal;
17117
17118 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17119 if (byte == 0)
17120 cacheline_size = 1024;
17121 else
17122 cacheline_size = (int) byte * 4;
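
/* Example of the conversion above: PCI_CACHE_LINE_SIZE is expressed
 * in 32-bit words, so a typical x86 readout of 0x10 means
 * 16 * 4 = 64 bytes, while a zero readout is treated as the 1024-byte
 * worst case:
 *
 *	u8 byte = 0x10;					// from config space
 *	int cacheline_size = byte ? byte * 4 : 1024;	// 64 bytes here
 */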
17123
17124 /* On 5703 and later chips, the boundary bits have no
17125 * effect.
17126 */
17127 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17128 tg3_asic_rev(tp) != ASIC_REV_5701 &&
17129 !tg3_flag(tp, PCI_EXPRESS))
17130 goto out;
17131
17132 #if defined(CONFIG_PPC64) || defined(CONFIG_PARISC)
17133 goal = BOUNDARY_MULTI_CACHELINE;
17134 #else
17135 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17136 goal = BOUNDARY_SINGLE_CACHELINE;
17137 #else
17138 goal = 0;
17139 #endif
17140 #endif
17141
17142 if (tg3_flag(tp, 57765_PLUS)) {
17143 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17144 goto out;
17145 }
17146
17147 if (!goal)
17148 goto out;
17149
17150 /* PCI controllers on most RISC systems tend to disconnect
17151 * when a device tries to burst across a cache-line boundary.
17152 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17153 *
17154 * Unfortunately, for PCI-E there are only limited
17155 * write-side controls for this, and thus for reads
17156 * we will still get the disconnects. We'll also waste
17157 * these PCI cycles for both read and write for chips
17158 * other than 5700 and 5701 which do not implement the
17159 * boundary bits.
17160 */
17161 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17162 switch (cacheline_size) {
17163 case 16:
17164 case 32:
17165 case 64:
17166 case 128:
17167 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17168 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17169 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17170 } else {
17171 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17172 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17173 }
17174 break;
17175
17176 case 256:
17177 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17178 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17179 break;
17180
17181 default:
17182 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17183 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17184 break;
17185 }
17186 } else if (tg3_flag(tp, PCI_EXPRESS)) {
17187 switch (cacheline_size) {
17188 case 16:
17189 case 32:
17190 case 64:
17191 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17192 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17193 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17194 break;
17195 }
17196 fallthrough;
17197 case 128:
17198 default:
17199 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17200 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17201 break;
17202 }
17203 } else {
17204 switch (cacheline_size) {
17205 case 16:
17206 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17207 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17208 DMA_RWCTRL_WRITE_BNDRY_16);
17209 break;
17210 }
17211 fallthrough;
17212 case 32:
17213 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17214 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17215 DMA_RWCTRL_WRITE_BNDRY_32);
17216 break;
17217 }
17218 fallthrough;
17219 case 64:
17220 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17221 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17222 DMA_RWCTRL_WRITE_BNDRY_64);
17223 break;
17224 }
17225 fallthrough;
17226 case 128:
17227 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17228 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17229 DMA_RWCTRL_WRITE_BNDRY_128);
17230 break;
17231 }
17232 fallthrough;
17233 case 256:
17234 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17235 DMA_RWCTRL_WRITE_BNDRY_256);
17236 break;
17237 case 512:
17238 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17239 DMA_RWCTRL_WRITE_BNDRY_512);
17240 break;
17241 case 1024:
17242 default:
17243 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17244 DMA_RWCTRL_WRITE_BNDRY_1024);
17245 break;
17246 }
17247 }
17248
17249 out:
17250 return val;
17251 }
17252
17253 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17254 int size, bool to_device)
17255 {
17256 struct tg3_internal_buffer_desc test_desc;
17257 u32 sram_dma_descs;
17258 int i, ret;
17259
17260 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17261
17262 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17263 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17264 tw32(RDMAC_STATUS, 0);
17265 tw32(WDMAC_STATUS, 0);
17266
17267 tw32(BUFMGR_MODE, 0);
17268 tw32(FTQ_RESET, 0);
17269
17270 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17271 test_desc.addr_lo = buf_dma & 0xffffffff;
17272 test_desc.nic_mbuf = 0x00002100;
17273 test_desc.len = size;
17274
17275 /*
17276 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17277 * the *second* time the tg3 driver was loaded after an
17278 * initial scan.
17279 *
17280 * Broadcom tells me:
17281 * ...the DMA engine is connected to the GRC block and a DMA
17282 * reset may affect the GRC block in some unpredictable way...
17283 * The behavior of resets to individual blocks has not been tested.
17284 *
17285 * Broadcom noted the GRC reset will also reset all sub-components.
17286 */
17287 if (to_device) {
17288 test_desc.cqid_sqid = (13 << 8) | 2;
17289
17290 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17291 udelay(40);
17292 } else {
17293 test_desc.cqid_sqid = (16 << 8) | 7;
17294
17295 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17296 udelay(40);
17297 }
17298 test_desc.flags = 0x00000005;
17299
17300 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17301 u32 val;
17302
17303 val = *(((u32 *)&test_desc) + i);
17304 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17305 sram_dma_descs + (i * sizeof(u32)));
17306 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17307 }
17308 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
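
/* The loop above is an indirect SRAM store through PCI config space.
 * Factored out as a sketch (hypothetical helper name): point the
 * memory window at a NIC-local address, then push the data word
 * through it:
 *
 *	static void nic_sram_write(struct pci_dev *pdev, u32 nic_addr, u32 data)
 *	{
 *		pci_write_config_dword(pdev, TG3PCI_MEM_WIN_BASE_ADDR, nic_addr);
 *		pci_write_config_dword(pdev, TG3PCI_MEM_WIN_DATA, data);
 *	}
 */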
17309
17310 if (to_device)
17311 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17312 else
17313 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17314
17315 ret = -ENODEV;
17316 for (i = 0; i < 40; i++) {
17317 u32 val;
17318
17319 if (to_device)
17320 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17321 else
17322 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17323 if ((val & 0xffff) == sram_dma_descs) {
17324 ret = 0;
17325 break;
17326 }
17327
17328 udelay(100);
17329 }
17330
17331 return ret;
17332 }
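
/* Usage note: the round trip exercised by tg3_test_dma() below is
 * simply this helper called twice, e.g.
 *
 *	err = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
 *	if (!err)
 *		err = tg3_do_test_dma(tp, buf, buf_dma,
 *				      TEST_BUFFER_SIZE, false);
 *
 * The 40 iterations of udelay(100) above bound each direction at
 * roughly 4ms before the helper gives up with -ENODEV.
 */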
17333
17334 #define TEST_BUFFER_SIZE 0x2000
17335
17336 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17337 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17338 { },
17339 };
17340
17341 static int tg3_test_dma(struct tg3 *tp)
17342 {
17343 dma_addr_t buf_dma;
17344 u32 *buf, saved_dma_rwctrl;
17345 int ret = 0;
17346
17347 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17348 &buf_dma, GFP_KERNEL);
17349 if (!buf) {
17350 ret = -ENOMEM;
17351 goto out_nofree;
17352 }
17353
17354 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17355 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17356
17357 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17358
17359 if (tg3_flag(tp, 57765_PLUS))
17360 goto out;
17361
17362 if (tg3_flag(tp, PCI_EXPRESS)) {
17363 /* DMA read watermark not used on PCIE */
17364 tp->dma_rwctrl |= 0x00180000;
17365 } else if (!tg3_flag(tp, PCIX_MODE)) {
17366 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17367 tg3_asic_rev(tp) == ASIC_REV_5750)
17368 tp->dma_rwctrl |= 0x003f0000;
17369 else
17370 tp->dma_rwctrl |= 0x003f000f;
17371 } else {
17372 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17373 tg3_asic_rev(tp) == ASIC_REV_5704) {
17374 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17375 u32 read_water = 0x7;
17376
17377 /* If the 5704 is behind the EPB bridge, we can
17378 * do the less restrictive ONE_DMA workaround for
17379 * better performance.
17380 */
17381 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17382 tg3_asic_rev(tp) == ASIC_REV_5704)
17383 tp->dma_rwctrl |= 0x8000;
17384 else if (ccval == 0x6 || ccval == 0x7)
17385 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17386
17387 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17388 read_water = 4;
17389 /* Set bit 23 to enable PCIX hw bug fix */
17390 tp->dma_rwctrl |=
17391 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17392 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17393 (1 << 23);
17394 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17395 /* 5780 always in PCIX mode */
17396 tp->dma_rwctrl |= 0x00144000;
17397 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17398 /* 5714 always in PCIX mode */
17399 tp->dma_rwctrl |= 0x00148000;
17400 } else {
17401 tp->dma_rwctrl |= 0x001b000f;
17402 }
17403 }
17404 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17405 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17406
17407 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17408 tg3_asic_rev(tp) == ASIC_REV_5704)
17409 tp->dma_rwctrl &= 0xfffffff0;
17410
17411 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17412 tg3_asic_rev(tp) == ASIC_REV_5701) {
17413 /* Remove this if it causes problems for some boards. */
17414 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17415
17416 /* On 5700/5701 chips, we need to set this bit.
17417 * Otherwise the chip will issue cacheline transactions
17418 * to streamable DMA memory without all of the byte
17419 * enables turned on. This is an error on several
17420 * RISC PCI controllers, in particular sparc64.
17421 *
17422 * On 5703/5704 chips, this bit has been reassigned
17423 * a different meaning. In particular, it is used
17424 * on those chips to enable a PCI-X workaround.
17425 */
17426 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17427 }
17428
17429 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17430
17431
17432 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17433 tg3_asic_rev(tp) != ASIC_REV_5701)
17434 goto out;
17435
17436 /* It is best to perform DMA test with maximum write burst size
17437 * to expose the 5700/5701 write DMA bug.
17438 */
17439 saved_dma_rwctrl = tp->dma_rwctrl;
17440 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17441 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17442
17443 while (1) {
17444 u32 *p = buf, i;
17445
17446 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17447 p[i] = i;
17448
17449 /* Send the buffer to the chip. */
17450 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17451 if (ret) {
17452 dev_err(&tp->pdev->dev,
17453 "%s: Buffer write failed. err = %d\n",
17454 __func__, ret);
17455 break;
17456 }
17457
17458 /* Now read it back. */
17459 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17460 if (ret) {
17461 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17462 "err = %d\n", __func__, ret);
17463 break;
17464 }
17465
17466 /* Verify it. */
17467 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17468 if (p[i] == i)
17469 continue;
17470
17471 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17472 DMA_RWCTRL_WRITE_BNDRY_16) {
17473 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17474 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17475 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17476 break;
17477 } else {
17478 dev_err(&tp->pdev->dev,
17479 "%s: Buffer corrupted on read back! "
17480 "(%d != %d)\n", __func__, p[i], i);
17481 ret = -ENODEV;
17482 goto out;
17483 }
17484 }
17485
17486 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17487 /* Success. */
17488 ret = 0;
17489 break;
17490 }
17491 }
17492 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17493 DMA_RWCTRL_WRITE_BNDRY_16) {
17494 /* DMA test passed without adjusting DMA boundary,
17495 * now look for chipsets that are known to expose the
17496 * DMA bug without failing the test.
17497 */
17498 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17499 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17500 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17501 } else {
17502 /* Safe to use the calculated DMA boundary. */
17503 tp->dma_rwctrl = saved_dma_rwctrl;
17504 }
17505
17506 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17507 }
17508
17509 out:
17510 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17511 out_nofree:
17512 return ret;
17513 }
17514
17515 static void tg3_init_bufmgr_config(struct tg3 *tp)
17516 {
17517 if (tg3_flag(tp, 57765_PLUS)) {
17518 tp->bufmgr_config.mbuf_read_dma_low_water =
17519 DEFAULT_MB_RDMA_LOW_WATER_5705;
17520 tp->bufmgr_config.mbuf_mac_rx_low_water =
17521 DEFAULT_MB_MACRX_LOW_WATER_57765;
17522 tp->bufmgr_config.mbuf_high_water =
17523 DEFAULT_MB_HIGH_WATER_57765;
17524
17525 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17526 DEFAULT_MB_RDMA_LOW_WATER_5705;
17527 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17528 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17529 tp->bufmgr_config.mbuf_high_water_jumbo =
17530 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17531 } else if (tg3_flag(tp, 5705_PLUS)) {
17532 tp->bufmgr_config.mbuf_read_dma_low_water =
17533 DEFAULT_MB_RDMA_LOW_WATER_5705;
17534 tp->bufmgr_config.mbuf_mac_rx_low_water =
17535 DEFAULT_MB_MACRX_LOW_WATER_5705;
17536 tp->bufmgr_config.mbuf_high_water =
17537 DEFAULT_MB_HIGH_WATER_5705;
17538 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17539 tp->bufmgr_config.mbuf_mac_rx_low_water =
17540 DEFAULT_MB_MACRX_LOW_WATER_5906;
17541 tp->bufmgr_config.mbuf_high_water =
17542 DEFAULT_MB_HIGH_WATER_5906;
17543 }
17544
17545 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17546 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17547 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17548 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17549 tp->bufmgr_config.mbuf_high_water_jumbo =
17550 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17551 } else {
17552 tp->bufmgr_config.mbuf_read_dma_low_water =
17553 DEFAULT_MB_RDMA_LOW_WATER;
17554 tp->bufmgr_config.mbuf_mac_rx_low_water =
17555 DEFAULT_MB_MACRX_LOW_WATER;
17556 tp->bufmgr_config.mbuf_high_water =
17557 DEFAULT_MB_HIGH_WATER;
17558
17559 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17560 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17561 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17562 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17563 tp->bufmgr_config.mbuf_high_water_jumbo =
17564 DEFAULT_MB_HIGH_WATER_JUMBO;
17565 }
17566
17567 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17568 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17569 }
17570
17571 static char *tg3_phy_string(struct tg3 *tp)
17572 {
17573 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17574 case TG3_PHY_ID_BCM5400: return "5400";
17575 case TG3_PHY_ID_BCM5401: return "5401";
17576 case TG3_PHY_ID_BCM5411: return "5411";
17577 case TG3_PHY_ID_BCM5701: return "5701";
17578 case TG3_PHY_ID_BCM5703: return "5703";
17579 case TG3_PHY_ID_BCM5704: return "5704";
17580 case TG3_PHY_ID_BCM5705: return "5705";
17581 case TG3_PHY_ID_BCM5750: return "5750";
17582 case TG3_PHY_ID_BCM5752: return "5752";
17583 case TG3_PHY_ID_BCM5714: return "5714";
17584 case TG3_PHY_ID_BCM5780: return "5780";
17585 case TG3_PHY_ID_BCM5755: return "5755";
17586 case TG3_PHY_ID_BCM5787: return "5787";
17587 case TG3_PHY_ID_BCM5784: return "5784";
17588 case TG3_PHY_ID_BCM5756: return "5722/5756";
17589 case TG3_PHY_ID_BCM5906: return "5906";
17590 case TG3_PHY_ID_BCM5761: return "5761";
17591 case TG3_PHY_ID_BCM5718C: return "5718C";
17592 case TG3_PHY_ID_BCM5718S: return "5718S";
17593 case TG3_PHY_ID_BCM57765: return "57765";
17594 case TG3_PHY_ID_BCM5719C: return "5719C";
17595 case TG3_PHY_ID_BCM5720C: return "5720C";
17596 case TG3_PHY_ID_BCM5762: return "5762C";
17597 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17598 case 0: return "serdes";
17599 default: return "unknown";
17600 }
17601 }
17602
17603 static char *tg3_bus_string(struct tg3 *tp, char *str)
17604 {
17605 if (tg3_flag(tp, PCI_EXPRESS)) {
17606 strcpy(str, "PCI Express");
17607 return str;
17608 } else if (tg3_flag(tp, PCIX_MODE)) {
17609 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17610
17611 strcpy(str, "PCIX:");
17612
17613 if ((clock_ctrl == 7) ||
17614 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17615 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17616 strcat(str, "133MHz");
17617 else if (clock_ctrl == 0)
17618 strcat(str, "33MHz");
17619 else if (clock_ctrl == 2)
17620 strcat(str, "50MHz");
17621 else if (clock_ctrl == 4)
17622 strcat(str, "66MHz");
17623 else if (clock_ctrl == 6)
17624 strcat(str, "100MHz");
17625 } else {
17626 strcpy(str, "PCI:");
17627 if (tg3_flag(tp, PCI_HIGH_SPEED))
17628 strcat(str, "66MHz");
17629 else
17630 strcat(str, "33MHz");
17631 }
17632 if (tg3_flag(tp, PCI_32BIT))
17633 strcat(str, ":32-bit");
17634 else
17635 strcat(str, ":64-bit");
17636 return str;
17637 }
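
/* Example outputs (hypothetical boards): a 5704 on a 133MHz, 64-bit
 * PCI-X bus yields "PCIX:133MHz:64-bit"; a plain 33MHz, 32-bit PCI
 * board yields "PCI:33MHz:32-bit". Callers supply the buffer, e.g.
 * the "char str[40]" declared in tg3_init_one() below:
 *
 *	char str[40];
 *	netdev_info(dev, "bus: %s\n", tg3_bus_string(tp, str));
 */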
17638
17639 static void tg3_init_coal(struct tg3 *tp)
17640 {
17641 struct ethtool_coalesce *ec = &tp->coal;
17642
17643 memset(ec, 0, sizeof(*ec));
17644 ec->cmd = ETHTOOL_GCOALESCE;
17645 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17646 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17647 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17648 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17649 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17650 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17651 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17652 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17653 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17654
17655 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17656 HOSTCC_MODE_CLRTICK_TXBD)) {
17657 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17658 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17659 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17660 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17661 }
17662
17663 if (tg3_flag(tp, 5705_PLUS)) {
17664 ec->rx_coalesce_usecs_irq = 0;
17665 ec->tx_coalesce_usecs_irq = 0;
17666 ec->stats_block_coalesce_usecs = 0;
17667 }
17668 }
17669
17670 static int tg3_init_one(struct pci_dev *pdev,
17671 const struct pci_device_id *ent)
17672 {
17673 struct net_device *dev;
17674 struct tg3 *tp;
17675 int i, err;
17676 u32 sndmbx, rcvmbx, intmbx;
17677 char str[40];
17678 u64 dma_mask, persist_dma_mask;
17679 netdev_features_t features = 0;
17680 u8 addr[ETH_ALEN] __aligned(2);
17681
17682 err = pci_enable_device(pdev);
17683 if (err) {
17684 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17685 return err;
17686 }
17687
17688 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17689 if (err) {
17690 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17691 goto err_out_disable_pdev;
17692 }
17693
17694 pci_set_master(pdev);
17695
17696 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17697 if (!dev) {
17698 err = -ENOMEM;
17699 goto err_out_free_res;
17700 }
17701
17702 SET_NETDEV_DEV(dev, &pdev->dev);
17703
17704 tp = netdev_priv(dev);
17705 tp->pdev = pdev;
17706 tp->dev = dev;
17707 tp->rx_mode = TG3_DEF_RX_MODE;
17708 tp->tx_mode = TG3_DEF_TX_MODE;
17709 tp->irq_sync = 1;
17710 tp->pcierr_recovery = false;
17711
17712 if (tg3_debug > 0)
17713 tp->msg_enable = tg3_debug;
17714 else
17715 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17716
17717 if (pdev_is_ssb_gige_core(pdev)) {
17718 tg3_flag_set(tp, IS_SSB_CORE);
17719 if (ssb_gige_must_flush_posted_writes(pdev))
17720 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17721 if (ssb_gige_one_dma_at_once(pdev))
17722 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17723 if (ssb_gige_have_roboswitch(pdev)) {
17724 tg3_flag_set(tp, USE_PHYLIB);
17725 tg3_flag_set(tp, ROBOSWITCH);
17726 }
17727 if (ssb_gige_is_rgmii(pdev))
17728 tg3_flag_set(tp, RGMII_MODE);
17729 }
17730
17731 /* The word/byte swap controls here control register access byte
17732 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17733 * setting below.
17734 */
17735 tp->misc_host_ctrl =
17736 MISC_HOST_CTRL_MASK_PCI_INT |
17737 MISC_HOST_CTRL_WORD_SWAP |
17738 MISC_HOST_CTRL_INDIR_ACCESS |
17739 MISC_HOST_CTRL_PCISTATE_RW;
17740
17741 /* The NONFRM (non-frame) byte/word swap controls take effect
17742 * on descriptor entries, anything which isn't packet data.
17743 *
17744 * The StrongARM chips on the board (one for tx, one for rx)
17745 * are running in big-endian mode.
17746 */
17747 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17748 GRC_MODE_WSWAP_NONFRM_DATA);
17749 #ifdef __BIG_ENDIAN
17750 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17751 #endif
17752 spin_lock_init(&tp->lock);
17753 spin_lock_init(&tp->indirect_lock);
17754 INIT_WORK(&tp->reset_task, tg3_reset_task);
17755
17756 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17757 if (!tp->regs) {
17758 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17759 err = -ENOMEM;
17760 goto err_out_free_dev;
17761 }
17762
17763 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17764 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17765 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17766 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17767 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17768 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17769 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17770 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17771 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17772 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17773 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17774 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17775 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17776 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17777 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17778 tg3_flag_set(tp, ENABLE_APE);
17779 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17780 if (!tp->aperegs) {
17781 dev_err(&pdev->dev,
17782 "Cannot map APE registers, aborting\n");
17783 err = -ENOMEM;
17784 goto err_out_iounmap;
17785 }
17786 }
17787
17788 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17789 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17790
17791 dev->ethtool_ops = &tg3_ethtool_ops;
17792 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17793 dev->netdev_ops = &tg3_netdev_ops;
17794 dev->irq = pdev->irq;
17795
17796 err = tg3_get_invariants(tp, ent);
17797 if (err) {
17798 dev_err(&pdev->dev,
17799 "Problem fetching invariants of chip, aborting\n");
17800 goto err_out_apeunmap;
17801 }
17802
17803 /* The EPB bridge inside 5714, 5715, and 5780 and any
17804 * device behind the EPB cannot support DMA addresses > 40-bit.
17805 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17806 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17807 * do DMA address check in __tg3_start_xmit().
17808 */
17809 if (tg3_flag(tp, IS_5788))
17810 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17811 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17812 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17813 #ifdef CONFIG_HIGHMEM
17814 dma_mask = DMA_BIT_MASK(64);
17815 #endif
17816 } else
17817 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17818
17819 if (tg3_asic_rev(tp) == ASIC_REV_57766)
17820 persist_dma_mask = DMA_BIT_MASK(31);
17821
17822 /* Configure DMA attributes. */
17823 if (dma_mask > DMA_BIT_MASK(32)) {
17824 err = dma_set_mask(&pdev->dev, dma_mask);
17825 if (!err) {
17826 features |= NETIF_F_HIGHDMA;
17827 err = dma_set_coherent_mask(&pdev->dev,
17828 persist_dma_mask);
17829 if (err < 0) {
17830 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17831 "DMA for consistent allocations\n");
17832 goto err_out_apeunmap;
17833 }
17834 }
17835 }
17836 if (err || dma_mask == DMA_BIT_MASK(32)) {
17837 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17838 if (err) {
17839 dev_err(&pdev->dev,
17840 "No usable DMA configuration, aborting\n");
17841 goto err_out_apeunmap;
17842 }
17843 }
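
/* The fallback above, reduced to a sketch (assumes a "pdev" pointer;
 * note tg3 keeps separate streaming and coherent masks, which this
 * simplification does not):
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;	// no usable DMA configuration
 */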
17844
17845 tg3_init_bufmgr_config(tp);
17846
17847 /* 5700 B0 chips do not support checksumming correctly due
17848 * to hardware bugs.
17849 */
17850 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17851 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17852
17853 if (tg3_flag(tp, 5755_PLUS))
17854 features |= NETIF_F_IPV6_CSUM;
17855 }
17856
17857 /* TSO is on by default on chips that support hardware TSO.
17858 * Firmware TSO on older chips gives lower performance, so it
17859 * is off by default, but can be enabled using ethtool.
17860 */
17861 if ((tg3_flag(tp, HW_TSO_1) ||
17862 tg3_flag(tp, HW_TSO_2) ||
17863 tg3_flag(tp, HW_TSO_3)) &&
17864 (features & NETIF_F_IP_CSUM))
17865 features |= NETIF_F_TSO;
17866 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17867 if (features & NETIF_F_IPV6_CSUM)
17868 features |= NETIF_F_TSO6;
17869 if (tg3_flag(tp, HW_TSO_3) ||
17870 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17871 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17872 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17873 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17874 tg3_asic_rev(tp) == ASIC_REV_57780)
17875 features |= NETIF_F_TSO_ECN;
17876 }
17877
17878 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17879 NETIF_F_HW_VLAN_CTAG_RX;
17880 dev->vlan_features |= features;
17881
17882 /*
17883 * Add loopback capability only for a subset of devices that support
17884 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17885 * loopback for the remaining devices.
17886 */
17887 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17888 !tg3_flag(tp, CPMU_PRESENT))
17889 /* Add the loopback capability */
17890 features |= NETIF_F_LOOPBACK;
17891
17892 dev->hw_features |= features;
17893 dev->priv_flags |= IFF_UNICAST_FLT;
17894
17895 /* MTU range: 60 - 9000 or 1500, depending on hardware */
17896 dev->min_mtu = TG3_MIN_MTU;
17897 dev->max_mtu = TG3_MAX_MTU(tp);
17898
17899 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17900 !tg3_flag(tp, TSO_CAPABLE) &&
17901 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17902 tg3_flag_set(tp, MAX_RXPEND_64);
17903 tp->rx_pending = 63;
17904 }
17905
17906 err = tg3_get_device_address(tp, addr);
17907 if (err) {
17908 dev_err(&pdev->dev,
17909 "Could not obtain valid ethernet address, aborting\n");
17910 goto err_out_apeunmap;
17911 }
17912 eth_hw_addr_set(dev, addr);
17913
17914 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17915 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17916 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17917 for (i = 0; i < tp->irq_max; i++) {
17918 struct tg3_napi *tnapi = &tp->napi[i];
17919
17920 tnapi->tp = tp;
17921 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17922
17923 tnapi->int_mbox = intmbx;
17924 intmbx += 0x8;
17925
17926 tnapi->consmbox = rcvmbx;
17927 tnapi->prodmbox = sndmbx;
17928
17929 if (i)
17930 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17931 else
17932 tnapi->coal_now = HOSTCC_MODE_NOW;
17933
17934 if (!tg3_flag(tp, SUPPORT_MSIX))
17935 break;
17936
17937 /*
17938 * If we support MSIX, we'll be using RSS. If we're using
17939 * RSS, the first vector only handles link interrupts and the
17940 * remaining vectors handle rx and tx interrupts. Reuse the
17941 * mailbox values for the next iteration. The values we setup
17942 * above are still useful for the single vectored mode.
17943 */
17944 if (!i)
17945 continue;
17946
17947 rcvmbx += 0x8;
17948
17949 if (sndmbx & 0x4)
17950 sndmbx -= 0x4;
17951 else
17952 sndmbx += 0xc;
17953 }
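
/* Worked example of the mailbox walk above (offsets computed from the
 * loop, relative to the initial low-word registers, and assuming bit 2
 * of the initial sndmbx is set, as it is for
 * MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW):
 *
 *	vec 0: int +0x00  rcv +0x00  snd +0x00
 *	vec 1: int +0x08  rcv +0x00  snd +0x00	// reuses vec 0 values
 *	vec 2: int +0x10  rcv +0x08  snd -0x04
 *	vec 3: int +0x18  rcv +0x10  snd +0x08
 *	vec 4: int +0x20  rcv +0x18  snd +0x04
 */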
17954
17955 /*
17956 * Reset the chip in case the UNDI or EFI driver did not shut it
17957 * down. The DMA self test will enable WDMAC, and we'll see
17958 * (spurious) pending DMA on the PCI bus at that point.
17959 */
17960 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17961 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17962 tg3_full_lock(tp, 0);
17963 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17964 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17965 tg3_full_unlock(tp);
17966 }
17967
17968 err = tg3_test_dma(tp);
17969 if (err) {
17970 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17971 goto err_out_apeunmap;
17972 }
17973
17974 tg3_init_coal(tp);
17975
17976 pci_set_drvdata(pdev, dev);
17977
17978 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17979 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17980 tg3_asic_rev(tp) == ASIC_REV_5762)
17981 tg3_flag_set(tp, PTP_CAPABLE);
17982
17983 tg3_timer_init(tp);
17984
17985 tg3_carrier_off(tp);
17986
17987 err = register_netdev(dev);
17988 if (err) {
17989 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17990 goto err_out_apeunmap;
17991 }
17992
17993 if (tg3_flag(tp, PTP_CAPABLE)) {
17994 tg3_ptp_init(tp);
17995 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17996 &tp->pdev->dev);
17997 if (IS_ERR(tp->ptp_clock))
17998 tp->ptp_clock = NULL;
17999 }
18000
18001 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
18002 tp->board_part_number,
18003 tg3_chip_rev_id(tp),
18004 tg3_bus_string(tp, str),
18005 dev->dev_addr);
18006
18007 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
18008 char *ethtype;
18009
18010 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
18011 ethtype = "10/100Base-TX";
18012 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
18013 ethtype = "1000Base-SX";
18014 else
18015 ethtype = "10/100/1000Base-T";
18016
18017 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
18018 "(WireSpeed[%d], EEE[%d])\n",
18019 tg3_phy_string(tp), ethtype,
18020 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
18021 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
18022 }
18023
18024 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
18025 (dev->features & NETIF_F_RXCSUM) != 0,
18026 tg3_flag(tp, USE_LINKCHG_REG) != 0,
18027 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
18028 tg3_flag(tp, ENABLE_ASF) != 0,
18029 tg3_flag(tp, TSO_CAPABLE) != 0);
18030 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
18031 tp->dma_rwctrl,
18032 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
18033 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
18034
18035 pci_save_state(pdev);
18036
18037 return 0;
18038
18039 err_out_apeunmap:
18040 if (tp->aperegs) {
18041 iounmap(tp->aperegs);
18042 tp->aperegs = NULL;
18043 }
18044
18045 err_out_iounmap:
18046 if (tp->regs) {
18047 iounmap(tp->regs);
18048 tp->regs = NULL;
18049 }
18050
18051 err_out_free_dev:
18052 free_netdev(dev);
18053
18054 err_out_free_res:
18055 pci_release_regions(pdev);
18056
18057 err_out_disable_pdev:
18058 if (pci_is_enabled(pdev))
18059 pci_disable_device(pdev);
18060 return err;
18061 }
18062
18063 static void tg3_remove_one(struct pci_dev *pdev)
18064 {
18065 struct net_device *dev = pci_get_drvdata(pdev);
18066
18067 if (dev) {
18068 struct tg3 *tp = netdev_priv(dev);
18069
18070 tg3_ptp_fini(tp);
18071
18072 release_firmware(tp->fw);
18073
18074 tg3_reset_task_cancel(tp);
18075
18076 if (tg3_flag(tp, USE_PHYLIB)) {
18077 tg3_phy_fini(tp);
18078 tg3_mdio_fini(tp);
18079 }
18080
18081 unregister_netdev(dev);
18082 if (tp->aperegs) {
18083 iounmap(tp->aperegs);
18084 tp->aperegs = NULL;
18085 }
18086 if (tp->regs) {
18087 iounmap(tp->regs);
18088 tp->regs = NULL;
18089 }
18090 free_netdev(dev);
18091 pci_release_regions(pdev);
18092 pci_disable_device(pdev);
18093 }
18094 }
18095
18096 #ifdef CONFIG_PM_SLEEP
18097 static int tg3_suspend(struct device *device)
18098 {
18099 struct net_device *dev = dev_get_drvdata(device);
18100 struct tg3 *tp = netdev_priv(dev);
18101
18102 rtnl_lock();
18103
18104 if (!netif_running(dev))
18105 goto unlock;
18106
18107 tg3_reset_task_cancel(tp);
18108 tg3_phy_stop(tp);
18109 tg3_netif_stop(tp);
18110
18111 tg3_timer_stop(tp);
18112
18113 tg3_full_lock(tp, 1);
18114 tg3_disable_ints(tp);
18115 tg3_full_unlock(tp);
18116
18117 netif_device_detach(dev);
18118
18119 tg3_full_lock(tp, 0);
18120 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18121 tg3_flag_clear(tp, INIT_COMPLETE);
18122 tg3_full_unlock(tp);
18123
18124 tg3_power_down_prepare(tp);
18125
18126 unlock:
18127 rtnl_unlock();
18128 return 0;
18129 }
18130
18131 static int tg3_resume(struct device *device)
18132 {
18133 struct net_device *dev = dev_get_drvdata(device);
18134 struct tg3 *tp = netdev_priv(dev);
18135 int err = 0;
18136
18137 rtnl_lock();
18138
18139 if (!netif_running(dev))
18140 goto unlock;
18141
18142 netif_device_attach(dev);
18143
18144 netdev_lock(dev);
18145 tg3_full_lock(tp, 0);
18146
18147 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18148
18149 tg3_flag_set(tp, INIT_COMPLETE);
18150 err = tg3_restart_hw(tp,
18151 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18152 if (err)
18153 goto out;
18154
18155 tg3_timer_start(tp);
18156
18157 tg3_netif_start(tp);
18158
18159 out:
18160 tg3_full_unlock(tp);
18161 netdev_unlock(dev);
18162
18163 if (!err)
18164 tg3_phy_start(tp);
18165
18166 unlock:
18167 rtnl_unlock();
18168 return err;
18169 }
18170 #endif /* CONFIG_PM_SLEEP */
18171
18172 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18173
18174 /* Systems where ACPI _PTS (Prepare To Sleep) S5 will result in a fatal
18175 * PCIe AER event on the tg3 device if the tg3 device is not, or cannot
18176 * be, powered down.
18177 */
18178 static const struct dmi_system_id tg3_restart_aer_quirk_table[] = {
18179 {
18180 .matches = {
18181 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18182 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R440"),
18183 },
18184 },
18185 {
18186 .matches = {
18187 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18188 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R540"),
18189 },
18190 },
18191 {
18192 .matches = {
18193 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18194 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R640"),
18195 },
18196 },
18197 {
18198 .matches = {
18199 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18200 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R650"),
18201 },
18202 },
18203 {
18204 .matches = {
18205 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18206 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R740"),
18207 },
18208 },
18209 {
18210 .matches = {
18211 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18212 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R750"),
18213 },
18214 },
18215 {}
18216 };
18217
18218 static void tg3_shutdown(struct pci_dev *pdev)
18219 {
18220 struct net_device *dev = pci_get_drvdata(pdev);
18221 struct tg3 *tp = netdev_priv(dev);
18222
18223 tg3_reset_task_cancel(tp);
18224
18225 rtnl_lock();
18226
18227 netif_device_detach(dev);
18228
18229 if (netif_running(dev))
18230 dev_close(dev);
18231
18232 if (system_state == SYSTEM_POWER_OFF)
18233 tg3_power_down(tp);
18234 else if (system_state == SYSTEM_RESTART &&
18235 dmi_first_match(tg3_restart_aer_quirk_table) &&
18236 pdev->current_state != PCI_D3cold &&
18237 pdev->current_state != PCI_UNKNOWN) {
18238 /* Disable PCIe AER on the tg3 to avoid a fatal
18239 * error during this system restart.
18240 */
18241 pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL,
18242 PCI_EXP_DEVCTL_CERE |
18243 PCI_EXP_DEVCTL_NFERE |
18244 PCI_EXP_DEVCTL_FERE |
18245 PCI_EXP_DEVCTL_URRE);
18246 }
18247
18248 rtnl_unlock();
18249
18250 pci_disable_device(pdev);
18251 }
18252
18253 /**
18254 * tg3_io_error_detected - called when PCI error is detected
18255 * @pdev: Pointer to PCI device
18256 * @state: The current pci connection state
18257 *
18258 * This function is called after a PCI bus error affecting
18259 * this device has been detected.
18260 */
18261 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18262 pci_channel_state_t state)
18263 {
18264 struct net_device *netdev = pci_get_drvdata(pdev);
18265 struct tg3 *tp = netdev_priv(netdev);
18266 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18267
18268 netdev_info(netdev, "PCI I/O error detected\n");
18269
18270 /* Want to make sure that the reset task doesn't run */
18271 tg3_reset_task_cancel(tp);
18272
18273 rtnl_lock();
18274
18275 /* Could be second call or maybe we don't have netdev yet */
18276 if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18277 goto done;
18278
18279 /* There is no need to recover from a permanent error */
18280 if (state == pci_channel_io_frozen)
18281 tp->pcierr_recovery = true;
18282
18283 tg3_phy_stop(tp);
18284
18285 tg3_netif_stop(tp);
18286
18287 tg3_timer_stop(tp);
18288
18289 netif_device_detach(netdev);
18290
18291 /* Clean up software state, even if MMIO is blocked */
18292 tg3_full_lock(tp, 0);
18293 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18294 tg3_full_unlock(tp);
18295
18296 done:
18297 if (state == pci_channel_io_perm_failure) {
18298 if (netdev) {
18299 netdev_lock(netdev);
18300 tg3_napi_enable(tp);
18301 netdev_unlock(netdev);
18302 dev_close(netdev);
18303 }
18304 err = PCI_ERS_RESULT_DISCONNECT;
18305 } else {
18306 pci_disable_device(pdev);
18307 }
18308
18309 rtnl_unlock();
18310
18311 return err;
18312 }
18313
18314 /**
18315 * tg3_io_slot_reset - called after the PCI bus has been reset.
18316 * @pdev: Pointer to PCI device
18317 *
18318 * Restart the card from scratch, as if from a cold-boot.
18319 * At this point, the card has experienced a hard reset,
18320 * followed by fixups by BIOS, and has its config space
18321 * set up identically to what it was at cold boot.
18322 */
18323 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18324 {
18325 struct net_device *netdev = pci_get_drvdata(pdev);
18326 struct tg3 *tp = netdev_priv(netdev);
18327 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18328 int err;
18329
18330 rtnl_lock();
18331
18332 if (pci_enable_device(pdev)) {
18333 dev_err(&pdev->dev,
18334 "Cannot re-enable PCI device after reset.\n");
18335 goto done;
18336 }
18337
18338 pci_set_master(pdev);
18339 pci_restore_state(pdev);
18340
18341 if (!netdev || !netif_running(netdev)) {
18342 rc = PCI_ERS_RESULT_RECOVERED;
18343 goto done;
18344 }
18345
18346 err = tg3_power_up(tp);
18347 if (err)
18348 goto done;
18349
18350 rc = PCI_ERS_RESULT_RECOVERED;
18351
18352 done:
18353 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18354 netdev_lock(netdev);
18355 tg3_napi_enable(tp);
18356 netdev_unlock(netdev);
18357 dev_close(netdev);
18358 }
18359 rtnl_unlock();
18360
18361 return rc;
18362 }
18363
18364 /**
18365 * tg3_io_resume - called when traffic can start flowing again.
18366 * @pdev: Pointer to PCI device
18367 *
18368 * This callback is called when the error recovery driver tells
18369 * us that it's OK to resume normal operation.
18370 */
18371 static void tg3_io_resume(struct pci_dev *pdev)
18372 {
18373 struct net_device *netdev = pci_get_drvdata(pdev);
18374 struct tg3 *tp = netdev_priv(netdev);
18375 int err;
18376
18377 rtnl_lock();
18378
18379 if (!netdev || !netif_running(netdev))
18380 goto done;
18381
18382 netdev_lock(netdev);
18383 tg3_full_lock(tp, 0);
18384 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18385 tg3_flag_set(tp, INIT_COMPLETE);
18386 err = tg3_restart_hw(tp, true);
18387 if (err) {
18388 tg3_full_unlock(tp);
18389 netdev_unlock(netdev);
18390 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18391 goto done;
18392 }
18393
18394 netif_device_attach(netdev);
18395
18396 tg3_timer_start(tp);
18397
18398 tg3_netif_start(tp);
18399
18400 tg3_full_unlock(tp);
18401 netdev_unlock(netdev);
18402
18403 tg3_phy_start(tp);
18404
18405 done:
18406 tp->pcierr_recovery = false;
18407 rtnl_unlock();
18408 }
18409
18410 static const struct pci_error_handlers tg3_err_handler = {
18411 .error_detected = tg3_io_error_detected,
18412 .slot_reset = tg3_io_slot_reset,
18413 .resume = tg3_io_resume
18414 };
18415
18416 static struct pci_driver tg3_driver = {
18417 .name = DRV_MODULE_NAME,
18418 .id_table = tg3_pci_tbl,
18419 .probe = tg3_init_one,
18420 .remove = tg3_remove_one,
18421 .err_handler = &tg3_err_handler,
18422 .driver.pm = &tg3_pm_ops,
18423 .shutdown = tg3_shutdown,
18424 };
18425
18426 module_pci_driver(tg3_driver);
18427