/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>
#include <net/gso.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

#define DRV_MODULE_NAME		"tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			137

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
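
/* Illustrative usage of the flag accessors above (an editorial sketch,
 * not part of the driver): the tg3_flag() macros paste the short name
 * onto the TG3_FLAG_ prefix, so a feature test reads naturally at the
 * call site and compiles down to an atomic bitop:
 *
 *	if (tg3_flag(tp, JUMBO_CAPABLE))	// tests TG3_FLAG_JUMBO_CAPABLE
 *		tg3_flag_set(tp, TSO_CAPABLE);	// set_bit() underneath
 */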

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
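
/* Illustrative note (editorial, not part of the driver): because
 * TG3_TX_RING_SIZE is a power of two, NEXT_TX() wraps the ring index
 * with a mask instead of a divide:
 *
 *	NEXT_TX(510) == 511
 *	NEXT_TX(511) == 0	// (512 & 511) == 0
 *
 * This is exactly the '& (foo - 1)' trick the comment above describes.
 */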

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG357766);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
"tx_deferred" }, 400 { "tx_excessive_collisions" }, 401 { "tx_late_collisions" }, 402 { "tx_collide_2times" }, 403 { "tx_collide_3times" }, 404 { "tx_collide_4times" }, 405 { "tx_collide_5times" }, 406 { "tx_collide_6times" }, 407 { "tx_collide_7times" }, 408 { "tx_collide_8times" }, 409 { "tx_collide_9times" }, 410 { "tx_collide_10times" }, 411 { "tx_collide_11times" }, 412 { "tx_collide_12times" }, 413 { "tx_collide_13times" }, 414 { "tx_collide_14times" }, 415 { "tx_collide_15times" }, 416 { "tx_ucast_packets" }, 417 { "tx_mcast_packets" }, 418 { "tx_bcast_packets" }, 419 { "tx_carrier_sense_errors" }, 420 { "tx_discards" }, 421 { "tx_errors" }, 422 423 { "dma_writeq_full" }, 424 { "dma_write_prioq_full" }, 425 { "rxbds_empty" }, 426 { "rx_discards" }, 427 { "rx_errors" }, 428 { "rx_threshold_hit" }, 429 430 { "dma_readq_full" }, 431 { "dma_read_prioq_full" }, 432 { "tx_comp_queue_full" }, 433 434 { "ring_set_send_prod_index" }, 435 { "ring_status_update" }, 436 { "nic_irqs" }, 437 { "nic_avoided_irqs" }, 438 { "nic_tx_threshold_hit" }, 439 440 { "mbuf_lwm_thresh_hit" }, 441 }; 442 443 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys) 444 #define TG3_NVRAM_TEST 0 445 #define TG3_LINK_TEST 1 446 #define TG3_REGISTER_TEST 2 447 #define TG3_MEMORY_TEST 3 448 #define TG3_MAC_LOOPB_TEST 4 449 #define TG3_PHY_LOOPB_TEST 5 450 #define TG3_EXT_LOOPB_TEST 6 451 #define TG3_INTERRUPT_TEST 7 452 453 454 static const struct { 455 const char string[ETH_GSTRING_LEN]; 456 } ethtool_test_keys[] = { 457 [TG3_NVRAM_TEST] = { "nvram test (online) " }, 458 [TG3_LINK_TEST] = { "link test (online) " }, 459 [TG3_REGISTER_TEST] = { "register test (offline)" }, 460 [TG3_MEMORY_TEST] = { "memory test (offline)" }, 461 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" }, 462 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" }, 463 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" }, 464 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" }, 465 }; 466 467 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys) 468 469 470 static void tg3_write32(struct tg3 *tp, u32 off, u32 val) 471 { 472 writel(val, tp->regs + off); 473 } 474 475 static u32 tg3_read32(struct tg3 *tp, u32 off) 476 { 477 return readl(tp->regs + off); 478 } 479 480 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val) 481 { 482 writel(val, tp->aperegs + off); 483 } 484 485 static u32 tg3_ape_read32(struct tg3 *tp, u32 off) 486 { 487 return readl(tp->aperegs + off); 488 } 489 490 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) 491 { 492 unsigned long flags; 493 494 spin_lock_irqsave(&tp->indirect_lock, flags); 495 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); 496 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); 497 spin_unlock_irqrestore(&tp->indirect_lock, flags); 498 } 499 500 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val) 501 { 502 writel(val, tp->regs + off); 503 readl(tp->regs + off); 504 } 505 506 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off) 507 { 508 unsigned long flags; 509 u32 val; 510 511 spin_lock_irqsave(&tp->indirect_lock, flags); 512 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); 513 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); 514 spin_unlock_irqrestore(&tp->indirect_lock, flags); 515 return val; 516 } 517 518 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val) 519 { 520 unsigned long flags; 521 522 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) { 523 

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
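
/* Illustrative usage (editorial, not part of the driver): NIC SRAM
 * words are reached through the memory-window accessors above, as the
 * firmware mailbox handshake later in this file does:
 *
 *	u32 val;
 *
 *	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
 *	tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
 *
 * Both helpers restore the window base to zero so other agents always
 * find the window in a known state.
 */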

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't have any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
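
/* Illustrative pattern (editorial, not part of the driver): APE locks
 * bracket access to resources shared with the management firmware, and
 * callers tolerate acquisition failure:
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	// ...touch APE shared memory...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *
 * PHY accesses use tp->phy_ape_lock the same way; see __tg3_readphy()
 * below.
 */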

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 milliseconds for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Check if the heartbeat interval has been exceeded */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
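
/* Illustrative note (editorial, not part of the driver): interrupts
 * are acknowledged through the per-vector mailbox.  Writing 1 masks
 * the vector; writing last_tag << 24 unmasks it and, on tagged-status
 * hardware, tells the chip how much work the driver has consumed:
 *
 *	tw32_mailbox_f(tnapi->int_mbox, 0x00000001);		// mask
 *	tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);	// unmask
 *
 * The duplicated write under the 1SHOT_MSI flag in tg3_enable_ints()
 * above presumably serves to re-arm the one-shot MSI.
 */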

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
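
/* Illustrative note (editorial, not part of the driver): __tg3_readphy()
 * builds a clause-22 MII frame in MAC_MI_COM.  For example, reading
 * BMSR (register 1) from PHY address 1 amounts to:
 *
 *	frame_val = ((1 << MI_COM_PHY_ADDR_SHIFT) & MI_COM_PHY_ADDR_MASK) |
 *		    ((MII_BMSR << MI_COM_REG_ADDR_SHIFT) &
 *		     MI_COM_REG_ADDR_MASK) |
 *		    MI_COM_CMD_READ | MI_COM_START;
 *
 * followed by polling MI_COM_BUSY until it clears, for up to
 * PHY_BUSY_LOOPS iterations of 10 usec each.
 */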

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
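
/* Illustrative note (editorial, not part of the driver): the cl45
 * helpers above emulate clause-45 MMD access over clause-22 registers.
 * A read is four MDIO operations:
 *
 *	tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);		// select device
 *	tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);		// set address
 *	tg3_writephy(tp, MII_TG3_MMD_CTRL,
 *		     MII_TG3_MMD_CTRL_DATA_NOINC | devad);	// data mode
 *	tg3_readphy(tp, MII_TG3_MMD_ADDRESS, &val);		// fetch data
 *
 * The DSP and AUX_CTRL helpers follow the same address-then-data shape.
 */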

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
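
/* Illustrative note (editorial, not part of the driver): tg3_mdio_read()
 * and tg3_mdio_write() are the mii_bus callbacks handed to phylib when
 * tg3_mdio_init() registers the bus below:
 *
 *	tp->mdio_bus->read  = &tg3_mdio_read;
 *	tp->mdio_bus->write = &tg3_mdio_write;
 *
 * They take tp->lock with spin_lock_bh() because phylib may poll the
 * bus concurrently with the driver's own MI accesses.
 */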

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev));
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		fallthrough;
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
			     usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
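
/* Illustrative flow (editorial, not part of the driver): driver-to-
 * firmware events pair tg3_wait_for_event_ack() with
 * tg3_generate_fw_event():
 *
 *	tg3_wait_for_event_ack(tp);			// prior event done?
 *	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, cmd);	// stage command
 *	tg3_generate_fw_event(tp);			// ring the RX CPU
 *
 * tg3_ump_link_report() and tg3_stop_fw() below follow this shape.
 */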

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
"enabled" : "disabled"); 1893 1894 tg3_ump_link_report(tp); 1895 } 1896 1897 tp->link_up = netif_carrier_ok(tp->dev); 1898 } 1899 1900 static u32 tg3_decode_flowctrl_1000T(u32 adv) 1901 { 1902 u32 flowctrl = 0; 1903 1904 if (adv & ADVERTISE_PAUSE_CAP) { 1905 flowctrl |= FLOW_CTRL_RX; 1906 if (!(adv & ADVERTISE_PAUSE_ASYM)) 1907 flowctrl |= FLOW_CTRL_TX; 1908 } else if (adv & ADVERTISE_PAUSE_ASYM) 1909 flowctrl |= FLOW_CTRL_TX; 1910 1911 return flowctrl; 1912 } 1913 1914 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) 1915 { 1916 u16 miireg; 1917 1918 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) 1919 miireg = ADVERTISE_1000XPAUSE; 1920 else if (flow_ctrl & FLOW_CTRL_TX) 1921 miireg = ADVERTISE_1000XPSE_ASYM; 1922 else if (flow_ctrl & FLOW_CTRL_RX) 1923 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM; 1924 else 1925 miireg = 0; 1926 1927 return miireg; 1928 } 1929 1930 static u32 tg3_decode_flowctrl_1000X(u32 adv) 1931 { 1932 u32 flowctrl = 0; 1933 1934 if (adv & ADVERTISE_1000XPAUSE) { 1935 flowctrl |= FLOW_CTRL_RX; 1936 if (!(adv & ADVERTISE_1000XPSE_ASYM)) 1937 flowctrl |= FLOW_CTRL_TX; 1938 } else if (adv & ADVERTISE_1000XPSE_ASYM) 1939 flowctrl |= FLOW_CTRL_TX; 1940 1941 return flowctrl; 1942 } 1943 1944 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv) 1945 { 1946 u8 cap = 0; 1947 1948 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) { 1949 cap = FLOW_CTRL_TX | FLOW_CTRL_RX; 1950 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) { 1951 if (lcladv & ADVERTISE_1000XPAUSE) 1952 cap = FLOW_CTRL_RX; 1953 if (rmtadv & ADVERTISE_1000XPAUSE) 1954 cap = FLOW_CTRL_TX; 1955 } 1956 1957 return cap; 1958 } 1959 1960 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) 1961 { 1962 u8 autoneg; 1963 u8 flowctrl = 0; 1964 u32 old_rx_mode = tp->rx_mode; 1965 u32 old_tx_mode = tp->tx_mode; 1966 1967 if (tg3_flag(tp, USE_PHYLIB)) 1968 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg; 1969 else 1970 autoneg = tp->link_config.autoneg; 1971 1972 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) { 1973 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 1974 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv); 1975 else 1976 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv); 1977 } else 1978 flowctrl = tp->link_config.flowctrl; 1979 1980 tp->link_config.active_flowctrl = flowctrl; 1981 1982 if (flowctrl & FLOW_CTRL_RX) 1983 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE; 1984 else 1985 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE; 1986 1987 if (old_rx_mode != tp->rx_mode) 1988 tw32_f(MAC_RX_MODE, tp->rx_mode); 1989 1990 if (flowctrl & FLOW_CTRL_TX) 1991 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE; 1992 else 1993 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE; 1994 1995 if (old_tx_mode != tp->tx_mode) 1996 tw32_f(MAC_TX_MODE, tp->tx_mode); 1997 } 1998 1999 static void tg3_adjust_link(struct net_device *dev) 2000 { 2001 u8 oldflowctrl, linkmesg = 0; 2002 u32 mac_mode, lcl_adv, rmt_adv; 2003 struct tg3 *tp = netdev_priv(dev); 2004 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 2005 2006 spin_lock_bh(&tp->lock); 2007 2008 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | 2009 MAC_MODE_HALF_DUPLEX); 2010 2011 oldflowctrl = tp->link_config.active_flowctrl; 2012 2013 if (phydev->link) { 2014 lcl_adv = 0; 2015 rmt_adv = 0; 2016 2017 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) 2018 mac_mode |= MAC_MODE_PORT_MODE_MII; 2019 else if (phydev->speed == SPEED_1000 || 2020 tg3_asic_rev(tp) != ASIC_REV_5785) 2021 
mac_mode |= MAC_MODE_PORT_MODE_GMII; 2022 else 2023 mac_mode |= MAC_MODE_PORT_MODE_MII; 2024 2025 if (phydev->duplex == DUPLEX_HALF) 2026 mac_mode |= MAC_MODE_HALF_DUPLEX; 2027 else { 2028 lcl_adv = mii_advertise_flowctrl( 2029 tp->link_config.flowctrl); 2030 2031 if (phydev->pause) 2032 rmt_adv = LPA_PAUSE_CAP; 2033 if (phydev->asym_pause) 2034 rmt_adv |= LPA_PAUSE_ASYM; 2035 } 2036 2037 tg3_setup_flow_control(tp, lcl_adv, rmt_adv); 2038 } else 2039 mac_mode |= MAC_MODE_PORT_MODE_GMII; 2040 2041 if (mac_mode != tp->mac_mode) { 2042 tp->mac_mode = mac_mode; 2043 tw32_f(MAC_MODE, tp->mac_mode); 2044 udelay(40); 2045 } 2046 2047 if (tg3_asic_rev(tp) == ASIC_REV_5785) { 2048 if (phydev->speed == SPEED_10) 2049 tw32(MAC_MI_STAT, 2050 MAC_MI_STAT_10MBPS_MODE | 2051 MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 2052 else 2053 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 2054 } 2055 2056 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF) 2057 tw32(MAC_TX_LENGTHS, 2058 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | 2059 (6 << TX_LENGTHS_IPG_SHIFT) | 2060 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))); 2061 else 2062 tw32(MAC_TX_LENGTHS, 2063 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | 2064 (6 << TX_LENGTHS_IPG_SHIFT) | 2065 (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); 2066 2067 if (phydev->link != tp->old_link || 2068 phydev->speed != tp->link_config.active_speed || 2069 phydev->duplex != tp->link_config.active_duplex || 2070 oldflowctrl != tp->link_config.active_flowctrl) 2071 linkmesg = 1; 2072 2073 tp->old_link = phydev->link; 2074 tp->link_config.active_speed = phydev->speed; 2075 tp->link_config.active_duplex = phydev->duplex; 2076 2077 spin_unlock_bh(&tp->lock); 2078 2079 if (linkmesg) 2080 tg3_link_report(tp); 2081 } 2082 2083 static int tg3_phy_init(struct tg3 *tp) 2084 { 2085 struct phy_device *phydev; 2086 2087 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) 2088 return 0; 2089 2090 /* Bring the PHY back to a known state. */ 2091 tg3_bmcr_reset(tp); 2092 2093 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 2094 2095 /* Attach the MAC to the PHY. */ 2096 phydev = phy_connect(tp->dev, phydev_name(phydev), 2097 tg3_adjust_link, phydev->interface); 2098 if (IS_ERR(phydev)) { 2099 dev_err(&tp->pdev->dev, "Could not attach to PHY\n"); 2100 return PTR_ERR(phydev); 2101 } 2102 2103 /* Mask with MAC supported features. 
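 * Depending on the PHY interface mode, cap the PHY's maximum speed (1000 Mbps for GMII/RGMII unless the 10_100_ONLY phy flag is set, 100 Mbps otherwise) and mark pause/asym-pause as supported.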
*/ 2104 switch (phydev->interface) { 2105 case PHY_INTERFACE_MODE_GMII: 2106 case PHY_INTERFACE_MODE_RGMII: 2107 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 2108 phy_set_max_speed(phydev, SPEED_1000); 2109 phy_support_asym_pause(phydev); 2110 break; 2111 } 2112 fallthrough; 2113 case PHY_INTERFACE_MODE_MII: 2114 phy_set_max_speed(phydev, SPEED_100); 2115 phy_support_asym_pause(phydev); 2116 break; 2117 default: 2118 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 2119 return -EINVAL; 2120 } 2121 2122 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED; 2123 2124 phy_attached_info(phydev); 2125 2126 return 0; 2127 } 2128 2129 static void tg3_phy_start(struct tg3 *tp) 2130 { 2131 struct phy_device *phydev; 2132 2133 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 2134 return; 2135 2136 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 2137 2138 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { 2139 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; 2140 phydev->speed = tp->link_config.speed; 2141 phydev->duplex = tp->link_config.duplex; 2142 phydev->autoneg = tp->link_config.autoneg; 2143 ethtool_convert_legacy_u32_to_link_mode( 2144 phydev->advertising, tp->link_config.advertising); 2145 } 2146 2147 phy_start(phydev); 2148 2149 phy_start_aneg(phydev); 2150 } 2151 2152 static void tg3_phy_stop(struct tg3 *tp) 2153 { 2154 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 2155 return; 2156 2157 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 2158 } 2159 2160 static void tg3_phy_fini(struct tg3 *tp) 2161 { 2162 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { 2163 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 2164 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED; 2165 } 2166 } 2167 2168 static int tg3_phy_set_extloopbk(struct tg3 *tp) 2169 { 2170 int err; 2171 u32 val; 2172 2173 if (tp->phy_flags & TG3_PHYFLG_IS_FET) 2174 return 0; 2175 2176 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 2177 /* Cannot do read-modify-write on 5401 */ 2178 err = tg3_phy_auxctl_write(tp, 2179 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 2180 MII_TG3_AUXCTL_ACTL_EXTLOOPBK | 2181 0x4c20); 2182 goto done; 2183 } 2184 2185 err = tg3_phy_auxctl_read(tp, 2186 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); 2187 if (err) 2188 return err; 2189 2190 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK; 2191 err = tg3_phy_auxctl_write(tp, 2192 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val); 2193 2194 done: 2195 return err; 2196 } 2197 2198 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable) 2199 { 2200 u32 phytest; 2201 2202 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { 2203 u32 phy; 2204 2205 tg3_writephy(tp, MII_TG3_FET_TEST, 2206 phytest | MII_TG3_FET_SHADOW_EN); 2207 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) { 2208 if (enable) 2209 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD; 2210 else 2211 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD; 2212 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy); 2213 } 2214 tg3_writephy(tp, MII_TG3_FET_TEST, phytest); 2215 } 2216 } 2217 2218 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) 2219 { 2220 u32 reg; 2221 2222 if (!tg3_flag(tp, 5705_PLUS) || 2223 (tg3_flag(tp, 5717_PLUS) && 2224 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) 2225 return; 2226 2227 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 2228 tg3_phy_fet_toggle_apd(tp, enable); 2229 return; 2230 } 2231 2232 reg = MII_TG3_MISC_SHDW_SCR5_LPED | 2233 MII_TG3_MISC_SHDW_SCR5_DLPTLM | 2234 MII_TG3_MISC_SHDW_SCR5_SDTL | 2235 MII_TG3_MISC_SHDW_SCR5_C125OE; 2236 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable) 2237 reg |= 
MII_TG3_MISC_SHDW_SCR5_DLLAPD; 2238 2239 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg); 2240 2241 2242 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS; 2243 if (enable) 2244 reg |= MII_TG3_MISC_SHDW_APD_ENABLE; 2245 2246 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg); 2247 } 2248 2249 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable) 2250 { 2251 u32 phy; 2252 2253 if (!tg3_flag(tp, 5705_PLUS) || 2254 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 2255 return; 2256 2257 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 2258 u32 ephy; 2259 2260 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) { 2261 u32 reg = MII_TG3_FET_SHDW_MISCCTRL; 2262 2263 tg3_writephy(tp, MII_TG3_FET_TEST, 2264 ephy | MII_TG3_FET_SHADOW_EN); 2265 if (!tg3_readphy(tp, reg, &phy)) { 2266 if (enable) 2267 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX; 2268 else 2269 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX; 2270 tg3_writephy(tp, reg, phy); 2271 } 2272 tg3_writephy(tp, MII_TG3_FET_TEST, ephy); 2273 } 2274 } else { 2275 int ret; 2276 2277 ret = tg3_phy_auxctl_read(tp, 2278 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy); 2279 if (!ret) { 2280 if (enable) 2281 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX; 2282 else 2283 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX; 2284 tg3_phy_auxctl_write(tp, 2285 MII_TG3_AUXCTL_SHDWSEL_MISC, phy); 2286 } 2287 } 2288 } 2289 2290 static void tg3_phy_set_wirespeed(struct tg3 *tp) 2291 { 2292 int ret; 2293 u32 val; 2294 2295 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) 2296 return; 2297 2298 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val); 2299 if (!ret) 2300 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, 2301 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN); 2302 } 2303 2304 static void tg3_phy_apply_otp(struct tg3 *tp) 2305 { 2306 u32 otp, phy; 2307 2308 if (!tp->phy_otp) 2309 return; 2310 2311 otp = tp->phy_otp; 2312 2313 if (tg3_phy_toggle_auxctl_smdsp(tp, true)) 2314 return; 2315 2316 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); 2317 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT; 2318 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy); 2319 2320 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) | 2321 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT); 2322 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy); 2323 2324 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT); 2325 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ; 2326 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy); 2327 2328 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT); 2329 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy); 2330 2331 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT); 2332 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy); 2333 2334 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) | 2335 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT); 2336 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); 2337 2338 tg3_phy_toggle_auxctl_smdsp(tp, false); 2339 } 2340 2341 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee) 2342 { 2343 u32 val; 2344 struct ethtool_eee *dest = &tp->eee; 2345 2346 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 2347 return; 2348 2349 if (eee) 2350 dest = eee; 2351 2352 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val)) 2353 return; 2354 2355 /* Pull eee_active */ 2356 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || 2357 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) { 2358 dest->eee_active = 1; 2359 } else 2360 dest->eee_active = 0; 2361 2362 /* Pull lp advertised settings */ 2363 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val)) 
2364 return; 2365 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val); 2366 2367 /* Pull advertised and eee_enabled settings */ 2368 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val)) 2369 return; 2370 dest->eee_enabled = !!val; 2371 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val); 2372 2373 /* Pull tx_lpi_enabled */ 2374 val = tr32(TG3_CPMU_EEE_MODE); 2375 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX); 2376 2377 /* Pull lpi timer value */ 2378 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff; 2379 } 2380 2381 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up) 2382 { 2383 u32 val; 2384 2385 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 2386 return; 2387 2388 tp->setlpicnt = 0; 2389 2390 if (tp->link_config.autoneg == AUTONEG_ENABLE && 2391 current_link_up && 2392 tp->link_config.active_duplex == DUPLEX_FULL && 2393 (tp->link_config.active_speed == SPEED_100 || 2394 tp->link_config.active_speed == SPEED_1000)) { 2395 u32 eeectl; 2396 2397 if (tp->link_config.active_speed == SPEED_1000) 2398 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US; 2399 else 2400 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US; 2401 2402 tw32(TG3_CPMU_EEE_CTRL, eeectl); 2403 2404 tg3_eee_pull_config(tp, NULL); 2405 if (tp->eee.eee_active) 2406 tp->setlpicnt = 2; 2407 } 2408 2409 if (!tp->setlpicnt) { 2410 if (current_link_up && 2411 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2412 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000); 2413 tg3_phy_toggle_auxctl_smdsp(tp, false); 2414 } 2415 2416 val = tr32(TG3_CPMU_EEE_MODE); 2417 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE); 2418 } 2419 } 2420 2421 static void tg3_phy_eee_enable(struct tg3 *tp) 2422 { 2423 u32 val; 2424 2425 if (tp->link_config.active_speed == SPEED_1000 && 2426 (tg3_asic_rev(tp) == ASIC_REV_5717 || 2427 tg3_asic_rev(tp) == ASIC_REV_5719 || 2428 tg3_flag(tp, 57765_CLASS)) && 2429 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2430 val = MII_TG3_DSP_TAP26_ALNOKO | 2431 MII_TG3_DSP_TAP26_RMRXSTO; 2432 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); 2433 tg3_phy_toggle_auxctl_smdsp(tp, false); 2434 } 2435 2436 val = tr32(TG3_CPMU_EEE_MODE); 2437 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE); 2438 } 2439 2440 static int tg3_wait_macro_done(struct tg3 *tp) 2441 { 2442 int limit = 100; 2443 2444 while (limit--) { 2445 u32 tmp32; 2446 2447 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) { 2448 if ((tmp32 & 0x1000) == 0) 2449 break; 2450 } 2451 } 2452 if (limit < 0) 2453 return -EBUSY; 2454 2455 return 0; 2456 } 2457 2458 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp) 2459 { 2460 static const u32 test_pat[4][6] = { 2461 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 }, 2462 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 }, 2463 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 }, 2464 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 } 2465 }; 2466 int chan; 2467 2468 for (chan = 0; chan < 4; chan++) { 2469 int i; 2470 2471 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 2472 (chan * 0x2000) | 0x0200); 2473 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); 2474 2475 for (i = 0; i < 6; i++) 2476 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 2477 test_pat[chan][i]); 2478 2479 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); 2480 if (tg3_wait_macro_done(tp)) { 2481 *resetp = 1; 2482 return -EBUSY; 2483 } 2484 2485 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 2486 (chan * 0x2000) | 0x0200); 2487 tg3_writephy(tp, 
MII_TG3_DSP_CONTROL, 0x0082); 2488 if (tg3_wait_macro_done(tp)) { 2489 *resetp = 1; 2490 return -EBUSY; 2491 } 2492 2493 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802); 2494 if (tg3_wait_macro_done(tp)) { 2495 *resetp = 1; 2496 return -EBUSY; 2497 } 2498 2499 for (i = 0; i < 6; i += 2) { 2500 u32 low, high; 2501 2502 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) || 2503 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) || 2504 tg3_wait_macro_done(tp)) { 2505 *resetp = 1; 2506 return -EBUSY; 2507 } 2508 low &= 0x7fff; 2509 high &= 0x000f; 2510 if (low != test_pat[chan][i] || 2511 high != test_pat[chan][i+1]) { 2512 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b); 2513 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001); 2514 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005); 2515 2516 return -EBUSY; 2517 } 2518 } 2519 } 2520 2521 return 0; 2522 } 2523 2524 static int tg3_phy_reset_chanpat(struct tg3 *tp) 2525 { 2526 int chan; 2527 2528 for (chan = 0; chan < 4; chan++) { 2529 int i; 2530 2531 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 2532 (chan * 0x2000) | 0x0200); 2533 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); 2534 for (i = 0; i < 6; i++) 2535 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000); 2536 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); 2537 if (tg3_wait_macro_done(tp)) 2538 return -EBUSY; 2539 } 2540 2541 return 0; 2542 } 2543 2544 static int tg3_phy_reset_5703_4_5(struct tg3 *tp) 2545 { 2546 u32 reg32, phy9_orig; 2547 int retries, do_phy_reset, err; 2548 2549 retries = 10; 2550 do_phy_reset = 1; 2551 do { 2552 if (do_phy_reset) { 2553 err = tg3_bmcr_reset(tp); 2554 if (err) 2555 return err; 2556 do_phy_reset = 0; 2557 } 2558 2559 /* Disable transmitter and interrupt. */ 2560 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) 2561 continue; 2562 2563 reg32 |= 0x3000; 2564 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); 2565 2566 /* Set full-duplex, 1000 mbps. */ 2567 tg3_writephy(tp, MII_BMCR, 2568 BMCR_FULLDPLX | BMCR_SPEED1000); 2569 2570 /* Set to master mode. */ 2571 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig)) 2572 continue; 2573 2574 tg3_writephy(tp, MII_CTRL1000, 2575 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); 2576 2577 err = tg3_phy_toggle_auxctl_smdsp(tp, true); 2578 if (err) 2579 return err; 2580 2581 /* Block the PHY control access. */ 2582 tg3_phydsp_write(tp, 0x8005, 0x0800); 2583 2584 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset); 2585 if (!err) 2586 break; 2587 } while (--retries); 2588 2589 err = tg3_phy_reset_chanpat(tp); 2590 if (err) 2591 return err; 2592 2593 tg3_phydsp_write(tp, 0x8005, 0x0000); 2594 2595 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200); 2596 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000); 2597 2598 tg3_phy_toggle_auxctl_smdsp(tp, false); 2599 2600 tg3_writephy(tp, MII_CTRL1000, phy9_orig); 2601 2602 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32); 2603 if (err) 2604 return err; 2605 2606 reg32 &= ~0x3000; 2607 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); 2608 2609 return 0; 2610 } 2611 2612 static void tg3_carrier_off(struct tg3 *tp) 2613 { 2614 netif_carrier_off(tp->dev); 2615 tp->link_up = false; 2616 } 2617 2618 static void tg3_warn_mgmt_link_flap(struct tg3 *tp) 2619 { 2620 if (tg3_flag(tp, ENABLE_ASF)) 2621 netdev_warn(tp->dev, 2622 "Management side-band traffic will be interrupted during phy settings change\n"); 2623 } 2624 2625 /* Reset the tigon3 PHY unconditionally and reapply the 2626 * chip-specific workarounds that must follow a PHY reset.
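 * On 5703/5704/5705 a dedicated reset-and-retrain sequence runs instead of the plain BMCR reset; other chips additionally get the OTP tuning values and APD state reapplied. Both paths then redo the ADC/BER/jitter DSP fixups and the jumbo-frame settings where the corresponding phy_flags are set.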
2627 */ 2628 static int tg3_phy_reset(struct tg3 *tp) 2629 { 2630 u32 val, cpmuctrl; 2631 int err; 2632 2633 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 2634 val = tr32(GRC_MISC_CFG); 2635 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ); 2636 udelay(40); 2637 } 2638 err = tg3_readphy(tp, MII_BMSR, &val); 2639 err |= tg3_readphy(tp, MII_BMSR, &val); 2640 if (err != 0) 2641 return -EBUSY; 2642 2643 if (netif_running(tp->dev) && tp->link_up) { 2644 netif_carrier_off(tp->dev); 2645 tg3_link_report(tp); 2646 } 2647 2648 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 2649 tg3_asic_rev(tp) == ASIC_REV_5704 || 2650 tg3_asic_rev(tp) == ASIC_REV_5705) { 2651 err = tg3_phy_reset_5703_4_5(tp); 2652 if (err) 2653 return err; 2654 goto out; 2655 } 2656 2657 cpmuctrl = 0; 2658 if (tg3_asic_rev(tp) == ASIC_REV_5784 && 2659 tg3_chip_rev(tp) != CHIPREV_5784_AX) { 2660 cpmuctrl = tr32(TG3_CPMU_CTRL); 2661 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) 2662 tw32(TG3_CPMU_CTRL, 2663 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY); 2664 } 2665 2666 err = tg3_bmcr_reset(tp); 2667 if (err) 2668 return err; 2669 2670 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) { 2671 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz; 2672 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val); 2673 2674 tw32(TG3_CPMU_CTRL, cpmuctrl); 2675 } 2676 2677 if (tg3_chip_rev(tp) == CHIPREV_5784_AX || 2678 tg3_chip_rev(tp) == CHIPREV_5761_AX) { 2679 val = tr32(TG3_CPMU_LSPD_1000MB_CLK); 2680 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) == 2681 CPMU_LSPD_1000MB_MACCLK_12_5) { 2682 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; 2683 udelay(40); 2684 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); 2685 } 2686 } 2687 2688 if (tg3_flag(tp, 5717_PLUS) && 2689 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) 2690 return 0; 2691 2692 tg3_phy_apply_otp(tp); 2693 2694 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 2695 tg3_phy_toggle_apd(tp, true); 2696 else 2697 tg3_phy_toggle_apd(tp, false); 2698 2699 out: 2700 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) && 2701 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2702 tg3_phydsp_write(tp, 0x201f, 0x2aaa); 2703 tg3_phydsp_write(tp, 0x000a, 0x0323); 2704 tg3_phy_toggle_auxctl_smdsp(tp, false); 2705 } 2706 2707 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) { 2708 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); 2709 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); 2710 } 2711 2712 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) { 2713 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2714 tg3_phydsp_write(tp, 0x000a, 0x310b); 2715 tg3_phydsp_write(tp, 0x201f, 0x9506); 2716 tg3_phydsp_write(tp, 0x401f, 0x14e2); 2717 tg3_phy_toggle_auxctl_smdsp(tp, false); 2718 } 2719 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) { 2720 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2721 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); 2722 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) { 2723 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); 2724 tg3_writephy(tp, MII_TG3_TEST1, 2725 MII_TG3_TEST1_TRIM_EN | 0x4); 2726 } else 2727 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); 2728 2729 tg3_phy_toggle_auxctl_smdsp(tp, false); 2730 } 2731 } 2732 2733 /* Set Extended packet length bit (bit 14) on all chips that */ 2734 /* support jumbo frames */ 2735 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 2736 /* Cannot do read-modify-write on 5401 */ 2737 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); 2738 } else if (tg3_flag(tp, JUMBO_CAPABLE)) { 2739 /* Set bit 14 with read-modify-write to preserve other bits */ 2740 err = tg3_phy_auxctl_read(tp, 2741 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); 2742 
if (!err) 2743 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 2744 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN); 2745 } 2746 2747 /* Set phy register 0x10 bit 0 to high fifo elasticity to support 2748 * jumbo frames transmission. 2749 */ 2750 if (tg3_flag(tp, JUMBO_CAPABLE)) { 2751 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val)) 2752 tg3_writephy(tp, MII_TG3_EXT_CTRL, 2753 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC); 2754 } 2755 2756 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 2757 /* adjust output voltage */ 2758 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12); 2759 } 2760 2761 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0) 2762 tg3_phydsp_write(tp, 0xffb, 0x4000); 2763 2764 tg3_phy_toggle_automdix(tp, true); 2765 tg3_phy_set_wirespeed(tp); 2766 return 0; 2767 } 2768 2769 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001 2770 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002 2771 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \ 2772 TG3_GPIO_MSG_NEED_VAUX) 2773 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \ 2774 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \ 2775 (TG3_GPIO_MSG_DRVR_PRES << 4) | \ 2776 (TG3_GPIO_MSG_DRVR_PRES << 8) | \ 2777 (TG3_GPIO_MSG_DRVR_PRES << 12)) 2778 2779 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \ 2780 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \ 2781 (TG3_GPIO_MSG_NEED_VAUX << 4) | \ 2782 (TG3_GPIO_MSG_NEED_VAUX << 8) | \ 2783 (TG3_GPIO_MSG_NEED_VAUX << 12)) 2784 2785 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat) 2786 { 2787 u32 status, shift; 2788 2789 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2790 tg3_asic_rev(tp) == ASIC_REV_5719) 2791 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG); 2792 else 2793 status = tr32(TG3_CPMU_DRV_STATUS); 2794 2795 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn; 2796 status &= ~(TG3_GPIO_MSG_MASK << shift); 2797 status |= (newstat << shift); 2798 2799 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2800 tg3_asic_rev(tp) == ASIC_REV_5719) 2801 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status); 2802 else 2803 tw32(TG3_CPMU_DRV_STATUS, status); 2804 2805 return status >> TG3_APE_GPIO_MSG_SHIFT; 2806 } 2807 2808 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp) 2809 { 2810 if (!tg3_flag(tp, IS_NIC)) 2811 return 0; 2812 2813 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2814 tg3_asic_rev(tp) == ASIC_REV_5719 || 2815 tg3_asic_rev(tp) == ASIC_REV_5720) { 2816 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) 2817 return -EIO; 2818 2819 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES); 2820 2821 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 2822 TG3_GRC_LCLCTL_PWRSW_DELAY); 2823 2824 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); 2825 } else { 2826 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 2827 TG3_GRC_LCLCTL_PWRSW_DELAY); 2828 } 2829 2830 return 0; 2831 } 2832 2833 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp) 2834 { 2835 u32 grc_local_ctrl; 2836 2837 if (!tg3_flag(tp, IS_NIC) || 2838 tg3_asic_rev(tp) == ASIC_REV_5700 || 2839 tg3_asic_rev(tp) == ASIC_REV_5701) 2840 return; 2841 2842 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1; 2843 2844 tw32_wait_f(GRC_LOCAL_CTRL, 2845 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, 2846 TG3_GRC_LCLCTL_PWRSW_DELAY); 2847 2848 tw32_wait_f(GRC_LOCAL_CTRL, 2849 grc_local_ctrl, 2850 TG3_GRC_LCLCTL_PWRSW_DELAY); 2851 2852 tw32_wait_f(GRC_LOCAL_CTRL, 2853 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, 2854 TG3_GRC_LCLCTL_PWRSW_DELAY); 2855 } 2856 2857 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp) 2858 { 2859 if (!tg3_flag(tp, IS_NIC)) 2860 return; 2861 2862 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 2863 tg3_asic_rev(tp) == 
ASIC_REV_5701) { 2864 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2865 (GRC_LCLCTRL_GPIO_OE0 | 2866 GRC_LCLCTRL_GPIO_OE1 | 2867 GRC_LCLCTRL_GPIO_OE2 | 2868 GRC_LCLCTRL_GPIO_OUTPUT0 | 2869 GRC_LCLCTRL_GPIO_OUTPUT1), 2870 TG3_GRC_LCLCTL_PWRSW_DELAY); 2871 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 2872 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { 2873 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */ 2874 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | 2875 GRC_LCLCTRL_GPIO_OE1 | 2876 GRC_LCLCTRL_GPIO_OE2 | 2877 GRC_LCLCTRL_GPIO_OUTPUT0 | 2878 GRC_LCLCTRL_GPIO_OUTPUT1 | 2879 tp->grc_local_ctrl; 2880 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 2881 TG3_GRC_LCLCTL_PWRSW_DELAY); 2882 2883 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2; 2884 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 2885 TG3_GRC_LCLCTL_PWRSW_DELAY); 2886 2887 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0; 2888 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 2889 TG3_GRC_LCLCTL_PWRSW_DELAY); 2890 } else { 2891 u32 no_gpio2; 2892 u32 grc_local_ctrl = 0; 2893 2894 /* Workaround to prevent overdrawing Amps. */ 2895 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 2896 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; 2897 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2898 grc_local_ctrl, 2899 TG3_GRC_LCLCTL_PWRSW_DELAY); 2900 } 2901 2902 /* On 5753 and variants, GPIO2 cannot be used. */ 2903 no_gpio2 = tp->nic_sram_data_cfg & 2904 NIC_SRAM_DATA_CFG_NO_GPIO2; 2905 2906 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | 2907 GRC_LCLCTRL_GPIO_OE1 | 2908 GRC_LCLCTRL_GPIO_OE2 | 2909 GRC_LCLCTRL_GPIO_OUTPUT1 | 2910 GRC_LCLCTRL_GPIO_OUTPUT2; 2911 if (no_gpio2) { 2912 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 | 2913 GRC_LCLCTRL_GPIO_OUTPUT2); 2914 } 2915 tw32_wait_f(GRC_LOCAL_CTRL, 2916 tp->grc_local_ctrl | grc_local_ctrl, 2917 TG3_GRC_LCLCTL_PWRSW_DELAY); 2918 2919 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0; 2920 2921 tw32_wait_f(GRC_LOCAL_CTRL, 2922 tp->grc_local_ctrl | grc_local_ctrl, 2923 TG3_GRC_LCLCTL_PWRSW_DELAY); 2924 2925 if (!no_gpio2) { 2926 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2; 2927 tw32_wait_f(GRC_LOCAL_CTRL, 2928 tp->grc_local_ctrl | grc_local_ctrl, 2929 TG3_GRC_LCLCTL_PWRSW_DELAY); 2930 } 2931 } 2932 } 2933 2934 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable) 2935 { 2936 u32 msg = 0; 2937 2938 /* Serialize power state transitions */ 2939 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) 2940 return; 2941 2942 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable) 2943 msg = TG3_GPIO_MSG_NEED_VAUX; 2944 2945 msg = tg3_set_function_status(tp, msg); 2946 2947 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK) 2948 goto done; 2949 2950 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK) 2951 tg3_pwrsrc_switch_to_vaux(tp); 2952 else 2953 tg3_pwrsrc_die_with_vmain(tp); 2954 2955 done: 2956 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); 2957 } 2958 2959 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol) 2960 { 2961 bool need_vaux = false; 2962 2963 /* The GPIOs do something completely different on 57765. */ 2964 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS)) 2965 return; 2966 2967 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2968 tg3_asic_rev(tp) == ASIC_REV_5719 || 2969 tg3_asic_rev(tp) == ASIC_REV_5720) { 2970 tg3_frob_aux_power_5717(tp, include_wol ? 
2971 tg3_flag(tp, WOL_ENABLE) != 0 : 0); 2972 return; 2973 } 2974 2975 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) { 2976 struct net_device *dev_peer; 2977 2978 dev_peer = pci_get_drvdata(tp->pdev_peer); 2979 2980 /* remove_one() may have been run on the peer. */ 2981 if (dev_peer) { 2982 struct tg3 *tp_peer = netdev_priv(dev_peer); 2983 2984 if (tg3_flag(tp_peer, INIT_COMPLETE)) 2985 return; 2986 2987 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) || 2988 tg3_flag(tp_peer, ENABLE_ASF)) 2989 need_vaux = true; 2990 } 2991 } 2992 2993 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) || 2994 tg3_flag(tp, ENABLE_ASF)) 2995 need_vaux = true; 2996 2997 if (need_vaux) 2998 tg3_pwrsrc_switch_to_vaux(tp); 2999 else 3000 tg3_pwrsrc_die_with_vmain(tp); 3001 } 3002 3003 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) 3004 { 3005 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2) 3006 return 1; 3007 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) { 3008 if (speed != SPEED_10) 3009 return 1; 3010 } else if (speed == SPEED_10) 3011 return 1; 3012 3013 return 0; 3014 } 3015 3016 static bool tg3_phy_power_bug(struct tg3 *tp) 3017 { 3018 switch (tg3_asic_rev(tp)) { 3019 case ASIC_REV_5700: 3020 case ASIC_REV_5704: 3021 return true; 3022 case ASIC_REV_5780: 3023 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 3024 return true; 3025 return false; 3026 case ASIC_REV_5717: 3027 if (!tp->pci_fn) 3028 return true; 3029 return false; 3030 case ASIC_REV_5719: 3031 case ASIC_REV_5720: 3032 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 3033 !tp->pci_fn) 3034 return true; 3035 return false; 3036 } 3037 3038 return false; 3039 } 3040 3041 static bool tg3_phy_led_bug(struct tg3 *tp) 3042 { 3043 switch (tg3_asic_rev(tp)) { 3044 case ASIC_REV_5719: 3045 case ASIC_REV_5720: 3046 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 3047 !tp->pci_fn) 3048 return true; 3049 return false; 3050 } 3051 3052 return false; 3053 } 3054 3055 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) 3056 { 3057 u32 val; 3058 3059 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) 3060 return; 3061 3062 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 3063 if (tg3_asic_rev(tp) == ASIC_REV_5704) { 3064 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL); 3065 u32 serdes_cfg = tr32(MAC_SERDES_CFG); 3066 3067 sg_dig_ctrl |= 3068 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET; 3069 tw32(SG_DIG_CTRL, sg_dig_ctrl); 3070 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15)); 3071 } 3072 return; 3073 } 3074 3075 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 3076 tg3_bmcr_reset(tp); 3077 val = tr32(GRC_MISC_CFG); 3078 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); 3079 udelay(40); 3080 return; 3081 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 3082 u32 phytest; 3083 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { 3084 u32 phy; 3085 3086 tg3_writephy(tp, MII_ADVERTISE, 0); 3087 tg3_writephy(tp, MII_BMCR, 3088 BMCR_ANENABLE | BMCR_ANRESTART); 3089 3090 tg3_writephy(tp, MII_TG3_FET_TEST, 3091 phytest | MII_TG3_FET_SHADOW_EN); 3092 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) { 3093 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD; 3094 tg3_writephy(tp, 3095 MII_TG3_FET_SHDW_AUXMODE4, 3096 phy); 3097 } 3098 tg3_writephy(tp, MII_TG3_FET_TEST, phytest); 3099 } 3100 return; 3101 } else if (do_low_power) { 3102 if (!tg3_phy_led_bug(tp)) 3103 tg3_writephy(tp, MII_TG3_EXT_CTRL, 3104 MII_TG3_EXT_CTRL_FORCE_LED_OFF); 3105 3106 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR | 3107 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | 3108 MII_TG3_AUXCTL_PCTL_VREG_11V; 3109 
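/* Commit the low-power, isolate and regulator settings through the AUXCTL power-control shadow register. */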
tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val); 3110 } 3111 3112 /* The PHY should not be powered down on some chips because 3113 * of bugs. 3114 */ 3115 if (tg3_phy_power_bug(tp)) 3116 return; 3117 3118 if (tg3_chip_rev(tp) == CHIPREV_5784_AX || 3119 tg3_chip_rev(tp) == CHIPREV_5761_AX) { 3120 val = tr32(TG3_CPMU_LSPD_1000MB_CLK); 3121 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; 3122 val |= CPMU_LSPD_1000MB_MACCLK_12_5; 3123 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); 3124 } 3125 3126 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN); 3127 } 3128 3129 /* tp->lock is held. */ 3130 static int tg3_nvram_lock(struct tg3 *tp) 3131 { 3132 if (tg3_flag(tp, NVRAM)) { 3133 int i; 3134 3135 if (tp->nvram_lock_cnt == 0) { 3136 tw32(NVRAM_SWARB, SWARB_REQ_SET1); 3137 for (i = 0; i < 8000; i++) { 3138 if (tr32(NVRAM_SWARB) & SWARB_GNT1) 3139 break; 3140 udelay(20); 3141 } 3142 if (i == 8000) { 3143 tw32(NVRAM_SWARB, SWARB_REQ_CLR1); 3144 return -ENODEV; 3145 } 3146 } 3147 tp->nvram_lock_cnt++; 3148 } 3149 return 0; 3150 } 3151 3152 /* tp->lock is held. */ 3153 static void tg3_nvram_unlock(struct tg3 *tp) 3154 { 3155 if (tg3_flag(tp, NVRAM)) { 3156 if (tp->nvram_lock_cnt > 0) 3157 tp->nvram_lock_cnt--; 3158 if (tp->nvram_lock_cnt == 0) 3159 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); 3160 } 3161 } 3162 3163 /* tp->lock is held. */ 3164 static void tg3_enable_nvram_access(struct tg3 *tp) 3165 { 3166 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { 3167 u32 nvaccess = tr32(NVRAM_ACCESS); 3168 3169 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); 3170 } 3171 } 3172 3173 /* tp->lock is held. */ 3174 static void tg3_disable_nvram_access(struct tg3 *tp) 3175 { 3176 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { 3177 u32 nvaccess = tr32(NVRAM_ACCESS); 3178 3179 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); 3180 } 3181 } 3182 3183 static int tg3_nvram_read_using_eeprom(struct tg3 *tp, 3184 u32 offset, u32 *val) 3185 { 3186 u32 tmp; 3187 int i; 3188 3189 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0) 3190 return -EINVAL; 3191 3192 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK | 3193 EEPROM_ADDR_DEVID_MASK | 3194 EEPROM_ADDR_READ); 3195 tw32(GRC_EEPROM_ADDR, 3196 tmp | 3197 (0 << EEPROM_ADDR_DEVID_SHIFT) | 3198 ((offset << EEPROM_ADDR_ADDR_SHIFT) & 3199 EEPROM_ADDR_ADDR_MASK) | 3200 EEPROM_ADDR_READ | EEPROM_ADDR_START); 3201 3202 for (i = 0; i < 1000; i++) { 3203 tmp = tr32(GRC_EEPROM_ADDR); 3204 3205 if (tmp & EEPROM_ADDR_COMPLETE) 3206 break; 3207 msleep(1); 3208 } 3209 if (!(tmp & EEPROM_ADDR_COMPLETE)) 3210 return -EBUSY; 3211 3212 tmp = tr32(GRC_EEPROM_DATA); 3213 3214 /* 3215 * The data will always be opposite the native endian 3216 * format. Perform a blind byteswap to compensate. 
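 * In other words, whatever the host endianness, the value read from GRC_EEPROM_DATA is byte-reversed relative to what the caller expects, so an unconditional swab32() yields the correct result.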
3217 */ 3218 *val = swab32(tmp); 3219 3220 return 0; 3221 } 3222 3223 #define NVRAM_CMD_TIMEOUT 10000 3224 3225 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) 3226 { 3227 int i; 3228 3229 tw32(NVRAM_CMD, nvram_cmd); 3230 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) { 3231 usleep_range(10, 40); 3232 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) { 3233 udelay(10); 3234 break; 3235 } 3236 } 3237 3238 if (i == NVRAM_CMD_TIMEOUT) 3239 return -EBUSY; 3240 3241 return 0; 3242 } 3243 3244 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) 3245 { 3246 if (tg3_flag(tp, NVRAM) && 3247 tg3_flag(tp, NVRAM_BUFFERED) && 3248 tg3_flag(tp, FLASH) && 3249 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && 3250 (tp->nvram_jedecnum == JEDEC_ATMEL)) 3251 3252 addr = ((addr / tp->nvram_pagesize) << 3253 ATMEL_AT45DB0X1B_PAGE_POS) + 3254 (addr % tp->nvram_pagesize); 3255 3256 return addr; 3257 } 3258 3259 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) 3260 { 3261 if (tg3_flag(tp, NVRAM) && 3262 tg3_flag(tp, NVRAM_BUFFERED) && 3263 tg3_flag(tp, FLASH) && 3264 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && 3265 (tp->nvram_jedecnum == JEDEC_ATMEL)) 3266 3267 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * 3268 tp->nvram_pagesize) + 3269 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1)); 3270 3271 return addr; 3272 } 3273 3274 /* NOTE: Data read in from NVRAM is byteswapped according to 3275 * the byteswapping settings for all other register accesses. 3276 * tg3 devices are BE devices, so on a BE machine, the data 3277 * returned will be exactly as it is seen in NVRAM. On a LE 3278 * machine, the 32-bit value will be byteswapped. 3279 */ 3280 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) 3281 { 3282 int ret; 3283 3284 if (!tg3_flag(tp, NVRAM)) 3285 return tg3_nvram_read_using_eeprom(tp, offset, val); 3286 3287 offset = tg3_nvram_phys_addr(tp, offset); 3288 3289 if (offset > NVRAM_ADDR_MSK) 3290 return -EINVAL; 3291 3292 ret = tg3_nvram_lock(tp); 3293 if (ret) 3294 return ret; 3295 3296 tg3_enable_nvram_access(tp); 3297 3298 tw32(NVRAM_ADDR, offset); 3299 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO | 3300 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE); 3301 3302 if (ret == 0) 3303 *val = tr32(NVRAM_RDDATA); 3304 3305 tg3_disable_nvram_access(tp); 3306 3307 tg3_nvram_unlock(tp); 3308 3309 return ret; 3310 } 3311 3312 /* Ensures NVRAM data is in bytestream format. */ 3313 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val) 3314 { 3315 u32 v; 3316 int res = tg3_nvram_read(tp, offset, &v); 3317 if (!res) 3318 *val = cpu_to_be32(v); 3319 return res; 3320 } 3321 3322 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, 3323 u32 offset, u32 len, u8 *buf) 3324 { 3325 int i, j, rc = 0; 3326 u32 val; 3327 3328 for (i = 0; i < len; i += 4) { 3329 u32 addr; 3330 __be32 data; 3331 3332 addr = offset + i; 3333 3334 memcpy(&data, buf + i, 4); 3335 3336 /* 3337 * The SEEPROM interface expects the data to always be opposite 3338 * the native endian format. We accomplish this by reversing 3339 * all the operations that would have been performed on the 3340 * data from a call to tg3_nvram_read_be32(). 
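 * Hence swab32(be32_to_cpu(data)) below: undo the cpu_to_be32() applied on the read side, then apply the read path's blind swab32() in reverse.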
3341 */ 3342 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data))); 3343 3344 val = tr32(GRC_EEPROM_ADDR); 3345 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE); 3346 3347 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK | 3348 EEPROM_ADDR_READ); 3349 tw32(GRC_EEPROM_ADDR, val | 3350 (0 << EEPROM_ADDR_DEVID_SHIFT) | 3351 (addr & EEPROM_ADDR_ADDR_MASK) | 3352 EEPROM_ADDR_START | 3353 EEPROM_ADDR_WRITE); 3354 3355 for (j = 0; j < 1000; j++) { 3356 val = tr32(GRC_EEPROM_ADDR); 3357 3358 if (val & EEPROM_ADDR_COMPLETE) 3359 break; 3360 msleep(1); 3361 } 3362 if (!(val & EEPROM_ADDR_COMPLETE)) { 3363 rc = -EBUSY; 3364 break; 3365 } 3366 } 3367 3368 return rc; 3369 } 3370 3371 /* offset and length are dword aligned */ 3372 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, 3373 u8 *buf) 3374 { 3375 int ret = 0; 3376 u32 pagesize = tp->nvram_pagesize; 3377 u32 pagemask = pagesize - 1; 3378 u32 nvram_cmd; 3379 u8 *tmp; 3380 3381 tmp = kmalloc(pagesize, GFP_KERNEL); 3382 if (tmp == NULL) 3383 return -ENOMEM; 3384 3385 while (len) { 3386 int j; 3387 u32 phy_addr, page_off, size; 3388 3389 phy_addr = offset & ~pagemask; 3390 3391 for (j = 0; j < pagesize; j += 4) { 3392 ret = tg3_nvram_read_be32(tp, phy_addr + j, 3393 (__be32 *) (tmp + j)); 3394 if (ret) 3395 break; 3396 } 3397 if (ret) 3398 break; 3399 3400 page_off = offset & pagemask; 3401 size = pagesize; 3402 if (len < size) 3403 size = len; 3404 3405 len -= size; 3406 3407 memcpy(tmp + page_off, buf, size); 3408 3409 offset = offset + (pagesize - page_off); 3410 3411 tg3_enable_nvram_access(tp); 3412 3413 /* 3414 * Before we can erase the flash page, we need 3415 * to issue a special "write enable" command. 3416 */ 3417 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3418 3419 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 3420 break; 3421 3422 /* Erase the target page */ 3423 tw32(NVRAM_ADDR, phy_addr); 3424 3425 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR | 3426 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE; 3427 3428 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 3429 break; 3430 3431 /* Issue another write enable to start the write. 
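 * (Each erase or program operation consumes the current write enable, so WREN has to be reissued before the page data can be programmed.)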
*/ 3432 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3433 3434 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 3435 break; 3436 3437 for (j = 0; j < pagesize; j += 4) { 3438 __be32 data; 3439 3440 data = *((__be32 *) (tmp + j)); 3441 3442 tw32(NVRAM_WRDATA, be32_to_cpu(data)); 3443 3444 tw32(NVRAM_ADDR, phy_addr + j); 3445 3446 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | 3447 NVRAM_CMD_WR; 3448 3449 if (j == 0) 3450 nvram_cmd |= NVRAM_CMD_FIRST; 3451 else if (j == (pagesize - 4)) 3452 nvram_cmd |= NVRAM_CMD_LAST; 3453 3454 ret = tg3_nvram_exec_cmd(tp, nvram_cmd); 3455 if (ret) 3456 break; 3457 } 3458 if (ret) 3459 break; 3460 } 3461 3462 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3463 tg3_nvram_exec_cmd(tp, nvram_cmd); 3464 3465 kfree(tmp); 3466 3467 return ret; 3468 } 3469 3470 /* offset and length are dword aligned */ 3471 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, 3472 u8 *buf) 3473 { 3474 int i, ret = 0; 3475 3476 for (i = 0; i < len; i += 4, offset += 4) { 3477 u32 page_off, phy_addr, nvram_cmd; 3478 __be32 data; 3479 3480 memcpy(&data, buf + i, 4); 3481 tw32(NVRAM_WRDATA, be32_to_cpu(data)); 3482 3483 page_off = offset % tp->nvram_pagesize; 3484 3485 phy_addr = tg3_nvram_phys_addr(tp, offset); 3486 3487 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR; 3488 3489 if (page_off == 0 || i == 0) 3490 nvram_cmd |= NVRAM_CMD_FIRST; 3491 if (page_off == (tp->nvram_pagesize - 4)) 3492 nvram_cmd |= NVRAM_CMD_LAST; 3493 3494 if (i == (len - 4)) 3495 nvram_cmd |= NVRAM_CMD_LAST; 3496 3497 if ((nvram_cmd & NVRAM_CMD_FIRST) || 3498 !tg3_flag(tp, FLASH) || 3499 !tg3_flag(tp, 57765_PLUS)) 3500 tw32(NVRAM_ADDR, phy_addr); 3501 3502 if (tg3_asic_rev(tp) != ASIC_REV_5752 && 3503 !tg3_flag(tp, 5755_PLUS) && 3504 (tp->nvram_jedecnum == JEDEC_ST) && 3505 (nvram_cmd & NVRAM_CMD_FIRST)) { 3506 u32 cmd; 3507 3508 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3509 ret = tg3_nvram_exec_cmd(tp, cmd); 3510 if (ret) 3511 break; 3512 } 3513 if (!tg3_flag(tp, FLASH)) { 3514 /* We always do complete word writes to eeprom. 
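 * Flagging every dword both FIRST and LAST makes each 4-byte write a self-contained transaction.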
*/ 3515 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST); 3516 } 3517 3518 ret = tg3_nvram_exec_cmd(tp, nvram_cmd); 3519 if (ret) 3520 break; 3521 } 3522 return ret; 3523 } 3524 3525 /* offset and length are dword aligned */ 3526 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) 3527 { 3528 int ret; 3529 3530 if (tg3_flag(tp, EEPROM_WRITE_PROT)) { 3531 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & 3532 ~GRC_LCLCTRL_GPIO_OUTPUT1); 3533 udelay(40); 3534 } 3535 3536 if (!tg3_flag(tp, NVRAM)) { 3537 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); 3538 } else { 3539 u32 grc_mode; 3540 3541 ret = tg3_nvram_lock(tp); 3542 if (ret) 3543 return ret; 3544 3545 tg3_enable_nvram_access(tp); 3546 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) 3547 tw32(NVRAM_WRITE1, 0x406); 3548 3549 grc_mode = tr32(GRC_MODE); 3550 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); 3551 3552 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) { 3553 ret = tg3_nvram_write_block_buffered(tp, offset, len, 3554 buf); 3555 } else { 3556 ret = tg3_nvram_write_block_unbuffered(tp, offset, len, 3557 buf); 3558 } 3559 3560 grc_mode = tr32(GRC_MODE); 3561 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE); 3562 3563 tg3_disable_nvram_access(tp); 3564 tg3_nvram_unlock(tp); 3565 } 3566 3567 if (tg3_flag(tp, EEPROM_WRITE_PROT)) { 3568 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 3569 udelay(40); 3570 } 3571 3572 return ret; 3573 } 3574 3575 #define RX_CPU_SCRATCH_BASE 0x30000 3576 #define RX_CPU_SCRATCH_SIZE 0x04000 3577 #define TX_CPU_SCRATCH_BASE 0x34000 3578 #define TX_CPU_SCRATCH_SIZE 0x04000 3579 3580 /* tp->lock is held. */ 3581 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base) 3582 { 3583 int i; 3584 const int iters = 10000; 3585 3586 for (i = 0; i < iters; i++) { 3587 tw32(cpu_base + CPU_STATE, 0xffffffff); 3588 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); 3589 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT) 3590 break; 3591 if (pci_channel_offline(tp->pdev)) 3592 return -EBUSY; 3593 } 3594 3595 return (i == iters) ? -EBUSY : 0; 3596 } 3597 3598 /* tp->lock is held. */ 3599 static int tg3_rxcpu_pause(struct tg3 *tp) 3600 { 3601 int rc = tg3_pause_cpu(tp, RX_CPU_BASE); 3602 3603 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); 3604 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); 3605 udelay(10); 3606 3607 return rc; 3608 } 3609 3610 /* tp->lock is held. */ 3611 static int tg3_txcpu_pause(struct tg3 *tp) 3612 { 3613 return tg3_pause_cpu(tp, TX_CPU_BASE); 3614 } 3615 3616 /* tp->lock is held. */ 3617 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base) 3618 { 3619 tw32(cpu_base + CPU_STATE, 0xffffffff); 3620 tw32_f(cpu_base + CPU_MODE, 0x00000000); 3621 } 3622 3623 /* tp->lock is held. */ 3624 static void tg3_rxcpu_resume(struct tg3 *tp) 3625 { 3626 tg3_resume_cpu(tp, RX_CPU_BASE); 3627 } 3628 3629 /* tp->lock is held. */ 3630 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base) 3631 { 3632 int rc; 3633 3634 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); 3635 3636 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 3637 u32 val = tr32(GRC_VCPU_EXT_CTRL); 3638 3639 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); 3640 return 0; 3641 } 3642 if (cpu_base == RX_CPU_BASE) { 3643 rc = tg3_rxcpu_pause(tp); 3644 } else { 3645 /* 3646 * There is only an Rx CPU for the 5750 derivative in the 3647 * BCM4785. 
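 * Treat a TX CPU pause request as a successful no-op there.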
3648 */ 3649 if (tg3_flag(tp, IS_SSB_CORE)) 3650 return 0; 3651 3652 rc = tg3_txcpu_pause(tp); 3653 } 3654 3655 if (rc) { 3656 netdev_err(tp->dev, "%s timed out, %s CPU\n", 3657 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX"); 3658 return -ENODEV; 3659 } 3660 3661 /* Clear firmware's nvram arbitration. */ 3662 if (tg3_flag(tp, NVRAM)) 3663 tw32(NVRAM_SWARB, SWARB_REQ_CLR0); 3664 return 0; 3665 } 3666 3667 static int tg3_fw_data_len(struct tg3 *tp, 3668 const struct tg3_firmware_hdr *fw_hdr) 3669 { 3670 int fw_len; 3671 3672 /* Non fragmented firmware have one firmware header followed by a 3673 * contiguous chunk of data to be written. The length field in that 3674 * header is not the length of data to be written but the complete 3675 * length of the bss. The data length is determined based on 3676 * tp->fw->size minus headers. 3677 * 3678 * Fragmented firmware have a main header followed by multiple 3679 * fragments. Each fragment is identical to non fragmented firmware 3680 * with a firmware header followed by a contiguous chunk of data. In 3681 * the main header, the length field is unused and set to 0xffffffff. 3682 * In each fragment header the length is the entire size of that 3683 * fragment i.e. fragment data + header length. Data length is 3684 * therefore length field in the header minus TG3_FW_HDR_LEN. 3685 */ 3686 if (tp->fw_len == 0xffffffff) 3687 fw_len = be32_to_cpu(fw_hdr->len); 3688 else 3689 fw_len = tp->fw->size; 3690 3691 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32); 3692 } 3693 3694 /* tp->lock is held. */ 3695 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, 3696 u32 cpu_scratch_base, int cpu_scratch_size, 3697 const struct tg3_firmware_hdr *fw_hdr) 3698 { 3699 int err, i; 3700 void (*write_op)(struct tg3 *, u32, u32); 3701 int total_len = tp->fw->size; 3702 3703 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { 3704 netdev_err(tp->dev, 3705 "%s: Trying to load TX cpu firmware which is 5705\n", 3706 __func__); 3707 return -EINVAL; 3708 } 3709 3710 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766) 3711 write_op = tg3_write_mem; 3712 else 3713 write_op = tg3_write_indirect_reg32; 3714 3715 if (tg3_asic_rev(tp) != ASIC_REV_57766) { 3716 /* It is possible that bootcode is still loading at this point. 3717 * Get the nvram lock first before halting the cpu. 3718 */ 3719 int lock_err = tg3_nvram_lock(tp); 3720 err = tg3_halt_cpu(tp, cpu_base); 3721 if (!lock_err) 3722 tg3_nvram_unlock(tp); 3723 if (err) 3724 goto out; 3725 3726 for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) 3727 write_op(tp, cpu_scratch_base + i, 0); 3728 tw32(cpu_base + CPU_STATE, 0xffffffff); 3729 tw32(cpu_base + CPU_MODE, 3730 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT); 3731 } else { 3732 /* Subtract additional main header for fragmented firmware and 3733 * advance to the first fragment 3734 */ 3735 total_len -= TG3_FW_HDR_LEN; 3736 fw_hdr++; 3737 } 3738 3739 do { 3740 u32 *fw_data = (u32 *)(fw_hdr + 1); 3741 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++) 3742 write_op(tp, cpu_scratch_base + 3743 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) + 3744 (i * sizeof(u32)), 3745 be32_to_cpu(fw_data[i])); 3746 3747 total_len -= be32_to_cpu(fw_hdr->len); 3748 3749 /* Advance to next fragment */ 3750 fw_hdr = (struct tg3_firmware_hdr *) 3751 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len)); 3752 } while (total_len > 0); 3753 3754 err = 0; 3755 3756 out: 3757 return err; 3758 } 3759 3760 /* tp->lock is held. 
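 * Sets the CPU's program counter and verifies that it took, re-halting the CPU and rewriting the PC a few times before giving up with -EBUSY.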
*/ 3761 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc) 3762 { 3763 int i; 3764 const int iters = 5; 3765 3766 tw32(cpu_base + CPU_STATE, 0xffffffff); 3767 tw32_f(cpu_base + CPU_PC, pc); 3768 3769 for (i = 0; i < iters; i++) { 3770 if (tr32(cpu_base + CPU_PC) == pc) 3771 break; 3772 tw32(cpu_base + CPU_STATE, 0xffffffff); 3773 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); 3774 tw32_f(cpu_base + CPU_PC, pc); 3775 udelay(1000); 3776 } 3777 3778 return (i == iters) ? -EBUSY : 0; 3779 } 3780 3781 /* tp->lock is held. */ 3782 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) 3783 { 3784 const struct tg3_firmware_hdr *fw_hdr; 3785 int err; 3786 3787 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3788 3789 /* Firmware blob starts with version numbers, followed by 3790 start address and length. We are setting complete length. 3791 length = end_address_of_bss - start_address_of_text. 3792 Remainder is the blob to be loaded contiguously 3793 from start address. */ 3794 3795 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, 3796 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, 3797 fw_hdr); 3798 if (err) 3799 return err; 3800 3801 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, 3802 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, 3803 fw_hdr); 3804 if (err) 3805 return err; 3806 3807 /* Now startup only the RX cpu. */ 3808 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE, 3809 be32_to_cpu(fw_hdr->base_addr)); 3810 if (err) { 3811 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " 3812 "should be %08x\n", __func__, 3813 tr32(RX_CPU_BASE + CPU_PC), 3814 be32_to_cpu(fw_hdr->base_addr)); 3815 return -ENODEV; 3816 } 3817 3818 tg3_rxcpu_resume(tp); 3819 3820 return 0; 3821 } 3822 3823 static int tg3_validate_rxcpu_state(struct tg3 *tp) 3824 { 3825 const int iters = 1000; 3826 int i; 3827 u32 val; 3828 3829 /* Wait for boot code to complete initialization and enter service 3830 * loop. It is then safe to download service patches 3831 */ 3832 for (i = 0; i < iters; i++) { 3833 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP) 3834 break; 3835 3836 udelay(10); 3837 } 3838 3839 if (i == iters) { 3840 netdev_err(tp->dev, "Boot code not ready for service patches\n"); 3841 return -EBUSY; 3842 } 3843 3844 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE); 3845 if (val & 0xff) { 3846 netdev_warn(tp->dev, 3847 "Other patches exist. Not downloading EEE patch\n"); 3848 return -EEXIST; 3849 } 3850 3851 return 0; 3852 } 3853 3854 /* tp->lock is held. */ 3855 static void tg3_load_57766_firmware(struct tg3 *tp) 3856 { 3857 struct tg3_firmware_hdr *fw_hdr; 3858 3859 if (!tg3_flag(tp, NO_NVRAM)) 3860 return; 3861 3862 if (tg3_validate_rxcpu_state(tp)) 3863 return; 3864 3865 if (!tp->fw) 3866 return; 3867 3868 /* This firmware blob has a different format than older firmware 3869 * releases as given below. The main difference is we have fragmented 3870 * data to be written to non-contiguous locations. 3871 * 3872 * In the beginning we have a firmware header identical to other 3873 * firmware which consists of version, base addr and length. The length 3874 * here is unused and set to 0xffffffff. 3875 * 3876 * This is followed by a series of firmware fragments which are 3877 * individually identical to previous firmware. i.e. they have the 3878 * firmware header and followed by data for that fragment. The version 3879 * field of the individual fragment header is unused. 
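 * Conceptually: main header (version, base_addr, len = 0xffffffff), then fragment 1 (header with len = TG3_FW_HDR_LEN + data size, followed by the data), then fragment 2, and so on.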
3880 */ 3881 3882 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3883 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR) 3884 return; 3885 3886 if (tg3_rxcpu_pause(tp)) 3887 return; 3888 3889 /* tg3_load_firmware_cpu() will always succeed for the 57766 */ 3890 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr); 3891 3892 tg3_rxcpu_resume(tp); 3893 } 3894 3895 /* tp->lock is held. */ 3896 static int tg3_load_tso_firmware(struct tg3 *tp) 3897 { 3898 const struct tg3_firmware_hdr *fw_hdr; 3899 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; 3900 int err; 3901 3902 if (!tg3_flag(tp, FW_TSO)) 3903 return 0; 3904 3905 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3906 3907 /* Firmware blob starts with version numbers, followed by 3908 start address and length. We are setting complete length. 3909 length = end_address_of_bss - start_address_of_text. 3910 Remainder is the blob to be loaded contiguously 3911 from start address. */ 3912 3913 cpu_scratch_size = tp->fw_len; 3914 3915 if (tg3_asic_rev(tp) == ASIC_REV_5705) { 3916 cpu_base = RX_CPU_BASE; 3917 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; 3918 } else { 3919 cpu_base = TX_CPU_BASE; 3920 cpu_scratch_base = TX_CPU_SCRATCH_BASE; 3921 cpu_scratch_size = TX_CPU_SCRATCH_SIZE; 3922 } 3923 3924 err = tg3_load_firmware_cpu(tp, cpu_base, 3925 cpu_scratch_base, cpu_scratch_size, 3926 fw_hdr); 3927 if (err) 3928 return err; 3929 3930 /* Now startup the cpu. */ 3931 err = tg3_pause_cpu_and_set_pc(tp, cpu_base, 3932 be32_to_cpu(fw_hdr->base_addr)); 3933 if (err) { 3934 netdev_err(tp->dev, 3935 "%s fails to set CPU PC, is %08x should be %08x\n", 3936 __func__, tr32(cpu_base + CPU_PC), 3937 be32_to_cpu(fw_hdr->base_addr)); 3938 return -ENODEV; 3939 } 3940 3941 tg3_resume_cpu(tp, cpu_base); 3942 return 0; 3943 } 3944 3945 /* tp->lock is held. */ 3946 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr, 3947 int index) 3948 { 3949 u32 addr_high, addr_low; 3950 3951 addr_high = ((mac_addr[0] << 8) | mac_addr[1]); 3952 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) | 3953 (mac_addr[4] << 8) | mac_addr[5]); 3954 3955 if (index < 4) { 3956 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high); 3957 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low); 3958 } else { 3959 index -= 4; 3960 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high); 3961 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low); 3962 } 3963 } 3964 3965 /* tp->lock is held. */ 3966 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1) 3967 { 3968 u32 addr_high; 3969 int i; 3970 3971 for (i = 0; i < 4; i++) { 3972 if (i == 1 && skip_mac_1) 3973 continue; 3974 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i); 3975 } 3976 3977 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 3978 tg3_asic_rev(tp) == ASIC_REV_5704) { 3979 for (i = 4; i < 16; i++) 3980 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i); 3981 } 3982 3983 addr_high = (tp->dev->dev_addr[0] + 3984 tp->dev->dev_addr[1] + 3985 tp->dev->dev_addr[2] + 3986 tp->dev->dev_addr[3] + 3987 tp->dev->dev_addr[4] + 3988 tp->dev->dev_addr[5]) & 3989 TX_BACKOFF_SEED_MASK; 3990 tw32(MAC_TX_BACKOFF_SEED, addr_high); 3991 } 3992 3993 static void tg3_enable_register_access(struct tg3 *tp) 3994 { 3995 /* 3996 * Make sure register accesses (indirect or otherwise) will function 3997 * correctly. 
3998 */ 3999 pci_write_config_dword(tp->pdev, 4000 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); 4001 } 4002 4003 static int tg3_power_up(struct tg3 *tp) 4004 { 4005 int err; 4006 4007 tg3_enable_register_access(tp); 4008 4009 err = pci_set_power_state(tp->pdev, PCI_D0); 4010 if (!err) { 4011 /* Switch out of Vaux if it is a NIC */ 4012 tg3_pwrsrc_switch_to_vmain(tp); 4013 } else { 4014 netdev_err(tp->dev, "Transition to D0 failed\n"); 4015 } 4016 4017 return err; 4018 } 4019 4020 static int tg3_setup_phy(struct tg3 *, bool); 4021 4022 static int tg3_power_down_prepare(struct tg3 *tp) 4023 { 4024 u32 misc_host_ctrl; 4025 bool device_should_wake, do_low_power; 4026 4027 tg3_enable_register_access(tp); 4028 4029 /* Restore the CLKREQ setting. */ 4030 if (tg3_flag(tp, CLKREQ_BUG)) 4031 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, 4032 PCI_EXP_LNKCTL_CLKREQ_EN); 4033 4034 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 4035 tw32(TG3PCI_MISC_HOST_CTRL, 4036 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); 4037 4038 device_should_wake = device_may_wakeup(&tp->pdev->dev) && 4039 tg3_flag(tp, WOL_ENABLE); 4040 4041 if (tg3_flag(tp, USE_PHYLIB)) { 4042 do_low_power = false; 4043 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && 4044 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4045 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, }; 4046 struct phy_device *phydev; 4047 u32 phyid; 4048 4049 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 4050 4051 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; 4052 4053 tp->link_config.speed = phydev->speed; 4054 tp->link_config.duplex = phydev->duplex; 4055 tp->link_config.autoneg = phydev->autoneg; 4056 ethtool_convert_link_mode_to_legacy_u32( 4057 &tp->link_config.advertising, 4058 phydev->advertising); 4059 4060 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising); 4061 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, 4062 advertising); 4063 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, 4064 advertising); 4065 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, 4066 advertising); 4067 4068 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) { 4069 if (tg3_flag(tp, WOL_SPEED_100MB)) { 4070 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, 4071 advertising); 4072 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, 4073 advertising); 4074 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, 4075 advertising); 4076 } else { 4077 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, 4078 advertising); 4079 } 4080 } 4081 4082 linkmode_copy(phydev->advertising, advertising); 4083 phy_start_aneg(phydev); 4084 4085 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; 4086 if (phyid != PHY_ID_BCMAC131) { 4087 phyid &= PHY_BCM_OUI_MASK; 4088 if (phyid == PHY_BCM_OUI_1 || 4089 phyid == PHY_BCM_OUI_2 || 4090 phyid == PHY_BCM_OUI_3) 4091 do_low_power = true; 4092 } 4093 } 4094 } else { 4095 do_low_power = true; 4096 4097 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) 4098 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; 4099 4100 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 4101 tg3_setup_phy(tp, false); 4102 } 4103 4104 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 4105 u32 val; 4106 4107 val = tr32(GRC_VCPU_EXT_CTRL); 4108 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); 4109 } else if (!tg3_flag(tp, ENABLE_ASF)) { 4110 int i; 4111 u32 val; 4112 4113 for (i = 0; i < 200; i++) { 4114 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val); 4115 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) 4116 break; 4117 msleep(1); 4118 } 4119 } 4120 if (tg3_flag(tp, WOL_CAP)) 
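		/* Leave a note in the shared-memory WoL mailbox (signature,
		 * shutdown state, magic-packet enable), presumably for the
		 * bootcode to act on while the host sleeps.
		 */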
4121 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | 4122 WOL_DRV_STATE_SHUTDOWN | 4123 WOL_DRV_WOL | 4124 WOL_SET_MAGIC_PKT); 4125 4126 if (device_should_wake) { 4127 u32 mac_mode; 4128 4129 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 4130 if (do_low_power && 4131 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 4132 tg3_phy_auxctl_write(tp, 4133 MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 4134 MII_TG3_AUXCTL_PCTL_WOL_EN | 4135 MII_TG3_AUXCTL_PCTL_100TX_LPWR | 4136 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC); 4137 udelay(40); 4138 } 4139 4140 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 4141 mac_mode = MAC_MODE_PORT_MODE_GMII; 4142 else if (tp->phy_flags & 4143 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) { 4144 if (tp->link_config.active_speed == SPEED_1000) 4145 mac_mode = MAC_MODE_PORT_MODE_GMII; 4146 else 4147 mac_mode = MAC_MODE_PORT_MODE_MII; 4148 } else 4149 mac_mode = MAC_MODE_PORT_MODE_MII; 4150 4151 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; 4152 if (tg3_asic_rev(tp) == ASIC_REV_5700) { 4153 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ? 4154 SPEED_100 : SPEED_10; 4155 if (tg3_5700_link_polarity(tp, speed)) 4156 mac_mode |= MAC_MODE_LINK_POLARITY; 4157 else 4158 mac_mode &= ~MAC_MODE_LINK_POLARITY; 4159 } 4160 } else { 4161 mac_mode = MAC_MODE_PORT_MODE_TBI; 4162 } 4163 4164 if (!tg3_flag(tp, 5750_PLUS)) 4165 tw32(MAC_LED_CTRL, tp->led_ctrl); 4166 4167 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; 4168 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) && 4169 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE))) 4170 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; 4171 4172 if (tg3_flag(tp, ENABLE_APE)) 4173 mac_mode |= MAC_MODE_APE_TX_EN | 4174 MAC_MODE_APE_RX_EN | 4175 MAC_MODE_TDE_ENABLE; 4176 4177 tw32_f(MAC_MODE, mac_mode); 4178 udelay(100); 4179 4180 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE); 4181 udelay(10); 4182 } 4183 4184 if (!tg3_flag(tp, WOL_SPEED_100MB) && 4185 (tg3_asic_rev(tp) == ASIC_REV_5700 || 4186 tg3_asic_rev(tp) == ASIC_REV_5701)) { 4187 u32 base_val; 4188 4189 base_val = tp->pci_clock_ctrl; 4190 base_val |= (CLOCK_CTRL_RXCLK_DISABLE | 4191 CLOCK_CTRL_TXCLK_DISABLE); 4192 4193 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | 4194 CLOCK_CTRL_PWRDOWN_PLL133, 40); 4195 } else if (tg3_flag(tp, 5780_CLASS) || 4196 tg3_flag(tp, CPMU_PRESENT) || 4197 tg3_asic_rev(tp) == ASIC_REV_5906) { 4198 /* do nothing */ 4199 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) { 4200 u32 newbits1, newbits2; 4201 4202 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 4203 tg3_asic_rev(tp) == ASIC_REV_5701) { 4204 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE | 4205 CLOCK_CTRL_TXCLK_DISABLE | 4206 CLOCK_CTRL_ALTCLK); 4207 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; 4208 } else if (tg3_flag(tp, 5705_PLUS)) { 4209 newbits1 = CLOCK_CTRL_625_CORE; 4210 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; 4211 } else { 4212 newbits1 = CLOCK_CTRL_ALTCLK; 4213 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; 4214 } 4215 4216 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1, 4217 40); 4218 4219 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, 4220 40); 4221 4222 if (!tg3_flag(tp, 5705_PLUS)) { 4223 u32 newbits3; 4224 4225 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 4226 tg3_asic_rev(tp) == ASIC_REV_5701) { 4227 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE | 4228 CLOCK_CTRL_TXCLK_DISABLE | 4229 CLOCK_CTRL_44MHZ_CORE); 4230 } else { 4231 newbits3 = CLOCK_CTRL_44MHZ_CORE; 4232 } 4233 4234 tw32_wait_f(TG3PCI_CLOCK_CTRL, 4235 tp->pci_clock_ctrl | newbits3, 40); 4236 } 4237 } 4238 4239 if (!(device_should_wake) && !tg3_flag(tp, 
ENABLE_ASF)) 4240 tg3_power_down_phy(tp, do_low_power); 4241 4242 tg3_frob_aux_power(tp, true); 4243 4244 /* Workaround for unstable PLL clock */ 4245 if ((!tg3_flag(tp, IS_SSB_CORE)) && 4246 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) || 4247 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) { 4248 u32 val = tr32(0x7d00); 4249 4250 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); 4251 tw32(0x7d00, val); 4252 if (!tg3_flag(tp, ENABLE_ASF)) { 4253 int err; 4254 4255 err = tg3_nvram_lock(tp); 4256 tg3_halt_cpu(tp, RX_CPU_BASE); 4257 if (!err) 4258 tg3_nvram_unlock(tp); 4259 } 4260 } 4261 4262 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); 4263 4264 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN); 4265 4266 return 0; 4267 } 4268 4269 static void tg3_power_down(struct tg3 *tp) 4270 { 4271 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE)); 4272 pci_set_power_state(tp->pdev, PCI_D3hot); 4273 } 4274 4275 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex) 4276 { 4277 switch (val & MII_TG3_AUX_STAT_SPDMASK) { 4278 case MII_TG3_AUX_STAT_10HALF: 4279 *speed = SPEED_10; 4280 *duplex = DUPLEX_HALF; 4281 break; 4282 4283 case MII_TG3_AUX_STAT_10FULL: 4284 *speed = SPEED_10; 4285 *duplex = DUPLEX_FULL; 4286 break; 4287 4288 case MII_TG3_AUX_STAT_100HALF: 4289 *speed = SPEED_100; 4290 *duplex = DUPLEX_HALF; 4291 break; 4292 4293 case MII_TG3_AUX_STAT_100FULL: 4294 *speed = SPEED_100; 4295 *duplex = DUPLEX_FULL; 4296 break; 4297 4298 case MII_TG3_AUX_STAT_1000HALF: 4299 *speed = SPEED_1000; 4300 *duplex = DUPLEX_HALF; 4301 break; 4302 4303 case MII_TG3_AUX_STAT_1000FULL: 4304 *speed = SPEED_1000; 4305 *duplex = DUPLEX_FULL; 4306 break; 4307 4308 default: 4309 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 4310 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : 4311 SPEED_10; 4312 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? 
DUPLEX_FULL : 4313 DUPLEX_HALF; 4314 break; 4315 } 4316 *speed = SPEED_UNKNOWN; 4317 *duplex = DUPLEX_UNKNOWN; 4318 break; 4319 } 4320 } 4321 4322 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) 4323 { 4324 int err = 0; 4325 u32 val, new_adv; 4326 4327 new_adv = ADVERTISE_CSMA; 4328 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL; 4329 new_adv |= mii_advertise_flowctrl(flowctrl); 4330 4331 err = tg3_writephy(tp, MII_ADVERTISE, new_adv); 4332 if (err) 4333 goto done; 4334 4335 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4336 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise); 4337 4338 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 4339 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) 4340 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; 4341 4342 err = tg3_writephy(tp, MII_CTRL1000, new_adv); 4343 if (err) 4344 goto done; 4345 } 4346 4347 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 4348 goto done; 4349 4350 tw32(TG3_CPMU_EEE_MODE, 4351 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); 4352 4353 err = tg3_phy_toggle_auxctl_smdsp(tp, true); 4354 if (!err) { 4355 u32 err2; 4356 4357 val = 0; 4358 /* Advertise 100-BaseTX EEE ability */ 4359 if (advertise & ADVERTISED_100baseT_Full) 4360 val |= MDIO_AN_EEE_ADV_100TX; 4361 /* Advertise 1000-BaseT EEE ability */ 4362 if (advertise & ADVERTISED_1000baseT_Full) 4363 val |= MDIO_AN_EEE_ADV_1000T; 4364 4365 if (!tp->eee.eee_enabled) { 4366 val = 0; 4367 tp->eee.advertised = 0; 4368 } else { 4369 tp->eee.advertised = advertise & 4370 (ADVERTISED_100baseT_Full | 4371 ADVERTISED_1000baseT_Full); 4372 } 4373 4374 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); 4375 if (err) 4376 val = 0; 4377 4378 switch (tg3_asic_rev(tp)) { 4379 case ASIC_REV_5717: 4380 case ASIC_REV_57765: 4381 case ASIC_REV_57766: 4382 case ASIC_REV_5719: 4383 /* If we advertised any eee advertisements above... 
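 * ...then also set the ALNOKO/RMRXSTO/OPCSINPT bits in the TAP26 DSP
 * register; if nothing was advertised, val is still zero and the same
 * write clears those bits instead.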
*/ 4384 if (val) 4385 val = MII_TG3_DSP_TAP26_ALNOKO | 4386 MII_TG3_DSP_TAP26_RMRXSTO | 4387 MII_TG3_DSP_TAP26_OPCSINPT; 4388 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); 4389 fallthrough; 4390 case ASIC_REV_5720: 4391 case ASIC_REV_5762: 4392 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) 4393 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val | 4394 MII_TG3_DSP_CH34TP2_HIBW01); 4395 } 4396 4397 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false); 4398 if (!err) 4399 err = err2; 4400 } 4401 4402 done: 4403 return err; 4404 } 4405 4406 static void tg3_phy_copper_begin(struct tg3 *tp) 4407 { 4408 if (tp->link_config.autoneg == AUTONEG_ENABLE || 4409 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4410 u32 adv, fc; 4411 4412 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && 4413 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) { 4414 adv = ADVERTISED_10baseT_Half | 4415 ADVERTISED_10baseT_Full; 4416 if (tg3_flag(tp, WOL_SPEED_100MB)) 4417 adv |= ADVERTISED_100baseT_Half | 4418 ADVERTISED_100baseT_Full; 4419 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) { 4420 if (!(tp->phy_flags & 4421 TG3_PHYFLG_DISABLE_1G_HD_ADV)) 4422 adv |= ADVERTISED_1000baseT_Half; 4423 adv |= ADVERTISED_1000baseT_Full; 4424 } 4425 4426 fc = FLOW_CTRL_TX | FLOW_CTRL_RX; 4427 } else { 4428 adv = tp->link_config.advertising; 4429 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) 4430 adv &= ~(ADVERTISED_1000baseT_Half | 4431 ADVERTISED_1000baseT_Full); 4432 4433 fc = tp->link_config.flowctrl; 4434 } 4435 4436 tg3_phy_autoneg_cfg(tp, adv, fc); 4437 4438 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && 4439 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) { 4440 /* Normally during power down we want to autonegotiate 4441 * the lowest possible speed for WOL. However, to avoid 4442 * link flap, we leave it untouched. 4443 */ 4444 return; 4445 } 4446 4447 tg3_writephy(tp, MII_BMCR, 4448 BMCR_ANENABLE | BMCR_ANRESTART); 4449 } else { 4450 int i; 4451 u32 bmcr, orig_bmcr; 4452 4453 tp->link_config.active_speed = tp->link_config.speed; 4454 tp->link_config.active_duplex = tp->link_config.duplex; 4455 4456 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 4457 /* With autoneg disabled, 5715 only links up when the 4458 * advertisement register has the configured speed 4459 * enabled. 
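 * (The check above matches ASIC_REV_5714; the 5715 apparently reports
 * the same ASIC revision, hence the wording.)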
4460 */ 4461 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL); 4462 } 4463 4464 bmcr = 0; 4465 switch (tp->link_config.speed) { 4466 default: 4467 case SPEED_10: 4468 break; 4469 4470 case SPEED_100: 4471 bmcr |= BMCR_SPEED100; 4472 break; 4473 4474 case SPEED_1000: 4475 bmcr |= BMCR_SPEED1000; 4476 break; 4477 } 4478 4479 if (tp->link_config.duplex == DUPLEX_FULL) 4480 bmcr |= BMCR_FULLDPLX; 4481 4482 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) && 4483 (bmcr != orig_bmcr)) { 4484 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK); 4485 for (i = 0; i < 1500; i++) { 4486 u32 tmp; 4487 4488 udelay(10); 4489 if (tg3_readphy(tp, MII_BMSR, &tmp) || 4490 tg3_readphy(tp, MII_BMSR, &tmp)) 4491 continue; 4492 if (!(tmp & BMSR_LSTATUS)) { 4493 udelay(40); 4494 break; 4495 } 4496 } 4497 tg3_writephy(tp, MII_BMCR, bmcr); 4498 udelay(40); 4499 } 4500 } 4501 } 4502 4503 static int tg3_phy_pull_config(struct tg3 *tp) 4504 { 4505 int err; 4506 u32 val; 4507 4508 err = tg3_readphy(tp, MII_BMCR, &val); 4509 if (err) 4510 goto done; 4511 4512 if (!(val & BMCR_ANENABLE)) { 4513 tp->link_config.autoneg = AUTONEG_DISABLE; 4514 tp->link_config.advertising = 0; 4515 tg3_flag_clear(tp, PAUSE_AUTONEG); 4516 4517 err = -EIO; 4518 4519 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) { 4520 case 0: 4521 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 4522 goto done; 4523 4524 tp->link_config.speed = SPEED_10; 4525 break; 4526 case BMCR_SPEED100: 4527 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 4528 goto done; 4529 4530 tp->link_config.speed = SPEED_100; 4531 break; 4532 case BMCR_SPEED1000: 4533 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4534 tp->link_config.speed = SPEED_1000; 4535 break; 4536 } 4537 fallthrough; 4538 default: 4539 goto done; 4540 } 4541 4542 if (val & BMCR_FULLDPLX) 4543 tp->link_config.duplex = DUPLEX_FULL; 4544 else 4545 tp->link_config.duplex = DUPLEX_HALF; 4546 4547 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; 4548 4549 err = 0; 4550 goto done; 4551 } 4552 4553 tp->link_config.autoneg = AUTONEG_ENABLE; 4554 tp->link_config.advertising = ADVERTISED_Autoneg; 4555 tg3_flag_set(tp, PAUSE_AUTONEG); 4556 4557 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 4558 u32 adv; 4559 4560 err = tg3_readphy(tp, MII_ADVERTISE, &val); 4561 if (err) 4562 goto done; 4563 4564 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL); 4565 tp->link_config.advertising |= adv | ADVERTISED_TP; 4566 4567 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val); 4568 } else { 4569 tp->link_config.advertising |= ADVERTISED_FIBRE; 4570 } 4571 4572 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4573 u32 adv; 4574 4575 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 4576 err = tg3_readphy(tp, MII_CTRL1000, &val); 4577 if (err) 4578 goto done; 4579 4580 adv = mii_ctrl1000_to_ethtool_adv_t(val); 4581 } else { 4582 err = tg3_readphy(tp, MII_ADVERTISE, &val); 4583 if (err) 4584 goto done; 4585 4586 adv = tg3_decode_flowctrl_1000X(val); 4587 tp->link_config.flowctrl = adv; 4588 4589 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL); 4590 adv = mii_adv_to_ethtool_adv_x(val); 4591 } 4592 4593 tp->link_config.advertising |= adv; 4594 } 4595 4596 done: 4597 return err; 4598 } 4599 4600 static int tg3_init_5401phy_dsp(struct tg3 *tp) 4601 { 4602 int err; 4603 4604 /* Turn off tap power management. 
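 * (The 0x4c20 auxctl value and the DSP register/value pairs below are
 * an undocumented vendor sequence for the 5401; treat the constants as
 * opaque.)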
*/ 4605 /* Set Extended packet length bit */ 4606 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); 4607 4608 err |= tg3_phydsp_write(tp, 0x0012, 0x1804); 4609 err |= tg3_phydsp_write(tp, 0x0013, 0x1204); 4610 err |= tg3_phydsp_write(tp, 0x8006, 0x0132); 4611 err |= tg3_phydsp_write(tp, 0x8006, 0x0232); 4612 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20); 4613 4614 udelay(40); 4615 4616 return err; 4617 } 4618 4619 static bool tg3_phy_eee_config_ok(struct tg3 *tp) 4620 { 4621 struct ethtool_eee eee; 4622 4623 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 4624 return true; 4625 4626 tg3_eee_pull_config(tp, &eee); 4627 4628 if (tp->eee.eee_enabled) { 4629 if (tp->eee.advertised != eee.advertised || 4630 tp->eee.tx_lpi_timer != eee.tx_lpi_timer || 4631 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled) 4632 return false; 4633 } else { 4634 /* EEE is disabled but we're advertising */ 4635 if (eee.advertised) 4636 return false; 4637 } 4638 4639 return true; 4640 } 4641 4642 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv) 4643 { 4644 u32 advmsk, tgtadv, advertising; 4645 4646 advertising = tp->link_config.advertising; 4647 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL; 4648 4649 advmsk = ADVERTISE_ALL; 4650 if (tp->link_config.active_duplex == DUPLEX_FULL) { 4651 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl); 4652 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4653 } 4654 4655 if (tg3_readphy(tp, MII_ADVERTISE, lcladv)) 4656 return false; 4657 4658 if ((*lcladv & advmsk) != tgtadv) 4659 return false; 4660 4661 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4662 u32 tg3_ctrl; 4663 4664 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising); 4665 4666 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl)) 4667 return false; 4668 4669 if (tgtadv && 4670 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 4671 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) { 4672 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; 4673 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL | 4674 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); 4675 } else { 4676 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL); 4677 } 4678 4679 if (tg3_ctrl != tgtadv) 4680 return false; 4681 } 4682 4683 return true; 4684 } 4685 4686 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv) 4687 { 4688 u32 lpeth = 0; 4689 4690 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4691 u32 val; 4692 4693 if (tg3_readphy(tp, MII_STAT1000, &val)) 4694 return false; 4695 4696 lpeth = mii_stat1000_to_ethtool_lpa_t(val); 4697 } 4698 4699 if (tg3_readphy(tp, MII_LPA, rmtadv)) 4700 return false; 4701 4702 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv); 4703 tp->link_config.rmt_adv = lpeth; 4704 4705 return true; 4706 } 4707 4708 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up) 4709 { 4710 if (curr_link_up != tp->link_up) { 4711 if (curr_link_up) { 4712 netif_carrier_on(tp->dev); 4713 } else { 4714 netif_carrier_off(tp->dev); 4715 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 4716 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 4717 } 4718 4719 tg3_link_report(tp); 4720 return true; 4721 } 4722 4723 return false; 4724 } 4725 4726 static void tg3_clear_mac_status(struct tg3 *tp) 4727 { 4728 tw32(MAC_EVENT, 0); 4729 4730 tw32_f(MAC_STATUS, 4731 MAC_STATUS_SYNC_CHANGED | 4732 MAC_STATUS_CFG_CHANGED | 4733 MAC_STATUS_MI_COMPLETION | 4734 MAC_STATUS_LNKSTATE_CHANGED); 4735 udelay(40); 4736 } 4737 4738 static void tg3_setup_eee(struct tg3 *tp) 4739 { 4740 u32 val; 4741 4742 
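	/* Everything below programs the CPMU EEE machinery: the link-idle
	 * detection sources, the LPI exit timer, the EEE mode word (written
	 * as zero when EEE is disabled), and the two debounce timers.
	 */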
val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | 4743 TG3_CPMU_EEE_LNKIDL_UART_IDL; 4744 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) 4745 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT; 4746 4747 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val); 4748 4749 tw32_f(TG3_CPMU_EEE_CTRL, 4750 TG3_CPMU_EEE_CTRL_EXIT_20_1_US); 4751 4752 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET | 4753 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) | 4754 TG3_CPMU_EEEMD_LPI_IN_RX | 4755 TG3_CPMU_EEEMD_EEE_ENABLE; 4756 4757 if (tg3_asic_rev(tp) != ASIC_REV_5717) 4758 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; 4759 4760 if (tg3_flag(tp, ENABLE_APE)) 4761 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; 4762 4763 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0); 4764 4765 tw32_f(TG3_CPMU_EEE_DBTMR1, 4766 TG3_CPMU_DBTMR1_PCIEXIT_2047US | 4767 (tp->eee.tx_lpi_timer & 0xffff)); 4768 4769 tw32_f(TG3_CPMU_EEE_DBTMR2, 4770 TG3_CPMU_DBTMR2_APE_TX_2047US | 4771 TG3_CPMU_DBTMR2_TXIDXEQ_2047US); 4772 } 4773 4774 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset) 4775 { 4776 bool current_link_up; 4777 u32 bmsr, val; 4778 u32 lcl_adv, rmt_adv; 4779 u32 current_speed; 4780 u8 current_duplex; 4781 int i, err; 4782 4783 tg3_clear_mac_status(tp); 4784 4785 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { 4786 tw32_f(MAC_MI_MODE, 4787 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); 4788 udelay(80); 4789 } 4790 4791 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0); 4792 4793 /* Some third-party PHYs need to be reset on link going 4794 * down. 4795 */ 4796 if ((tg3_asic_rev(tp) == ASIC_REV_5703 || 4797 tg3_asic_rev(tp) == ASIC_REV_5704 || 4798 tg3_asic_rev(tp) == ASIC_REV_5705) && 4799 tp->link_up) { 4800 tg3_readphy(tp, MII_BMSR, &bmsr); 4801 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4802 !(bmsr & BMSR_LSTATUS)) 4803 force_reset = true; 4804 } 4805 if (force_reset) 4806 tg3_phy_reset(tp); 4807 4808 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 4809 tg3_readphy(tp, MII_BMSR, &bmsr); 4810 if (tg3_readphy(tp, MII_BMSR, &bmsr) || 4811 !tg3_flag(tp, INIT_COMPLETE)) 4812 bmsr = 0; 4813 4814 if (!(bmsr & BMSR_LSTATUS)) { 4815 err = tg3_init_5401phy_dsp(tp); 4816 if (err) 4817 return err; 4818 4819 tg3_readphy(tp, MII_BMSR, &bmsr); 4820 for (i = 0; i < 1000; i++) { 4821 udelay(10); 4822 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4823 (bmsr & BMSR_LSTATUS)) { 4824 udelay(40); 4825 break; 4826 } 4827 } 4828 4829 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) == 4830 TG3_PHY_REV_BCM5401_B0 && 4831 !(bmsr & BMSR_LSTATUS) && 4832 tp->link_config.active_speed == SPEED_1000) { 4833 err = tg3_phy_reset(tp); 4834 if (!err) 4835 err = tg3_init_5401phy_dsp(tp); 4836 if (err) 4837 return err; 4838 } 4839 } 4840 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 4841 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) { 4842 /* 5701 {A0,B0} CRC bug workaround */ 4843 tg3_writephy(tp, 0x15, 0x0a75); 4844 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); 4845 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); 4846 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); 4847 } 4848 4849 /* Clear pending interrupts... 
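 * (MII_TG3_ISTAT is apparently clear-on-read; reading it twice also
 * flushes anything that latched during the first read.)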
*/ 4850 tg3_readphy(tp, MII_TG3_ISTAT, &val); 4851 tg3_readphy(tp, MII_TG3_ISTAT, &val); 4852 4853 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) 4854 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); 4855 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) 4856 tg3_writephy(tp, MII_TG3_IMASK, ~0); 4857 4858 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 4859 tg3_asic_rev(tp) == ASIC_REV_5701) { 4860 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1) 4861 tg3_writephy(tp, MII_TG3_EXT_CTRL, 4862 MII_TG3_EXT_CTRL_LNK3_LED_MODE); 4863 else 4864 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0); 4865 } 4866 4867 current_link_up = false; 4868 current_speed = SPEED_UNKNOWN; 4869 current_duplex = DUPLEX_UNKNOWN; 4870 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE; 4871 tp->link_config.rmt_adv = 0; 4872 4873 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) { 4874 err = tg3_phy_auxctl_read(tp, 4875 MII_TG3_AUXCTL_SHDWSEL_MISCTEST, 4876 &val); 4877 if (!err && !(val & (1 << 10))) { 4878 tg3_phy_auxctl_write(tp, 4879 MII_TG3_AUXCTL_SHDWSEL_MISCTEST, 4880 val | (1 << 10)); 4881 goto relink; 4882 } 4883 } 4884 4885 bmsr = 0; 4886 for (i = 0; i < 100; i++) { 4887 tg3_readphy(tp, MII_BMSR, &bmsr); 4888 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4889 (bmsr & BMSR_LSTATUS)) 4890 break; 4891 udelay(40); 4892 } 4893 4894 if (bmsr & BMSR_LSTATUS) { 4895 u32 aux_stat, bmcr; 4896 4897 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat); 4898 for (i = 0; i < 2000; i++) { 4899 udelay(10); 4900 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) && 4901 aux_stat) 4902 break; 4903 } 4904 4905 tg3_aux_stat_to_speed_duplex(tp, aux_stat, 4906 &current_speed, 4907 &current_duplex); 4908 4909 bmcr = 0; 4910 for (i = 0; i < 200; i++) { 4911 tg3_readphy(tp, MII_BMCR, &bmcr); 4912 if (tg3_readphy(tp, MII_BMCR, &bmcr)) 4913 continue; 4914 if (bmcr && bmcr != 0x7fff) 4915 break; 4916 udelay(10); 4917 } 4918 4919 lcl_adv = 0; 4920 rmt_adv = 0; 4921 4922 tp->link_config.active_speed = current_speed; 4923 tp->link_config.active_duplex = current_duplex; 4924 4925 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 4926 bool eee_config_ok = tg3_phy_eee_config_ok(tp); 4927 4928 if ((bmcr & BMCR_ANENABLE) && 4929 eee_config_ok && 4930 tg3_phy_copper_an_config_ok(tp, &lcl_adv) && 4931 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv)) 4932 current_link_up = true; 4933 4934 /* EEE settings changes take effect only after a phy 4935 * reset. If we have skipped a reset due to Link Flap 4936 * Avoidance being enabled, do it now.
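 * tg3_setup_eee() runs first so the reset that follows latches the
 * new EEE configuration.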
4937 */ 4938 if (!eee_config_ok && 4939 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 4940 !force_reset) { 4941 tg3_setup_eee(tp); 4942 tg3_phy_reset(tp); 4943 } 4944 } else { 4945 if (!(bmcr & BMCR_ANENABLE) && 4946 tp->link_config.speed == current_speed && 4947 tp->link_config.duplex == current_duplex) { 4948 current_link_up = true; 4949 } 4950 } 4951 4952 if (current_link_up && 4953 tp->link_config.active_duplex == DUPLEX_FULL) { 4954 u32 reg, bit; 4955 4956 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 4957 reg = MII_TG3_FET_GEN_STAT; 4958 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT; 4959 } else { 4960 reg = MII_TG3_EXT_STAT; 4961 bit = MII_TG3_EXT_STAT_MDIX; 4962 } 4963 4964 if (!tg3_readphy(tp, reg, &val) && (val & bit)) 4965 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE; 4966 4967 tg3_setup_flow_control(tp, lcl_adv, rmt_adv); 4968 } 4969 } 4970 4971 relink: 4972 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4973 tg3_phy_copper_begin(tp); 4974 4975 if (tg3_flag(tp, ROBOSWITCH)) { 4976 current_link_up = true; 4977 /* FIXME: when BCM5325 switch is used use 100 MBit/s */ 4978 current_speed = SPEED_1000; 4979 current_duplex = DUPLEX_FULL; 4980 tp->link_config.active_speed = current_speed; 4981 tp->link_config.active_duplex = current_duplex; 4982 } 4983 4984 tg3_readphy(tp, MII_BMSR, &bmsr); 4985 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) || 4986 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) 4987 current_link_up = true; 4988 } 4989 4990 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; 4991 if (current_link_up) { 4992 if (tp->link_config.active_speed == SPEED_100 || 4993 tp->link_config.active_speed == SPEED_10) 4994 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 4995 else 4996 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 4997 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) 4998 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 4999 else 5000 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5001 5002 /* In order for the 5750 core in BCM4785 chip to work properly 5003 * in RGMII mode, the Led Control Register must be set up. 5004 */ 5005 if (tg3_flag(tp, RGMII_MODE)) { 5006 u32 led_ctrl = tr32(MAC_LED_CTRL); 5007 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON); 5008 5009 if (tp->link_config.active_speed == SPEED_10) 5010 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE; 5011 else if (tp->link_config.active_speed == SPEED_100) 5012 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE | 5013 LED_CTRL_100MBPS_ON); 5014 else if (tp->link_config.active_speed == SPEED_1000) 5015 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE | 5016 LED_CTRL_1000MBPS_ON); 5017 5018 tw32(MAC_LED_CTRL, led_ctrl); 5019 udelay(40); 5020 } 5021 5022 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; 5023 if (tp->link_config.active_duplex == DUPLEX_HALF) 5024 tp->mac_mode |= MAC_MODE_HALF_DUPLEX; 5025 5026 if (tg3_asic_rev(tp) == ASIC_REV_5700) { 5027 if (current_link_up && 5028 tg3_5700_link_polarity(tp, tp->link_config.active_speed)) 5029 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 5030 else 5031 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; 5032 } 5033 5034 /* ??? Without this setting Netgear GA302T PHY does not 5035 * ??? send/receive packets... 5036 */ 5037 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 && 5038 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) { 5039 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; 5040 tw32_f(MAC_MI_MODE, tp->mi_mode); 5041 udelay(80); 5042 } 5043 5044 tw32_f(MAC_MODE, tp->mac_mode); 5045 udelay(40); 5046 5047 tg3_phy_eee_adjust(tp, current_link_up); 5048 5049 if (tg3_flag(tp, USE_LINKCHG_REG)) { 5050 /* Polled via timer. 
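 * (USE_LINKCHG_REG devices sample link state from the driver timer,
 * so the MAC link-change event is left disabled here.)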
*/ 5051 tw32_f(MAC_EVENT, 0); 5052 } else { 5053 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5054 } 5055 udelay(40); 5056 5057 if (tg3_asic_rev(tp) == ASIC_REV_5700 && 5058 current_link_up && 5059 tp->link_config.active_speed == SPEED_1000 && 5060 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) { 5061 udelay(120); 5062 tw32_f(MAC_STATUS, 5063 (MAC_STATUS_SYNC_CHANGED | 5064 MAC_STATUS_CFG_CHANGED)); 5065 udelay(40); 5066 tg3_write_mem(tp, 5067 NIC_SRAM_FIRMWARE_MBOX, 5068 NIC_SRAM_FIRMWARE_MBOX_MAGIC2); 5069 } 5070 5071 /* Prevent send BD corruption. */ 5072 if (tg3_flag(tp, CLKREQ_BUG)) { 5073 if (tp->link_config.active_speed == SPEED_100 || 5074 tp->link_config.active_speed == SPEED_10) 5075 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL, 5076 PCI_EXP_LNKCTL_CLKREQ_EN); 5077 else 5078 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, 5079 PCI_EXP_LNKCTL_CLKREQ_EN); 5080 } 5081 5082 tg3_test_and_report_link_chg(tp, current_link_up); 5083 5084 return 0; 5085 } 5086 5087 struct tg3_fiber_aneginfo { 5088 int state; 5089 #define ANEG_STATE_UNKNOWN 0 5090 #define ANEG_STATE_AN_ENABLE 1 5091 #define ANEG_STATE_RESTART_INIT 2 5092 #define ANEG_STATE_RESTART 3 5093 #define ANEG_STATE_DISABLE_LINK_OK 4 5094 #define ANEG_STATE_ABILITY_DETECT_INIT 5 5095 #define ANEG_STATE_ABILITY_DETECT 6 5096 #define ANEG_STATE_ACK_DETECT_INIT 7 5097 #define ANEG_STATE_ACK_DETECT 8 5098 #define ANEG_STATE_COMPLETE_ACK_INIT 9 5099 #define ANEG_STATE_COMPLETE_ACK 10 5100 #define ANEG_STATE_IDLE_DETECT_INIT 11 5101 #define ANEG_STATE_IDLE_DETECT 12 5102 #define ANEG_STATE_LINK_OK 13 5103 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14 5104 #define ANEG_STATE_NEXT_PAGE_WAIT 15 5105 5106 u32 flags; 5107 #define MR_AN_ENABLE 0x00000001 5108 #define MR_RESTART_AN 0x00000002 5109 #define MR_AN_COMPLETE 0x00000004 5110 #define MR_PAGE_RX 0x00000008 5111 #define MR_NP_LOADED 0x00000010 5112 #define MR_TOGGLE_TX 0x00000020 5113 #define MR_LP_ADV_FULL_DUPLEX 0x00000040 5114 #define MR_LP_ADV_HALF_DUPLEX 0x00000080 5115 #define MR_LP_ADV_SYM_PAUSE 0x00000100 5116 #define MR_LP_ADV_ASYM_PAUSE 0x00000200 5117 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400 5118 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800 5119 #define MR_LP_ADV_NEXT_PAGE 0x00001000 5120 #define MR_TOGGLE_RX 0x00002000 5121 #define MR_NP_RX 0x00004000 5122 5123 #define MR_LINK_OK 0x80000000 5124 5125 unsigned long link_time, cur_time; 5126 5127 u32 ability_match_cfg; 5128 int ability_match_count; 5129 5130 char ability_match, idle_match, ack_match; 5131 5132 u32 txconfig, rxconfig; 5133 #define ANEG_CFG_NP 0x00000080 5134 #define ANEG_CFG_ACK 0x00000040 5135 #define ANEG_CFG_RF2 0x00000020 5136 #define ANEG_CFG_RF1 0x00000010 5137 #define ANEG_CFG_PS2 0x00000001 5138 #define ANEG_CFG_PS1 0x00008000 5139 #define ANEG_CFG_HD 0x00004000 5140 #define ANEG_CFG_FD 0x00002000 5141 #define ANEG_CFG_INVAL 0x00001f06 5142 5143 }; 5144 #define ANEG_OK 0 5145 #define ANEG_DONE 1 5146 #define ANEG_TIMER_ENAB 2 5147 #define ANEG_FAILED -1 5148 5149 #define ANEG_STATE_SETTLE_TIME 10000 5150 5151 static int tg3_fiber_aneg_smachine(struct tg3 *tp, 5152 struct tg3_fiber_aneginfo *ap) 5153 { 5154 u16 flowctrl; 5155 unsigned long delta; 5156 u32 rx_cfg_reg; 5157 int ret; 5158 5159 if (ap->state == ANEG_STATE_UNKNOWN) { 5160 ap->rxconfig = 0; 5161 ap->link_time = 0; 5162 ap->cur_time = 0; 5163 ap->ability_match_cfg = 0; 5164 ap->ability_match_count = 0; 5165 ap->ability_match = 0; 5166 ap->idle_match = 0; 5167 ap->ack_match = 0; 5168 } 5169 ap->cur_time++; 5170 5171 if 
(tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) { 5172 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG); 5173 5174 if (rx_cfg_reg != ap->ability_match_cfg) { 5175 ap->ability_match_cfg = rx_cfg_reg; 5176 ap->ability_match = 0; 5177 ap->ability_match_count = 0; 5178 } else { 5179 if (++ap->ability_match_count > 1) { 5180 ap->ability_match = 1; 5181 ap->ability_match_cfg = rx_cfg_reg; 5182 } 5183 } 5184 if (rx_cfg_reg & ANEG_CFG_ACK) 5185 ap->ack_match = 1; 5186 else 5187 ap->ack_match = 0; 5188 5189 ap->idle_match = 0; 5190 } else { 5191 ap->idle_match = 1; 5192 ap->ability_match_cfg = 0; 5193 ap->ability_match_count = 0; 5194 ap->ability_match = 0; 5195 ap->ack_match = 0; 5196 5197 rx_cfg_reg = 0; 5198 } 5199 5200 ap->rxconfig = rx_cfg_reg; 5201 ret = ANEG_OK; 5202 5203 switch (ap->state) { 5204 case ANEG_STATE_UNKNOWN: 5205 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) 5206 ap->state = ANEG_STATE_AN_ENABLE; 5207 5208 fallthrough; 5209 case ANEG_STATE_AN_ENABLE: 5210 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); 5211 if (ap->flags & MR_AN_ENABLE) { 5212 ap->link_time = 0; 5213 ap->cur_time = 0; 5214 ap->ability_match_cfg = 0; 5215 ap->ability_match_count = 0; 5216 ap->ability_match = 0; 5217 ap->idle_match = 0; 5218 ap->ack_match = 0; 5219 5220 ap->state = ANEG_STATE_RESTART_INIT; 5221 } else { 5222 ap->state = ANEG_STATE_DISABLE_LINK_OK; 5223 } 5224 break; 5225 5226 case ANEG_STATE_RESTART_INIT: 5227 ap->link_time = ap->cur_time; 5228 ap->flags &= ~(MR_NP_LOADED); 5229 ap->txconfig = 0; 5230 tw32(MAC_TX_AUTO_NEG, 0); 5231 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 5232 tw32_f(MAC_MODE, tp->mac_mode); 5233 udelay(40); 5234 5235 ret = ANEG_TIMER_ENAB; 5236 ap->state = ANEG_STATE_RESTART; 5237 5238 fallthrough; 5239 case ANEG_STATE_RESTART: 5240 delta = ap->cur_time - ap->link_time; 5241 if (delta > ANEG_STATE_SETTLE_TIME) 5242 ap->state = ANEG_STATE_ABILITY_DETECT_INIT; 5243 else 5244 ret = ANEG_TIMER_ENAB; 5245 break; 5246 5247 case ANEG_STATE_DISABLE_LINK_OK: 5248 ret = ANEG_DONE; 5249 break; 5250 5251 case ANEG_STATE_ABILITY_DETECT_INIT: 5252 ap->flags &= ~(MR_TOGGLE_TX); 5253 ap->txconfig = ANEG_CFG_FD; 5254 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 5255 if (flowctrl & ADVERTISE_1000XPAUSE) 5256 ap->txconfig |= ANEG_CFG_PS1; 5257 if (flowctrl & ADVERTISE_1000XPSE_ASYM) 5258 ap->txconfig |= ANEG_CFG_PS2; 5259 tw32(MAC_TX_AUTO_NEG, ap->txconfig); 5260 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 5261 tw32_f(MAC_MODE, tp->mac_mode); 5262 udelay(40); 5263 5264 ap->state = ANEG_STATE_ABILITY_DETECT; 5265 break; 5266 5267 case ANEG_STATE_ABILITY_DETECT: 5268 if (ap->ability_match != 0 && ap->rxconfig != 0) 5269 ap->state = ANEG_STATE_ACK_DETECT_INIT; 5270 break; 5271 5272 case ANEG_STATE_ACK_DETECT_INIT: 5273 ap->txconfig |= ANEG_CFG_ACK; 5274 tw32(MAC_TX_AUTO_NEG, ap->txconfig); 5275 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 5276 tw32_f(MAC_MODE, tp->mac_mode); 5277 udelay(40); 5278 5279 ap->state = ANEG_STATE_ACK_DETECT; 5280 5281 fallthrough; 5282 case ANEG_STATE_ACK_DETECT: 5283 if (ap->ack_match != 0) { 5284 if ((ap->rxconfig & ~ANEG_CFG_ACK) == 5285 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) { 5286 ap->state = ANEG_STATE_COMPLETE_ACK_INIT; 5287 } else { 5288 ap->state = ANEG_STATE_AN_ENABLE; 5289 } 5290 } else if (ap->ability_match != 0 && 5291 ap->rxconfig == 0) { 5292 ap->state = ANEG_STATE_AN_ENABLE; 5293 } 5294 break; 5295 5296 case ANEG_STATE_COMPLETE_ACK_INIT: 5297 if (ap->rxconfig & ANEG_CFG_INVAL) { 5298 ret = ANEG_FAILED; 5299 break; 5300 } 5301 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX | 5302 
MR_LP_ADV_HALF_DUPLEX | 5303 MR_LP_ADV_SYM_PAUSE | 5304 MR_LP_ADV_ASYM_PAUSE | 5305 MR_LP_ADV_REMOTE_FAULT1 | 5306 MR_LP_ADV_REMOTE_FAULT2 | 5307 MR_LP_ADV_NEXT_PAGE | 5308 MR_TOGGLE_RX | 5309 MR_NP_RX); 5310 if (ap->rxconfig & ANEG_CFG_FD) 5311 ap->flags |= MR_LP_ADV_FULL_DUPLEX; 5312 if (ap->rxconfig & ANEG_CFG_HD) 5313 ap->flags |= MR_LP_ADV_HALF_DUPLEX; 5314 if (ap->rxconfig & ANEG_CFG_PS1) 5315 ap->flags |= MR_LP_ADV_SYM_PAUSE; 5316 if (ap->rxconfig & ANEG_CFG_PS2) 5317 ap->flags |= MR_LP_ADV_ASYM_PAUSE; 5318 if (ap->rxconfig & ANEG_CFG_RF1) 5319 ap->flags |= MR_LP_ADV_REMOTE_FAULT1; 5320 if (ap->rxconfig & ANEG_CFG_RF2) 5321 ap->flags |= MR_LP_ADV_REMOTE_FAULT2; 5322 if (ap->rxconfig & ANEG_CFG_NP) 5323 ap->flags |= MR_LP_ADV_NEXT_PAGE; 5324 5325 ap->link_time = ap->cur_time; 5326 5327 ap->flags ^= (MR_TOGGLE_TX); 5328 if (ap->rxconfig & 0x0008) 5329 ap->flags |= MR_TOGGLE_RX; 5330 if (ap->rxconfig & ANEG_CFG_NP) 5331 ap->flags |= MR_NP_RX; 5332 ap->flags |= MR_PAGE_RX; 5333 5334 ap->state = ANEG_STATE_COMPLETE_ACK; 5335 ret = ANEG_TIMER_ENAB; 5336 break; 5337 5338 case ANEG_STATE_COMPLETE_ACK: 5339 if (ap->ability_match != 0 && 5340 ap->rxconfig == 0) { 5341 ap->state = ANEG_STATE_AN_ENABLE; 5342 break; 5343 } 5344 delta = ap->cur_time - ap->link_time; 5345 if (delta > ANEG_STATE_SETTLE_TIME) { 5346 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) { 5347 ap->state = ANEG_STATE_IDLE_DETECT_INIT; 5348 } else { 5349 if ((ap->txconfig & ANEG_CFG_NP) == 0 && 5350 !(ap->flags & MR_NP_RX)) { 5351 ap->state = ANEG_STATE_IDLE_DETECT_INIT; 5352 } else { 5353 ret = ANEG_FAILED; 5354 } 5355 } 5356 } 5357 break; 5358 5359 case ANEG_STATE_IDLE_DETECT_INIT: 5360 ap->link_time = ap->cur_time; 5361 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; 5362 tw32_f(MAC_MODE, tp->mac_mode); 5363 udelay(40); 5364 5365 ap->state = ANEG_STATE_IDLE_DETECT; 5366 ret = ANEG_TIMER_ENAB; 5367 break; 5368 5369 case ANEG_STATE_IDLE_DETECT: 5370 if (ap->ability_match != 0 && 5371 ap->rxconfig == 0) { 5372 ap->state = ANEG_STATE_AN_ENABLE; 5373 break; 5374 } 5375 delta = ap->cur_time - ap->link_time; 5376 if (delta > ANEG_STATE_SETTLE_TIME) { 5377 /* XXX another gem from the Broadcom driver :( */ 5378 ap->state = ANEG_STATE_LINK_OK; 5379 } 5380 break; 5381 5382 case ANEG_STATE_LINK_OK: 5383 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK); 5384 ret = ANEG_DONE; 5385 break; 5386 5387 case ANEG_STATE_NEXT_PAGE_WAIT_INIT: 5388 /* ??? unimplemented */ 5389 break; 5390 5391 case ANEG_STATE_NEXT_PAGE_WAIT: 5392 /* ??? 
unimplemented */ 5393 break; 5394 5395 default: 5396 ret = ANEG_FAILED; 5397 break; 5398 } 5399 5400 return ret; 5401 } 5402 5403 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags) 5404 { 5405 int res = 0; 5406 struct tg3_fiber_aneginfo aninfo; 5407 int status = ANEG_FAILED; 5408 unsigned int tick; 5409 u32 tmp; 5410 5411 tw32_f(MAC_TX_AUTO_NEG, 0); 5412 5413 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; 5414 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII); 5415 udelay(40); 5416 5417 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS); 5418 udelay(40); 5419 5420 memset(&aninfo, 0, sizeof(aninfo)); 5421 aninfo.flags |= MR_AN_ENABLE; 5422 aninfo.state = ANEG_STATE_UNKNOWN; 5423 aninfo.cur_time = 0; 5424 tick = 0; 5425 while (++tick < 195000) { 5426 status = tg3_fiber_aneg_smachine(tp, &aninfo); 5427 if (status == ANEG_DONE || status == ANEG_FAILED) 5428 break; 5429 5430 udelay(1); 5431 } 5432 5433 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; 5434 tw32_f(MAC_MODE, tp->mac_mode); 5435 udelay(40); 5436 5437 *txflags = aninfo.txconfig; 5438 *rxflags = aninfo.flags; 5439 5440 if (status == ANEG_DONE && 5441 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK | 5442 MR_LP_ADV_FULL_DUPLEX))) 5443 res = 1; 5444 5445 return res; 5446 } 5447 5448 static void tg3_init_bcm8002(struct tg3 *tp) 5449 { 5450 u32 mac_status = tr32(MAC_STATUS); 5451 int i; 5452 5453 /* Reset when initting first time or we have a link. */ 5454 if (tg3_flag(tp, INIT_COMPLETE) && 5455 !(mac_status & MAC_STATUS_PCS_SYNCED)) 5456 return; 5457 5458 /* Set PLL lock range. */ 5459 tg3_writephy(tp, 0x16, 0x8007); 5460 5461 /* SW reset */ 5462 tg3_writephy(tp, MII_BMCR, BMCR_RESET); 5463 5464 /* Wait for reset to complete. */ 5465 /* XXX schedule_timeout() ... */ 5466 for (i = 0; i < 500; i++) 5467 udelay(10); 5468 5469 /* Config mode; select PMA/Ch 1 regs. */ 5470 tg3_writephy(tp, 0x10, 0x8411); 5471 5472 /* Enable auto-lock and comdet, select txclk for tx. */ 5473 tg3_writephy(tp, 0x11, 0x0a10); 5474 5475 tg3_writephy(tp, 0x18, 0x00a0); 5476 tg3_writephy(tp, 0x16, 0x41ff); 5477 5478 /* Assert and deassert POR. */ 5479 tg3_writephy(tp, 0x13, 0x0400); 5480 udelay(40); 5481 tg3_writephy(tp, 0x13, 0x0000); 5482 5483 tg3_writephy(tp, 0x11, 0x0a50); 5484 udelay(40); 5485 tg3_writephy(tp, 0x11, 0x0a10); 5486 5487 /* Wait for signal to stabilize */ 5488 /* XXX schedule_timeout() ... */ 5489 for (i = 0; i < 15000; i++) 5490 udelay(10); 5491 5492 /* Deselect the channel register so we can read the PHYID 5493 * later. 
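 * (Register 0x10 was written with 0x8411 above to select the PMA/Ch 1
 * page; 0x8011 differs only in bit 10, which is presumably the
 * channel-select bit.)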
5494 */ 5495 tg3_writephy(tp, 0x10, 0x8011); 5496 } 5497 5498 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) 5499 { 5500 u16 flowctrl; 5501 bool current_link_up; 5502 u32 sg_dig_ctrl, sg_dig_status; 5503 u32 serdes_cfg, expected_sg_dig_ctrl; 5504 int workaround, port_a; 5505 5506 serdes_cfg = 0; 5507 workaround = 0; 5508 port_a = 1; 5509 current_link_up = false; 5510 5511 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 && 5512 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) { 5513 workaround = 1; 5514 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) 5515 port_a = 0; 5516 5517 /* preserve bits 0-11,13,14 for signal pre-emphasis */ 5518 /* preserve bits 20-23 for voltage regulator */ 5519 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff; 5520 } 5521 5522 sg_dig_ctrl = tr32(SG_DIG_CTRL); 5523 5524 if (tp->link_config.autoneg != AUTONEG_ENABLE) { 5525 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) { 5526 if (workaround) { 5527 u32 val = serdes_cfg; 5528 5529 if (port_a) 5530 val |= 0xc010000; 5531 else 5532 val |= 0x4010000; 5533 tw32_f(MAC_SERDES_CFG, val); 5534 } 5535 5536 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); 5537 } 5538 if (mac_status & MAC_STATUS_PCS_SYNCED) { 5539 tg3_setup_flow_control(tp, 0, 0); 5540 current_link_up = true; 5541 } 5542 goto out; 5543 } 5544 5545 /* Want auto-negotiation. */ 5546 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP; 5547 5548 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 5549 if (flowctrl & ADVERTISE_1000XPAUSE) 5550 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP; 5551 if (flowctrl & ADVERTISE_1000XPSE_ASYM) 5552 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE; 5553 5554 if (sg_dig_ctrl != expected_sg_dig_ctrl) { 5555 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) && 5556 tp->serdes_counter && 5557 ((mac_status & (MAC_STATUS_PCS_SYNCED | 5558 MAC_STATUS_RCVD_CFG)) == 5559 MAC_STATUS_PCS_SYNCED)) { 5560 tp->serdes_counter--; 5561 current_link_up = true; 5562 goto out; 5563 } 5564 restart_autoneg: 5565 if (workaround) 5566 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000); 5567 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET); 5568 udelay(5); 5569 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl); 5570 5571 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; 5572 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5573 } else if (mac_status & (MAC_STATUS_PCS_SYNCED | 5574 MAC_STATUS_SIGNAL_DET)) { 5575 sg_dig_status = tr32(SG_DIG_STATUS); 5576 mac_status = tr32(MAC_STATUS); 5577 5578 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) && 5579 (mac_status & MAC_STATUS_PCS_SYNCED)) { 5580 u32 local_adv = 0, remote_adv = 0; 5581 5582 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP) 5583 local_adv |= ADVERTISE_1000XPAUSE; 5584 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE) 5585 local_adv |= ADVERTISE_1000XPSE_ASYM; 5586 5587 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE) 5588 remote_adv |= LPA_1000XPAUSE; 5589 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE) 5590 remote_adv |= LPA_1000XPAUSE_ASYM; 5591 5592 tp->link_config.rmt_adv = 5593 mii_adv_to_ethtool_adv_x(remote_adv); 5594 5595 tg3_setup_flow_control(tp, local_adv, remote_adv); 5596 current_link_up = true; 5597 tp->serdes_counter = 0; 5598 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5599 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) { 5600 if (tp->serdes_counter) 5601 tp->serdes_counter--; 5602 else { 5603 if (workaround) { 5604 u32 val = serdes_cfg; 5605 5606 if (port_a) 5607 val |= 0xc010000; 5608 else 5609 val |= 0x4010000; 5610 5611 tw32_f(MAC_SERDES_CFG, val); 5612 } 5613 5614 
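				/* Autoneg did not finish before the
				 * serdes_counter ran out; drop back to the
				 * common setup and try parallel detection
				 * below.
				 */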
tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); 5615 udelay(40); 5616 5617 /* Link parallel detection - link is up */ 5618 /* only if we have PCS_SYNC and not */ 5619 /* receiving config code words */ 5620 mac_status = tr32(MAC_STATUS); 5621 if ((mac_status & MAC_STATUS_PCS_SYNCED) && 5622 !(mac_status & MAC_STATUS_RCVD_CFG)) { 5623 tg3_setup_flow_control(tp, 0, 0); 5624 current_link_up = true; 5625 tp->phy_flags |= 5626 TG3_PHYFLG_PARALLEL_DETECT; 5627 tp->serdes_counter = 5628 SERDES_PARALLEL_DET_TIMEOUT; 5629 } else 5630 goto restart_autoneg; 5631 } 5632 } 5633 } else { 5634 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; 5635 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5636 } 5637 5638 out: 5639 return current_link_up; 5640 } 5641 5642 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) 5643 { 5644 bool current_link_up = false; 5645 5646 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) 5647 goto out; 5648 5649 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 5650 u32 txflags, rxflags; 5651 int i; 5652 5653 if (fiber_autoneg(tp, &txflags, &rxflags)) { 5654 u32 local_adv = 0, remote_adv = 0; 5655 5656 if (txflags & ANEG_CFG_PS1) 5657 local_adv |= ADVERTISE_1000XPAUSE; 5658 if (txflags & ANEG_CFG_PS2) 5659 local_adv |= ADVERTISE_1000XPSE_ASYM; 5660 5661 if (rxflags & MR_LP_ADV_SYM_PAUSE) 5662 remote_adv |= LPA_1000XPAUSE; 5663 if (rxflags & MR_LP_ADV_ASYM_PAUSE) 5664 remote_adv |= LPA_1000XPAUSE_ASYM; 5665 5666 tp->link_config.rmt_adv = 5667 mii_adv_to_ethtool_adv_x(remote_adv); 5668 5669 tg3_setup_flow_control(tp, local_adv, remote_adv); 5670 5671 current_link_up = true; 5672 } 5673 for (i = 0; i < 30; i++) { 5674 udelay(20); 5675 tw32_f(MAC_STATUS, 5676 (MAC_STATUS_SYNC_CHANGED | 5677 MAC_STATUS_CFG_CHANGED)); 5678 udelay(40); 5679 if ((tr32(MAC_STATUS) & 5680 (MAC_STATUS_SYNC_CHANGED | 5681 MAC_STATUS_CFG_CHANGED)) == 0) 5682 break; 5683 } 5684 5685 mac_status = tr32(MAC_STATUS); 5686 if (!current_link_up && 5687 (mac_status & MAC_STATUS_PCS_SYNCED) && 5688 !(mac_status & MAC_STATUS_RCVD_CFG)) 5689 current_link_up = true; 5690 } else { 5691 tg3_setup_flow_control(tp, 0, 0); 5692 5693 /* Forcing 1000FD link up. 
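 * (With autoneg disabled on a TBI fiber port there is nothing to
 * negotiate: flow control was just cleared, and 1000 Mb/s full duplex
 * is the only mode this path supports.)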
*/ 5694 current_link_up = true; 5695 5696 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); 5697 udelay(40); 5698 5699 tw32_f(MAC_MODE, tp->mac_mode); 5700 udelay(40); 5701 } 5702 5703 out: 5704 return current_link_up; 5705 } 5706 5707 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset) 5708 { 5709 u32 orig_pause_cfg; 5710 u32 orig_active_speed; 5711 u8 orig_active_duplex; 5712 u32 mac_status; 5713 bool current_link_up; 5714 int i; 5715 5716 orig_pause_cfg = tp->link_config.active_flowctrl; 5717 orig_active_speed = tp->link_config.active_speed; 5718 orig_active_duplex = tp->link_config.active_duplex; 5719 5720 if (!tg3_flag(tp, HW_AUTONEG) && 5721 tp->link_up && 5722 tg3_flag(tp, INIT_COMPLETE)) { 5723 mac_status = tr32(MAC_STATUS); 5724 mac_status &= (MAC_STATUS_PCS_SYNCED | 5725 MAC_STATUS_SIGNAL_DET | 5726 MAC_STATUS_CFG_CHANGED | 5727 MAC_STATUS_RCVD_CFG); 5728 if (mac_status == (MAC_STATUS_PCS_SYNCED | 5729 MAC_STATUS_SIGNAL_DET)) { 5730 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | 5731 MAC_STATUS_CFG_CHANGED)); 5732 return 0; 5733 } 5734 } 5735 5736 tw32_f(MAC_TX_AUTO_NEG, 0); 5737 5738 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); 5739 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI; 5740 tw32_f(MAC_MODE, tp->mac_mode); 5741 udelay(40); 5742 5743 if (tp->phy_id == TG3_PHY_ID_BCM8002) 5744 tg3_init_bcm8002(tp); 5745 5746 /* Enable link change event even when serdes polling. */ 5747 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5748 udelay(40); 5749 5750 tp->link_config.rmt_adv = 0; 5751 mac_status = tr32(MAC_STATUS); 5752 5753 if (tg3_flag(tp, HW_AUTONEG)) 5754 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); 5755 else 5756 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); 5757 5758 tp->napi[0].hw_status->status = 5759 (SD_STATUS_UPDATED | 5760 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG)); 5761 5762 for (i = 0; i < 100; i++) { 5763 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | 5764 MAC_STATUS_CFG_CHANGED)); 5765 udelay(5); 5766 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED | 5767 MAC_STATUS_CFG_CHANGED | 5768 MAC_STATUS_LNKSTATE_CHANGED)) == 0) 5769 break; 5770 } 5771 5772 mac_status = tr32(MAC_STATUS); 5773 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) { 5774 current_link_up = false; 5775 if (tp->link_config.autoneg == AUTONEG_ENABLE && 5776 tp->serdes_counter == 0) { 5777 tw32_f(MAC_MODE, (tp->mac_mode | 5778 MAC_MODE_SEND_CONFIGS)); 5779 udelay(1); 5780 tw32_f(MAC_MODE, tp->mac_mode); 5781 } 5782 } 5783 5784 if (current_link_up) { 5785 tp->link_config.active_speed = SPEED_1000; 5786 tp->link_config.active_duplex = DUPLEX_FULL; 5787 tw32(MAC_LED_CTRL, (tp->led_ctrl | 5788 LED_CTRL_LNKLED_OVERRIDE | 5789 LED_CTRL_1000MBPS_ON)); 5790 } else { 5791 tp->link_config.active_speed = SPEED_UNKNOWN; 5792 tp->link_config.active_duplex = DUPLEX_UNKNOWN; 5793 tw32(MAC_LED_CTRL, (tp->led_ctrl | 5794 LED_CTRL_LNKLED_OVERRIDE | 5795 LED_CTRL_TRAFFIC_OVERRIDE)); 5796 } 5797 5798 if (!tg3_test_and_report_link_chg(tp, current_link_up)) { 5799 u32 now_pause_cfg = tp->link_config.active_flowctrl; 5800 if (orig_pause_cfg != now_pause_cfg || 5801 orig_active_speed != tp->link_config.active_speed || 5802 orig_active_duplex != tp->link_config.active_duplex) 5803 tg3_link_report(tp); 5804 } 5805 5806 return 0; 5807 } 5808 5809 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset) 5810 { 5811 int err = 0; 5812 u32 bmsr, bmcr; 5813 u32 current_speed = SPEED_UNKNOWN; 5814 u8 current_duplex = DUPLEX_UNKNOWN; 5815 bool 
current_link_up = false; 5816 u32 local_adv, remote_adv, sgsr; 5817 5818 if ((tg3_asic_rev(tp) == ASIC_REV_5719 || 5819 tg3_asic_rev(tp) == ASIC_REV_5720) && 5820 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) && 5821 (sgsr & SERDES_TG3_SGMII_MODE)) { 5822 5823 if (force_reset) 5824 tg3_phy_reset(tp); 5825 5826 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; 5827 5828 if (!(sgsr & SERDES_TG3_LINK_UP)) { 5829 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5830 } else { 5831 current_link_up = true; 5832 if (sgsr & SERDES_TG3_SPEED_1000) { 5833 current_speed = SPEED_1000; 5834 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5835 } else if (sgsr & SERDES_TG3_SPEED_100) { 5836 current_speed = SPEED_100; 5837 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 5838 } else { 5839 current_speed = SPEED_10; 5840 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 5841 } 5842 5843 if (sgsr & SERDES_TG3_FULL_DUPLEX) 5844 current_duplex = DUPLEX_FULL; 5845 else 5846 current_duplex = DUPLEX_HALF; 5847 } 5848 5849 tw32_f(MAC_MODE, tp->mac_mode); 5850 udelay(40); 5851 5852 tg3_clear_mac_status(tp); 5853 5854 goto fiber_setup_done; 5855 } 5856 5857 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5858 tw32_f(MAC_MODE, tp->mac_mode); 5859 udelay(40); 5860 5861 tg3_clear_mac_status(tp); 5862 5863 if (force_reset) 5864 tg3_phy_reset(tp); 5865 5866 tp->link_config.rmt_adv = 0; 5867 5868 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5869 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5870 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 5871 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 5872 bmsr |= BMSR_LSTATUS; 5873 else 5874 bmsr &= ~BMSR_LSTATUS; 5875 } 5876 5877 err |= tg3_readphy(tp, MII_BMCR, &bmcr); 5878 5879 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && 5880 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { 5881 /* do nothing, just check for link up at the end */ 5882 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { 5883 u32 adv, newadv; 5884 5885 err |= tg3_readphy(tp, MII_ADVERTISE, &adv); 5886 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | 5887 ADVERTISE_1000XPAUSE | 5888 ADVERTISE_1000XPSE_ASYM | 5889 ADVERTISE_SLCT); 5890 5891 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 5892 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising); 5893 5894 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) { 5895 tg3_writephy(tp, MII_ADVERTISE, newadv); 5896 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART; 5897 tg3_writephy(tp, MII_BMCR, bmcr); 5898 5899 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5900 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; 5901 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5902 5903 return err; 5904 } 5905 } else { 5906 u32 new_bmcr; 5907 5908 bmcr &= ~BMCR_SPEED1000; 5909 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX); 5910 5911 if (tp->link_config.duplex == DUPLEX_FULL) 5912 new_bmcr |= BMCR_FULLDPLX; 5913 5914 if (new_bmcr != bmcr) { 5915 /* BMCR_SPEED1000 is a reserved bit that needs 5916 * to be set on write. 
5917 */ 5918 new_bmcr |= BMCR_SPEED1000; 5919 5920 /* Force a linkdown */ 5921 if (tp->link_up) { 5922 u32 adv; 5923 5924 err |= tg3_readphy(tp, MII_ADVERTISE, &adv); 5925 adv &= ~(ADVERTISE_1000XFULL | 5926 ADVERTISE_1000XHALF | 5927 ADVERTISE_SLCT); 5928 tg3_writephy(tp, MII_ADVERTISE, adv); 5929 tg3_writephy(tp, MII_BMCR, bmcr | 5930 BMCR_ANRESTART | 5931 BMCR_ANENABLE); 5932 udelay(10); 5933 tg3_carrier_off(tp); 5934 } 5935 tg3_writephy(tp, MII_BMCR, new_bmcr); 5936 bmcr = new_bmcr; 5937 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5938 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5939 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 5940 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 5941 bmsr |= BMSR_LSTATUS; 5942 else 5943 bmsr &= ~BMSR_LSTATUS; 5944 } 5945 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5946 } 5947 } 5948 5949 if (bmsr & BMSR_LSTATUS) { 5950 current_speed = SPEED_1000; 5951 current_link_up = true; 5952 if (bmcr & BMCR_FULLDPLX) 5953 current_duplex = DUPLEX_FULL; 5954 else 5955 current_duplex = DUPLEX_HALF; 5956 5957 local_adv = 0; 5958 remote_adv = 0; 5959 5960 if (bmcr & BMCR_ANENABLE) { 5961 u32 common; 5962 5963 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv); 5964 err |= tg3_readphy(tp, MII_LPA, &remote_adv); 5965 common = local_adv & remote_adv; 5966 if (common & (ADVERTISE_1000XHALF | 5967 ADVERTISE_1000XFULL)) { 5968 if (common & ADVERTISE_1000XFULL) 5969 current_duplex = DUPLEX_FULL; 5970 else 5971 current_duplex = DUPLEX_HALF; 5972 5973 tp->link_config.rmt_adv = 5974 mii_adv_to_ethtool_adv_x(remote_adv); 5975 } else if (!tg3_flag(tp, 5780_CLASS)) { 5976 /* Link is up via parallel detect */ 5977 } else { 5978 current_link_up = false; 5979 } 5980 } 5981 } 5982 5983 fiber_setup_done: 5984 if (current_link_up && current_duplex == DUPLEX_FULL) 5985 tg3_setup_flow_control(tp, local_adv, remote_adv); 5986 5987 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; 5988 if (tp->link_config.active_duplex == DUPLEX_HALF) 5989 tp->mac_mode |= MAC_MODE_HALF_DUPLEX; 5990 5991 tw32_f(MAC_MODE, tp->mac_mode); 5992 udelay(40); 5993 5994 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5995 5996 tp->link_config.active_speed = current_speed; 5997 tp->link_config.active_duplex = current_duplex; 5998 5999 tg3_test_and_report_link_chg(tp, current_link_up); 6000 return err; 6001 } 6002 6003 static void tg3_serdes_parallel_detect(struct tg3 *tp) 6004 { 6005 if (tp->serdes_counter) { 6006 /* Give autoneg time to complete. */ 6007 tp->serdes_counter--; 6008 return; 6009 } 6010 6011 if (!tp->link_up && 6012 (tp->link_config.autoneg == AUTONEG_ENABLE)) { 6013 u32 bmcr; 6014 6015 tg3_readphy(tp, MII_BMCR, &bmcr); 6016 if (bmcr & BMCR_ANENABLE) { 6017 u32 phy1, phy2; 6018 6019 /* Select shadow register 0x1f */ 6020 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00); 6021 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1); 6022 6023 /* Select expansion interrupt status register */ 6024 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 6025 MII_TG3_DSP_EXP1_INT_STAT); 6026 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); 6027 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); 6028 6029 if ((phy1 & 0x10) && !(phy2 & 0x20)) { 6030 /* We have signal detect and not receiving 6031 * config code words, link is up by parallel 6032 * detection. 
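 * Force 1000FD in the BMCR and set TG3_PHYFLG_PARALLEL_DETECT so the
 * branch below can hand the link back to autoneg as soon as config
 * code words are received again.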
6033 */ 6034 6035 bmcr &= ~BMCR_ANENABLE; 6036 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; 6037 tg3_writephy(tp, MII_BMCR, bmcr); 6038 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT; 6039 } 6040 } 6041 } else if (tp->link_up && 6042 (tp->link_config.autoneg == AUTONEG_ENABLE) && 6043 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { 6044 u32 phy2; 6045 6046 /* Select expansion interrupt status register */ 6047 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 6048 MII_TG3_DSP_EXP1_INT_STAT); 6049 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); 6050 if (phy2 & 0x20) { 6051 u32 bmcr; 6052 6053 /* Config code words received, turn on autoneg. */ 6054 tg3_readphy(tp, MII_BMCR, &bmcr); 6055 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE); 6056 6057 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 6058 6059 } 6060 } 6061 } 6062 6063 static int tg3_setup_phy(struct tg3 *tp, bool force_reset) 6064 { 6065 u32 val; 6066 int err; 6067 6068 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 6069 err = tg3_setup_fiber_phy(tp, force_reset); 6070 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 6071 err = tg3_setup_fiber_mii_phy(tp, force_reset); 6072 else 6073 err = tg3_setup_copper_phy(tp, force_reset); 6074 6075 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) { 6076 u32 scale; 6077 6078 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; 6079 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5) 6080 scale = 65; 6081 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25) 6082 scale = 6; 6083 else 6084 scale = 12; 6085 6086 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK; 6087 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT); 6088 tw32(GRC_MISC_CFG, val); 6089 } 6090 6091 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | 6092 (6 << TX_LENGTHS_IPG_SHIFT); 6093 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 6094 tg3_asic_rev(tp) == ASIC_REV_5762) 6095 val |= tr32(MAC_TX_LENGTHS) & 6096 (TX_LENGTHS_JMB_FRM_LEN_MSK | 6097 TX_LENGTHS_CNT_DWN_VAL_MSK); 6098 6099 if (tp->link_config.active_speed == SPEED_1000 && 6100 tp->link_config.active_duplex == DUPLEX_HALF) 6101 tw32(MAC_TX_LENGTHS, val | 6102 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)); 6103 else 6104 tw32(MAC_TX_LENGTHS, val | 6105 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); 6106 6107 if (!tg3_flag(tp, 5705_PLUS)) { 6108 if (tp->link_up) { 6109 tw32(HOSTCC_STAT_COAL_TICKS, 6110 tp->coal.stats_block_coalesce_usecs); 6111 } else { 6112 tw32(HOSTCC_STAT_COAL_TICKS, 0); 6113 } 6114 } 6115 6116 if (tg3_flag(tp, ASPM_WORKAROUND)) { 6117 val = tr32(PCIE_PWR_MGMT_THRESH); 6118 if (!tp->link_up) 6119 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | 6120 tp->pwrmgmt_thresh; 6121 else 6122 val |= PCIE_PWR_MGMT_L1_THRESH_MSK; 6123 tw32(PCIE_PWR_MGMT_THRESH, val); 6124 } 6125 6126 return err; 6127 } 6128 6129 /* tp->lock must be held */ 6130 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts) 6131 { 6132 u64 stamp; 6133 6134 ptp_read_system_prets(sts); 6135 stamp = tr32(TG3_EAV_REF_CLCK_LSB); 6136 ptp_read_system_postts(sts); 6137 stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32; 6138 6139 return stamp; 6140 } 6141 6142 /* tp->lock must be held */ 6143 static void tg3_refclk_write(struct tg3 *tp, u64 newval) 6144 { 6145 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL); 6146 6147 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP); 6148 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff); 6149 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32); 6150 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME); 6151 } 6152 6153 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync); 6154 static inline void 
tg3_full_unlock(struct tg3 *tp); 6155 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) 6156 { 6157 struct tg3 *tp = netdev_priv(dev); 6158 6159 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 6160 SOF_TIMESTAMPING_RX_SOFTWARE | 6161 SOF_TIMESTAMPING_SOFTWARE; 6162 6163 if (tg3_flag(tp, PTP_CAPABLE)) { 6164 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE | 6165 SOF_TIMESTAMPING_RX_HARDWARE | 6166 SOF_TIMESTAMPING_RAW_HARDWARE; 6167 } 6168 6169 if (tp->ptp_clock) 6170 info->phc_index = ptp_clock_index(tp->ptp_clock); 6171 else 6172 info->phc_index = -1; 6173 6174 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); 6175 6176 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 6177 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | 6178 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | 6179 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); 6180 return 0; 6181 } 6182 6183 static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) 6184 { 6185 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6186 u64 correction; 6187 bool neg_adj; 6188 6189 /* Frequency adjustment is performed using hardware with a 24 bit 6190 * accumulator and a programmable correction value. On each clk, the 6191 * correction value gets added to the accumulator and when it 6192 * overflows, the time counter is incremented/decremented. 6193 */ 6194 neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction); 6195 6196 tg3_full_lock(tp, 0); 6197 6198 if (correction) 6199 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 6200 TG3_EAV_REF_CLK_CORRECT_EN | 6201 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | 6202 ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK)); 6203 else 6204 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0); 6205 6206 tg3_full_unlock(tp); 6207 6208 return 0; 6209 } 6210 6211 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) 6212 { 6213 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6214 6215 tg3_full_lock(tp, 0); 6216 tp->ptp_adjust += delta; 6217 tg3_full_unlock(tp); 6218 6219 return 0; 6220 } 6221 6222 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, 6223 struct ptp_system_timestamp *sts) 6224 { 6225 u64 ns; 6226 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6227 6228 tg3_full_lock(tp, 0); 6229 ns = tg3_refclk_read(tp, sts); 6230 ns += tp->ptp_adjust; 6231 tg3_full_unlock(tp); 6232 6233 *ts = ns_to_timespec64(ns); 6234 6235 return 0; 6236 } 6237 6238 static int tg3_ptp_settime(struct ptp_clock_info *ptp, 6239 const struct timespec64 *ts) 6240 { 6241 u64 ns; 6242 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6243 6244 ns = timespec64_to_ns(ts); 6245 6246 tg3_full_lock(tp, 0); 6247 tg3_refclk_write(tp, ns); 6248 tp->ptp_adjust = 0; 6249 tg3_full_unlock(tp); 6250 6251 return 0; 6252 } 6253 6254 static int tg3_ptp_enable(struct ptp_clock_info *ptp, 6255 struct ptp_clock_request *rq, int on) 6256 { 6257 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6258 u32 clock_ctl; 6259 int rval = 0; 6260 6261 switch (rq->type) { 6262 case PTP_CLK_REQ_PEROUT: 6263 /* Reject requests with unsupported flags */ 6264 if (rq->perout.flags) 6265 return -EOPNOTSUPP; 6266 6267 if (rq->perout.index != 0) 6268 return -EINVAL; 6269 6270 tg3_full_lock(tp, 0); 6271 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL); 6272 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK; 6273 6274 if (on) { 6275 u64 nsec; 6276 6277 nsec = rq->perout.start.sec * 1000000000ULL + 6278 rq->perout.start.nsec; 6279 6280 if (rq->perout.period.sec || rq->perout.period.nsec) { 
6281 netdev_warn(tp->dev, 6282 "Device supports only a one-shot timesync output, period must be 0\n"); 6283 rval = -EINVAL; 6284 goto err_out; 6285 } 6286 6287 if (nsec & (1ULL << 63)) { 6288 netdev_warn(tp->dev, 6289 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n"); 6290 rval = -EINVAL; 6291 goto err_out; 6292 } 6293 6294 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff)); 6295 tw32(TG3_EAV_WATCHDOG0_MSB, 6296 TG3_EAV_WATCHDOG0_EN | 6297 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK)); 6298 6299 tw32(TG3_EAV_REF_CLCK_CTL, 6300 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0); 6301 } else { 6302 tw32(TG3_EAV_WATCHDOG0_MSB, 0); 6303 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl); 6304 } 6305 6306 err_out: 6307 tg3_full_unlock(tp); 6308 return rval; 6309 6310 default: 6311 break; 6312 } 6313 6314 return -EOPNOTSUPP; 6315 } 6316 6317 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock, 6318 struct skb_shared_hwtstamps *timestamp) 6319 { 6320 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps)); 6321 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) + 6322 tp->ptp_adjust); 6323 } 6324 6325 static void tg3_read_tx_tstamp(struct tg3 *tp, u64 *hwclock) 6326 { 6327 *hwclock = tr32(TG3_TX_TSTAMP_LSB); 6328 *hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32; 6329 } 6330 6331 static long tg3_ptp_ts_aux_work(struct ptp_clock_info *ptp) 6332 { 6333 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6334 struct skb_shared_hwtstamps timestamp; 6335 u64 hwclock; 6336 6337 if (tp->ptp_txts_retrycnt > 2) 6338 goto done; 6339 6340 tg3_read_tx_tstamp(tp, &hwclock); 6341 6342 if (hwclock != tp->pre_tx_ts) { 6343 tg3_hwclock_to_timestamp(tp, hwclock, ×tamp); 6344 skb_tstamp_tx(tp->tx_tstamp_skb, ×tamp); 6345 goto done; 6346 } 6347 tp->ptp_txts_retrycnt++; 6348 return HZ / 10; 6349 done: 6350 dev_consume_skb_any(tp->tx_tstamp_skb); 6351 tp->tx_tstamp_skb = NULL; 6352 tp->ptp_txts_retrycnt = 0; 6353 tp->pre_tx_ts = 0; 6354 return -1; 6355 } 6356 6357 static const struct ptp_clock_info tg3_ptp_caps = { 6358 .owner = THIS_MODULE, 6359 .name = "tg3 clock", 6360 .max_adj = 250000000, 6361 .n_alarm = 0, 6362 .n_ext_ts = 0, 6363 .n_per_out = 1, 6364 .n_pins = 0, 6365 .pps = 0, 6366 .adjfine = tg3_ptp_adjfine, 6367 .adjtime = tg3_ptp_adjtime, 6368 .do_aux_work = tg3_ptp_ts_aux_work, 6369 .gettimex64 = tg3_ptp_gettimex, 6370 .settime64 = tg3_ptp_settime, 6371 .enable = tg3_ptp_enable, 6372 }; 6373 6374 /* tp->lock must be held */ 6375 static void tg3_ptp_init(struct tg3 *tp) 6376 { 6377 if (!tg3_flag(tp, PTP_CAPABLE)) 6378 return; 6379 6380 /* Initialize the hardware clock to the system time. 
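 * tg3_refclk_write() stops the reference clock, loads the 64-bit
 * nanosecond value in two 32-bit halves and resumes counting, so the
 * PHC starts out tracking CLOCK_REALTIME.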
*/ 6381 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real())); 6382 tp->ptp_adjust = 0; 6383 tp->ptp_info = tg3_ptp_caps; 6384 } 6385 6386 /* tp->lock must be held */ 6387 static void tg3_ptp_resume(struct tg3 *tp) 6388 { 6389 if (!tg3_flag(tp, PTP_CAPABLE)) 6390 return; 6391 6392 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust); 6393 tp->ptp_adjust = 0; 6394 } 6395 6396 static void tg3_ptp_fini(struct tg3 *tp) 6397 { 6398 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock) 6399 return; 6400 6401 ptp_clock_unregister(tp->ptp_clock); 6402 tp->ptp_clock = NULL; 6403 tp->ptp_adjust = 0; 6404 dev_consume_skb_any(tp->tx_tstamp_skb); 6405 tp->tx_tstamp_skb = NULL; 6406 } 6407 6408 static inline int tg3_irq_sync(struct tg3 *tp) 6409 { 6410 return tp->irq_sync; 6411 } 6412 6413 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len) 6414 { 6415 int i; 6416 6417 dst = (u32 *)((u8 *)dst + off); 6418 for (i = 0; i < len; i += sizeof(u32)) 6419 *dst++ = tr32(off + i); 6420 } 6421 6422 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs) 6423 { 6424 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0); 6425 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200); 6426 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0); 6427 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0); 6428 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04); 6429 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80); 6430 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48); 6431 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04); 6432 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20); 6433 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c); 6434 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c); 6435 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c); 6436 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44); 6437 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04); 6438 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20); 6439 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14); 6440 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08); 6441 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08); 6442 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100); 6443 6444 if (tg3_flag(tp, SUPPORT_MSIX)) 6445 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180); 6446 6447 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10); 6448 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58); 6449 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08); 6450 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08); 6451 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04); 6452 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04); 6453 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04); 6454 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04); 6455 6456 if (!tg3_flag(tp, 5705_PLUS)) { 6457 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04); 6458 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04); 6459 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04); 6460 } 6461 6462 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110); 6463 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120); 6464 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c); 6465 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04); 6466 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c); 6467 6468 if (tg3_flag(tp, NVRAM)) 6469 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24); 6470 } 6471 6472 static void tg3_dump_state(struct tg3 *tp) 6473 { 6474 int i; 6475 u32 *regs; 6476 6477 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC); 6478 if (!regs) 6479 return; 6480 6481 if (tg3_flag(tp, PCI_EXPRESS)) { 6482 /* Read up to but not including private PCI registers */ 6483 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32)) 6484 regs[i / sizeof(u32)] = tr32(i); 6485 } else 6486 tg3_dump_legacy_regs(tp, regs); 6487 6488 for (i = 0; i 
< TG3_REG_BLK_SIZE / sizeof(u32); i += 4) { 6489 if (!regs[i + 0] && !regs[i + 1] && 6490 !regs[i + 2] && !regs[i + 3]) 6491 continue; 6492 6493 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", 6494 i * 4, 6495 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]); 6496 } 6497 6498 kfree(regs); 6499 6500 for (i = 0; i < tp->irq_cnt; i++) { 6501 struct tg3_napi *tnapi = &tp->napi[i]; 6502 6503 /* SW status block */ 6504 netdev_err(tp->dev, 6505 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", 6506 i, 6507 tnapi->hw_status->status, 6508 tnapi->hw_status->status_tag, 6509 tnapi->hw_status->rx_jumbo_consumer, 6510 tnapi->hw_status->rx_consumer, 6511 tnapi->hw_status->rx_mini_consumer, 6512 tnapi->hw_status->idx[0].rx_producer, 6513 tnapi->hw_status->idx[0].tx_consumer); 6514 6515 netdev_err(tp->dev, 6516 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n", 6517 i, 6518 tnapi->last_tag, tnapi->last_irq_tag, 6519 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending, 6520 tnapi->rx_rcb_ptr, 6521 tnapi->prodring.rx_std_prod_idx, 6522 tnapi->prodring.rx_std_cons_idx, 6523 tnapi->prodring.rx_jmb_prod_idx, 6524 tnapi->prodring.rx_jmb_cons_idx); 6525 } 6526 } 6527 6528 /* This is called whenever we suspect that the system chipset is re- 6529 * ordering the sequence of MMIO to the tx send mailbox. The symptom 6530 * is bogus tx completions. We try to recover by setting the 6531 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later 6532 * in the workqueue. 6533 */ 6534 static void tg3_tx_recover(struct tg3 *tp) 6535 { 6536 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) || 6537 tp->write32_tx_mbox == tg3_write_indirect_mbox); 6538 6539 netdev_warn(tp->dev, 6540 "The system may be re-ordering memory-mapped I/O " 6541 "cycles to the network device, attempting to recover. " 6542 "Please report the problem to the driver maintainer " 6543 "and include system chipset information.\n"); 6544 6545 tg3_flag_set(tp, TX_RECOVERY_PENDING); 6546 } 6547 6548 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) 6549 { 6550 /* Tell compiler to fetch tx indices from memory. */ 6551 barrier(); 6552 return tnapi->tx_pending - 6553 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1)); 6554 } 6555 6556 /* Tigon3 never reports partial packet sends. So we do not 6557 * need special logic to handle SKBs that have not had all 6558 * of their frags sent yet, like SunGEM does. 
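 */

/* Worked illustration of the descriptor accounting in tg3_tx_avail()
 * above (hypothetical helper, not used by the driver): with
 * TG3_TX_RING_SIZE = 512, tx_prod = 10 and tx_cons = 500,
 * (10 - 500) & 511 = 22 descriptors are still in flight, so the
 * default tx_pending of 511 leaves 489 free slots.
 */
static inline u32 tg3_tx_ring_in_flight(u32 tx_prod, u32 tx_cons)
{
	/* Relies on TG3_TX_RING_SIZE being a power of two (see NEXT_TX). */
	return (tx_prod - tx_cons) & (TG3_TX_RING_SIZE - 1);
}

/* Reclaim descriptors the chip has finished with: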
6559 */ 6560 static void tg3_tx(struct tg3_napi *tnapi) 6561 { 6562 struct tg3 *tp = tnapi->tp; 6563 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer; 6564 u32 sw_idx = tnapi->tx_cons; 6565 struct netdev_queue *txq; 6566 int index = tnapi - tp->napi; 6567 unsigned int pkts_compl = 0, bytes_compl = 0; 6568 6569 if (tg3_flag(tp, ENABLE_TSS)) 6570 index--; 6571 6572 txq = netdev_get_tx_queue(tp->dev, index); 6573 6574 while (sw_idx != hw_idx) { 6575 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx]; 6576 bool complete_skb_later = false; 6577 struct sk_buff *skb = ri->skb; 6578 int i, tx_bug = 0; 6579 6580 if (unlikely(skb == NULL)) { 6581 tg3_tx_recover(tp); 6582 return; 6583 } 6584 6585 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) { 6586 struct skb_shared_hwtstamps timestamp; 6587 u64 hwclock; 6588 6589 tg3_read_tx_tstamp(tp, &hwclock); 6590 if (hwclock != tp->pre_tx_ts) { 6591 tg3_hwclock_to_timestamp(tp, hwclock, ×tamp); 6592 skb_tstamp_tx(skb, ×tamp); 6593 tp->pre_tx_ts = 0; 6594 } else { 6595 tp->tx_tstamp_skb = skb; 6596 complete_skb_later = true; 6597 } 6598 } 6599 6600 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), 6601 skb_headlen(skb), DMA_TO_DEVICE); 6602 6603 ri->skb = NULL; 6604 6605 while (ri->fragmented) { 6606 ri->fragmented = false; 6607 sw_idx = NEXT_TX(sw_idx); 6608 ri = &tnapi->tx_buffers[sw_idx]; 6609 } 6610 6611 sw_idx = NEXT_TX(sw_idx); 6612 6613 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 6614 ri = &tnapi->tx_buffers[sw_idx]; 6615 if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) 6616 tx_bug = 1; 6617 6618 dma_unmap_page(&tp->pdev->dev, 6619 dma_unmap_addr(ri, mapping), 6620 skb_frag_size(&skb_shinfo(skb)->frags[i]), 6621 DMA_TO_DEVICE); 6622 6623 while (ri->fragmented) { 6624 ri->fragmented = false; 6625 sw_idx = NEXT_TX(sw_idx); 6626 ri = &tnapi->tx_buffers[sw_idx]; 6627 } 6628 6629 sw_idx = NEXT_TX(sw_idx); 6630 } 6631 6632 pkts_compl++; 6633 bytes_compl += skb->len; 6634 6635 if (!complete_skb_later) 6636 dev_consume_skb_any(skb); 6637 else 6638 ptp_schedule_worker(tp->ptp_clock, 0); 6639 6640 if (unlikely(tx_bug)) { 6641 tg3_tx_recover(tp); 6642 return; 6643 } 6644 } 6645 6646 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl); 6647 6648 tnapi->tx_cons = sw_idx; 6649 6650 /* Need to make the tx_cons update visible to tg3_start_xmit() 6651 * before checking for netif_queue_stopped(). Without the 6652 * memory barrier, there is a small possibility that tg3_start_xmit() 6653 * will miss it and cause the queue to be stopped forever. 
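 * (The smp_mb() below pairs with the one issued right after
 * netif_tx_stop_queue() in tg3_start_xmit() and tg3_tso_bug().)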
 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}

static void tg3_frag_free(bool is_frag, void *data)
{
	if (is_frag)
		skb_free_frag(data);
	else
		kfree(data);
}

static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (!ri->data)
		return;

	dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
			 DMA_FROM_DEVICE);
	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
	ri->data = NULL;
}

/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (skb_size <= PAGE_SIZE) {
		data = napi_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
				 data_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}

/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.
See notes above 6766 * tg3_alloc_rx_data for full details. 6767 */ 6768 static void tg3_recycle_rx(struct tg3_napi *tnapi, 6769 struct tg3_rx_prodring_set *dpr, 6770 u32 opaque_key, int src_idx, 6771 u32 dest_idx_unmasked) 6772 { 6773 struct tg3 *tp = tnapi->tp; 6774 struct tg3_rx_buffer_desc *src_desc, *dest_desc; 6775 struct ring_info *src_map, *dest_map; 6776 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring; 6777 int dest_idx; 6778 6779 switch (opaque_key) { 6780 case RXD_OPAQUE_RING_STD: 6781 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; 6782 dest_desc = &dpr->rx_std[dest_idx]; 6783 dest_map = &dpr->rx_std_buffers[dest_idx]; 6784 src_desc = &spr->rx_std[src_idx]; 6785 src_map = &spr->rx_std_buffers[src_idx]; 6786 break; 6787 6788 case RXD_OPAQUE_RING_JUMBO: 6789 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; 6790 dest_desc = &dpr->rx_jmb[dest_idx].std; 6791 dest_map = &dpr->rx_jmb_buffers[dest_idx]; 6792 src_desc = &spr->rx_jmb[src_idx].std; 6793 src_map = &spr->rx_jmb_buffers[src_idx]; 6794 break; 6795 6796 default: 6797 return; 6798 } 6799 6800 dest_map->data = src_map->data; 6801 dma_unmap_addr_set(dest_map, mapping, 6802 dma_unmap_addr(src_map, mapping)); 6803 dest_desc->addr_hi = src_desc->addr_hi; 6804 dest_desc->addr_lo = src_desc->addr_lo; 6805 6806 /* Ensure that the update to the skb happens after the physical 6807 * addresses have been transferred to the new BD location. 6808 */ 6809 smp_wmb(); 6810 6811 src_map->data = NULL; 6812 } 6813 6814 /* The RX ring scheme is composed of multiple rings which post fresh 6815 * buffers to the chip, and one special ring the chip uses to report 6816 * status back to the host. 6817 * 6818 * The special ring reports the status of received packets to the 6819 * host. The chip does not write into the original descriptor the 6820 * RX buffer was obtained from. The chip simply takes the original 6821 * descriptor as provided by the host, updates the status and length 6822 * field, then writes this into the next status ring entry. 6823 * 6824 * Each ring the host uses to post buffers to the chip is described 6825 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives, 6826 * it is first placed into the on-chip ram. When the packet's length 6827 * is known, it walks down the TG3_BDINFO entries to select the ring. 6828 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO 6829 * which is within the range of the new packet's length is chosen. 6830 * 6831 * The "separate ring for rx status" scheme may sound queer, but it makes 6832 * sense from a cache coherency perspective. If only the host writes 6833 * to the buffer post rings, and only the chip writes to the rx status 6834 * rings, then cache lines never move beyond shared-modified state. 6835 * If both the host and chip were to write into the same ring, cache line 6836 * eviction could occur since both entities want it in an exclusive state. 6837 */ 6838 static int tg3_rx(struct tg3_napi *tnapi, int budget) 6839 { 6840 struct tg3 *tp = tnapi->tp; 6841 u32 work_mask, rx_std_posted = 0; 6842 u32 std_prod_idx, jmb_prod_idx; 6843 u32 sw_idx = tnapi->rx_rcb_ptr; 6844 u16 hw_idx; 6845 int received; 6846 struct tg3_rx_prodring_set *tpr = &tnapi->prodring; 6847 6848 hw_idx = *(tnapi->rx_rcb_prod_idx); 6849 /* 6850 * We need to order the read of hw_idx and the read of 6851 * the opaque cookie. 
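 * (The chip DMAs a return-ring descriptor before it advances the
 * producer index in the status block; the rmb() below keeps the CPU
 * from using descriptor fields fetched ahead of the hw_idx load.)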
6852 */ 6853 rmb(); 6854 work_mask = 0; 6855 received = 0; 6856 std_prod_idx = tpr->rx_std_prod_idx; 6857 jmb_prod_idx = tpr->rx_jmb_prod_idx; 6858 while (sw_idx != hw_idx && budget > 0) { 6859 struct ring_info *ri; 6860 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; 6861 unsigned int len; 6862 struct sk_buff *skb; 6863 dma_addr_t dma_addr; 6864 u32 opaque_key, desc_idx, *post_ptr; 6865 u8 *data; 6866 u64 tstamp = 0; 6867 6868 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 6869 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 6870 if (opaque_key == RXD_OPAQUE_RING_STD) { 6871 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx]; 6872 dma_addr = dma_unmap_addr(ri, mapping); 6873 data = ri->data; 6874 post_ptr = &std_prod_idx; 6875 rx_std_posted++; 6876 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 6877 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx]; 6878 dma_addr = dma_unmap_addr(ri, mapping); 6879 data = ri->data; 6880 post_ptr = &jmb_prod_idx; 6881 } else 6882 goto next_pkt_nopost; 6883 6884 work_mask |= opaque_key; 6885 6886 if (desc->err_vlan & RXD_ERR_MASK) { 6887 drop_it: 6888 tg3_recycle_rx(tnapi, tpr, opaque_key, 6889 desc_idx, *post_ptr); 6890 drop_it_no_recycle: 6891 /* Other statistics kept track of by card. */ 6892 tp->rx_dropped++; 6893 goto next_pkt; 6894 } 6895 6896 prefetch(data + TG3_RX_OFFSET(tp)); 6897 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 6898 ETH_FCS_LEN; 6899 6900 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) == 6901 RXD_FLAG_PTPSTAT_PTPV1 || 6902 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) == 6903 RXD_FLAG_PTPSTAT_PTPV2) { 6904 tstamp = tr32(TG3_RX_TSTAMP_LSB); 6905 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32; 6906 } 6907 6908 if (len > TG3_RX_COPY_THRESH(tp)) { 6909 int skb_size; 6910 unsigned int frag_size; 6911 6912 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key, 6913 *post_ptr, &frag_size); 6914 if (skb_size < 0) 6915 goto drop_it; 6916 6917 dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size, 6918 DMA_FROM_DEVICE); 6919 6920 /* Ensure that the update to the data happens 6921 * after the usage of the old DMA mapping. 
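 * (The smp_wmb() below orders the unmap above against the clearing of
 * ri->data, so a slot never reads as free (data == NULL) while its
 * old DMA mapping is still live.)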
6922 */ 6923 smp_wmb(); 6924 6925 ri->data = NULL; 6926 6927 if (frag_size) 6928 skb = build_skb(data, frag_size); 6929 else 6930 skb = slab_build_skb(data); 6931 if (!skb) { 6932 tg3_frag_free(frag_size != 0, data); 6933 goto drop_it_no_recycle; 6934 } 6935 skb_reserve(skb, TG3_RX_OFFSET(tp)); 6936 } else { 6937 tg3_recycle_rx(tnapi, tpr, opaque_key, 6938 desc_idx, *post_ptr); 6939 6940 skb = netdev_alloc_skb(tp->dev, 6941 len + TG3_RAW_IP_ALIGN); 6942 if (skb == NULL) 6943 goto drop_it_no_recycle; 6944 6945 skb_reserve(skb, TG3_RAW_IP_ALIGN); 6946 dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len, 6947 DMA_FROM_DEVICE); 6948 memcpy(skb->data, 6949 data + TG3_RX_OFFSET(tp), 6950 len); 6951 dma_sync_single_for_device(&tp->pdev->dev, dma_addr, 6952 len, DMA_FROM_DEVICE); 6953 } 6954 6955 skb_put(skb, len); 6956 if (tstamp) 6957 tg3_hwclock_to_timestamp(tp, tstamp, 6958 skb_hwtstamps(skb)); 6959 6960 if ((tp->dev->features & NETIF_F_RXCSUM) && 6961 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && 6962 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) 6963 >> RXD_TCPCSUM_SHIFT) == 0xffff)) 6964 skb->ip_summed = CHECKSUM_UNNECESSARY; 6965 else 6966 skb_checksum_none_assert(skb); 6967 6968 skb->protocol = eth_type_trans(skb, tp->dev); 6969 6970 if (len > (tp->dev->mtu + ETH_HLEN) && 6971 skb->protocol != htons(ETH_P_8021Q) && 6972 skb->protocol != htons(ETH_P_8021AD)) { 6973 dev_kfree_skb_any(skb); 6974 goto drop_it_no_recycle; 6975 } 6976 6977 if (desc->type_flags & RXD_FLAG_VLAN && 6978 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) 6979 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 6980 desc->err_vlan & RXD_VLAN_MASK); 6981 6982 napi_gro_receive(&tnapi->napi, skb); 6983 6984 received++; 6985 budget--; 6986 6987 next_pkt: 6988 (*post_ptr)++; 6989 6990 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { 6991 tpr->rx_std_prod_idx = std_prod_idx & 6992 tp->rx_std_ring_mask; 6993 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 6994 tpr->rx_std_prod_idx); 6995 work_mask &= ~RXD_OPAQUE_RING_STD; 6996 rx_std_posted = 0; 6997 } 6998 next_pkt_nopost: 6999 sw_idx++; 7000 sw_idx &= tp->rx_ret_ring_mask; 7001 7002 /* Refresh hw_idx to see if there is new work */ 7003 if (sw_idx == hw_idx) { 7004 hw_idx = *(tnapi->rx_rcb_prod_idx); 7005 rmb(); 7006 } 7007 } 7008 7009 /* ACK the status ring. */ 7010 tnapi->rx_rcb_ptr = sw_idx; 7011 tw32_rx_mbox(tnapi->consmbox, sw_idx); 7012 7013 /* Refill RX ring(s). */ 7014 if (!tg3_flag(tp, ENABLE_RSS)) { 7015 /* Sync BD data before updating mailbox */ 7016 wmb(); 7017 7018 if (work_mask & RXD_OPAQUE_RING_STD) { 7019 tpr->rx_std_prod_idx = std_prod_idx & 7020 tp->rx_std_ring_mask; 7021 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 7022 tpr->rx_std_prod_idx); 7023 } 7024 if (work_mask & RXD_OPAQUE_RING_JUMBO) { 7025 tpr->rx_jmb_prod_idx = jmb_prod_idx & 7026 tp->rx_jmb_ring_mask; 7027 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 7028 tpr->rx_jmb_prod_idx); 7029 } 7030 } else if (work_mask) { 7031 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be 7032 * updated before the producer indices can be updated. 
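 * The smp_wmb() below publishes those entries before the new producer
 * indices; tg3_rx_prodring_xfer() reads the index first and the
 * entries second, under a matching smp_rmb().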
7033 */ 7034 smp_wmb(); 7035 7036 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask; 7037 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask; 7038 7039 if (tnapi != &tp->napi[1]) { 7040 tp->rx_refill = true; 7041 napi_schedule(&tp->napi[1].napi); 7042 } 7043 } 7044 7045 return received; 7046 } 7047 7048 static void tg3_poll_link(struct tg3 *tp) 7049 { 7050 /* handle link change and other phy events */ 7051 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { 7052 struct tg3_hw_status *sblk = tp->napi[0].hw_status; 7053 7054 if (sblk->status & SD_STATUS_LINK_CHG) { 7055 sblk->status = SD_STATUS_UPDATED | 7056 (sblk->status & ~SD_STATUS_LINK_CHG); 7057 spin_lock(&tp->lock); 7058 if (tg3_flag(tp, USE_PHYLIB)) { 7059 tw32_f(MAC_STATUS, 7060 (MAC_STATUS_SYNC_CHANGED | 7061 MAC_STATUS_CFG_CHANGED | 7062 MAC_STATUS_MI_COMPLETION | 7063 MAC_STATUS_LNKSTATE_CHANGED)); 7064 udelay(40); 7065 } else 7066 tg3_setup_phy(tp, false); 7067 spin_unlock(&tp->lock); 7068 } 7069 } 7070 } 7071 7072 static int tg3_rx_prodring_xfer(struct tg3 *tp, 7073 struct tg3_rx_prodring_set *dpr, 7074 struct tg3_rx_prodring_set *spr) 7075 { 7076 u32 si, di, cpycnt, src_prod_idx; 7077 int i, err = 0; 7078 7079 while (1) { 7080 src_prod_idx = spr->rx_std_prod_idx; 7081 7082 /* Make sure updates to the rx_std_buffers[] entries and the 7083 * standard producer index are seen in the correct order. 7084 */ 7085 smp_rmb(); 7086 7087 if (spr->rx_std_cons_idx == src_prod_idx) 7088 break; 7089 7090 if (spr->rx_std_cons_idx < src_prod_idx) 7091 cpycnt = src_prod_idx - spr->rx_std_cons_idx; 7092 else 7093 cpycnt = tp->rx_std_ring_mask + 1 - 7094 spr->rx_std_cons_idx; 7095 7096 cpycnt = min(cpycnt, 7097 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx); 7098 7099 si = spr->rx_std_cons_idx; 7100 di = dpr->rx_std_prod_idx; 7101 7102 for (i = di; i < di + cpycnt; i++) { 7103 if (dpr->rx_std_buffers[i].data) { 7104 cpycnt = i - di; 7105 err = -ENOSPC; 7106 break; 7107 } 7108 } 7109 7110 if (!cpycnt) 7111 break; 7112 7113 /* Ensure that updates to the rx_std_buffers ring and the 7114 * shadowed hardware producer ring from tg3_recycle_skb() are 7115 * ordered correctly WRT the skb check above. 7116 */ 7117 smp_rmb(); 7118 7119 memcpy(&dpr->rx_std_buffers[di], 7120 &spr->rx_std_buffers[si], 7121 cpycnt * sizeof(struct ring_info)); 7122 7123 for (i = 0; i < cpycnt; i++, di++, si++) { 7124 struct tg3_rx_buffer_desc *sbd, *dbd; 7125 sbd = &spr->rx_std[si]; 7126 dbd = &dpr->rx_std[di]; 7127 dbd->addr_hi = sbd->addr_hi; 7128 dbd->addr_lo = sbd->addr_lo; 7129 } 7130 7131 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) & 7132 tp->rx_std_ring_mask; 7133 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) & 7134 tp->rx_std_ring_mask; 7135 } 7136 7137 while (1) { 7138 src_prod_idx = spr->rx_jmb_prod_idx; 7139 7140 /* Make sure updates to the rx_jmb_buffers[] entries and 7141 * the jumbo producer index are seen in the correct order. 
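 * (This mirrors the standard-ring transfer loop above.)
 */

/* Worked example of the cpycnt computation used by both transfer
 * loops (illustration only, not driver code): with
 * rx_std_ring_mask = 511, cons_idx = 500 and prod_idx = 10 the
 * producer has wrapped, so the first pass copies 512 - 500 = 12
 * entries up to the end of the ring and the next iteration picks up
 * the remaining 10 from index 0, each pass further clamped by the
 * free space in the destination ring.
 */

/* Pairs with the smp_wmb() in the RSS refill path of tg3_rx():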
7142 */ 7143 smp_rmb(); 7144 7145 if (spr->rx_jmb_cons_idx == src_prod_idx) 7146 break; 7147 7148 if (spr->rx_jmb_cons_idx < src_prod_idx) 7149 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; 7150 else 7151 cpycnt = tp->rx_jmb_ring_mask + 1 - 7152 spr->rx_jmb_cons_idx; 7153 7154 cpycnt = min(cpycnt, 7155 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx); 7156 7157 si = spr->rx_jmb_cons_idx; 7158 di = dpr->rx_jmb_prod_idx; 7159 7160 for (i = di; i < di + cpycnt; i++) { 7161 if (dpr->rx_jmb_buffers[i].data) { 7162 cpycnt = i - di; 7163 err = -ENOSPC; 7164 break; 7165 } 7166 } 7167 7168 if (!cpycnt) 7169 break; 7170 7171 /* Ensure that updates to the rx_jmb_buffers ring and the 7172 * shadowed hardware producer ring from tg3_recycle_skb() are 7173 * ordered correctly WRT the skb check above. 7174 */ 7175 smp_rmb(); 7176 7177 memcpy(&dpr->rx_jmb_buffers[di], 7178 &spr->rx_jmb_buffers[si], 7179 cpycnt * sizeof(struct ring_info)); 7180 7181 for (i = 0; i < cpycnt; i++, di++, si++) { 7182 struct tg3_rx_buffer_desc *sbd, *dbd; 7183 sbd = &spr->rx_jmb[si].std; 7184 dbd = &dpr->rx_jmb[di].std; 7185 dbd->addr_hi = sbd->addr_hi; 7186 dbd->addr_lo = sbd->addr_lo; 7187 } 7188 7189 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) & 7190 tp->rx_jmb_ring_mask; 7191 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) & 7192 tp->rx_jmb_ring_mask; 7193 } 7194 7195 return err; 7196 } 7197 7198 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) 7199 { 7200 struct tg3 *tp = tnapi->tp; 7201 7202 /* run TX completion thread */ 7203 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { 7204 tg3_tx(tnapi); 7205 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) 7206 return work_done; 7207 } 7208 7209 if (!tnapi->rx_rcb_prod_idx) 7210 return work_done; 7211 7212 /* run RX thread, within the bounds set by NAPI. 
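 * Each tg3_rx() call below consumes at most budget - work_done
 * packets.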
7213 * All RX "locking" is done by ensuring outside 7214 * code synchronizes with tg3->napi.poll() 7215 */ 7216 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) 7217 work_done += tg3_rx(tnapi, budget - work_done); 7218 7219 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) { 7220 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring; 7221 int i, err = 0; 7222 u32 std_prod_idx = dpr->rx_std_prod_idx; 7223 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; 7224 7225 tp->rx_refill = false; 7226 for (i = 1; i <= tp->rxq_cnt; i++) 7227 err |= tg3_rx_prodring_xfer(tp, dpr, 7228 &tp->napi[i].prodring); 7229 7230 wmb(); 7231 7232 if (std_prod_idx != dpr->rx_std_prod_idx) 7233 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 7234 dpr->rx_std_prod_idx); 7235 7236 if (jmb_prod_idx != dpr->rx_jmb_prod_idx) 7237 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 7238 dpr->rx_jmb_prod_idx); 7239 7240 if (err) 7241 tw32_f(HOSTCC_MODE, tp->coal_now); 7242 } 7243 7244 return work_done; 7245 } 7246 7247 static inline void tg3_reset_task_schedule(struct tg3 *tp) 7248 { 7249 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) 7250 schedule_work(&tp->reset_task); 7251 } 7252 7253 static inline void tg3_reset_task_cancel(struct tg3 *tp) 7254 { 7255 if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) 7256 cancel_work_sync(&tp->reset_task); 7257 tg3_flag_clear(tp, TX_RECOVERY_PENDING); 7258 } 7259 7260 static int tg3_poll_msix(struct napi_struct *napi, int budget) 7261 { 7262 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); 7263 struct tg3 *tp = tnapi->tp; 7264 int work_done = 0; 7265 struct tg3_hw_status *sblk = tnapi->hw_status; 7266 7267 while (1) { 7268 work_done = tg3_poll_work(tnapi, work_done, budget); 7269 7270 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) 7271 goto tx_recovery; 7272 7273 if (unlikely(work_done >= budget)) 7274 break; 7275 7276 /* tp->last_tag is used in tg3_int_reenable() below 7277 * to tell the hw how much work has been processed, 7278 * so we must read it before checking for more work. 7279 */ 7280 tnapi->last_tag = sblk->status_tag; 7281 tnapi->last_irq_tag = tnapi->last_tag; 7282 rmb(); 7283 7284 /* check for RX/TX work to do */ 7285 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons && 7286 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) { 7287 7288 /* This test here is not race free, but will reduce 7289 * the number of interrupts by looping again. 7290 */ 7291 if (tnapi == &tp->napi[1] && tp->rx_refill) 7292 continue; 7293 7294 napi_complete_done(napi, work_done); 7295 /* Reenable interrupts. */ 7296 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); 7297 7298 /* This test here is synchronized by napi_schedule() 7299 * and napi_complete() to close the race condition. 7300 */ 7301 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) { 7302 tw32(HOSTCC_MODE, tp->coalesce_mode | 7303 HOSTCC_MODE_ENABLE | 7304 tnapi->coal_now); 7305 } 7306 break; 7307 } 7308 } 7309 7310 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1); 7311 return work_done; 7312 7313 tx_recovery: 7314 /* work_done is guaranteed to be less than budget. */ 7315 napi_complete(napi); 7316 tg3_reset_task_schedule(tp); 7317 return work_done; 7318 } 7319 7320 static void tg3_process_error(struct tg3 *tp) 7321 { 7322 u32 val; 7323 bool real_error = false; 7324 7325 if (tg3_flag(tp, ERROR_PROCESSED)) 7326 return; 7327 7328 /* Check Flow Attention register */ 7329 val = tr32(HOSTCC_FLOW_ATTN); 7330 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) { 7331 netdev_err(tp->dev, "FLOW Attention error. 
Resetting chip.\n"); 7332 real_error = true; 7333 } 7334 7335 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) { 7336 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n"); 7337 real_error = true; 7338 } 7339 7340 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) { 7341 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n"); 7342 real_error = true; 7343 } 7344 7345 if (!real_error) 7346 return; 7347 7348 tg3_dump_state(tp); 7349 7350 tg3_flag_set(tp, ERROR_PROCESSED); 7351 tg3_reset_task_schedule(tp); 7352 } 7353 7354 static int tg3_poll(struct napi_struct *napi, int budget) 7355 { 7356 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); 7357 struct tg3 *tp = tnapi->tp; 7358 int work_done = 0; 7359 struct tg3_hw_status *sblk = tnapi->hw_status; 7360 7361 while (1) { 7362 if (sblk->status & SD_STATUS_ERROR) 7363 tg3_process_error(tp); 7364 7365 tg3_poll_link(tp); 7366 7367 work_done = tg3_poll_work(tnapi, work_done, budget); 7368 7369 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) 7370 goto tx_recovery; 7371 7372 if (unlikely(work_done >= budget)) 7373 break; 7374 7375 if (tg3_flag(tp, TAGGED_STATUS)) { 7376 /* tp->last_tag is used in tg3_int_reenable() below 7377 * to tell the hw how much work has been processed, 7378 * so we must read it before checking for more work. 7379 */ 7380 tnapi->last_tag = sblk->status_tag; 7381 tnapi->last_irq_tag = tnapi->last_tag; 7382 rmb(); 7383 } else 7384 sblk->status &= ~SD_STATUS_UPDATED; 7385 7386 if (likely(!tg3_has_work(tnapi))) { 7387 napi_complete_done(napi, work_done); 7388 tg3_int_reenable(tnapi); 7389 break; 7390 } 7391 } 7392 7393 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1); 7394 return work_done; 7395 7396 tx_recovery: 7397 /* work_done is guaranteed to be less than budget. 
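 * (tg3_poll_work() returns before doing any RX work once TX recovery
 * is pending, so work_done still holds a value that let the while
 * loop continue.)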
*/ 7398 napi_complete(napi); 7399 tg3_reset_task_schedule(tp); 7400 return work_done; 7401 } 7402 7403 static void tg3_napi_disable(struct tg3 *tp) 7404 { 7405 int i; 7406 7407 for (i = tp->irq_cnt - 1; i >= 0; i--) 7408 napi_disable(&tp->napi[i].napi); 7409 } 7410 7411 static void tg3_napi_enable(struct tg3 *tp) 7412 { 7413 int i; 7414 7415 for (i = 0; i < tp->irq_cnt; i++) 7416 napi_enable(&tp->napi[i].napi); 7417 } 7418 7419 static void tg3_napi_init(struct tg3 *tp) 7420 { 7421 int i; 7422 7423 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll); 7424 for (i = 1; i < tp->irq_cnt; i++) 7425 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix); 7426 } 7427 7428 static void tg3_napi_fini(struct tg3 *tp) 7429 { 7430 int i; 7431 7432 for (i = 0; i < tp->irq_cnt; i++) 7433 netif_napi_del(&tp->napi[i].napi); 7434 } 7435 7436 static inline void tg3_netif_stop(struct tg3 *tp) 7437 { 7438 netif_trans_update(tp->dev); /* prevent tx timeout */ 7439 tg3_napi_disable(tp); 7440 netif_carrier_off(tp->dev); 7441 netif_tx_disable(tp->dev); 7442 } 7443 7444 /* tp->lock must be held */ 7445 static inline void tg3_netif_start(struct tg3 *tp) 7446 { 7447 tg3_ptp_resume(tp); 7448 7449 /* NOTE: unconditional netif_tx_wake_all_queues is only 7450 * appropriate so long as all callers are assured to 7451 * have free tx slots (such as after tg3_init_hw) 7452 */ 7453 netif_tx_wake_all_queues(tp->dev); 7454 7455 if (tp->link_up) 7456 netif_carrier_on(tp->dev); 7457 7458 tg3_napi_enable(tp); 7459 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED; 7460 tg3_enable_ints(tp); 7461 } 7462 7463 static void tg3_irq_quiesce(struct tg3 *tp) 7464 __releases(tp->lock) 7465 __acquires(tp->lock) 7466 { 7467 int i; 7468 7469 BUG_ON(tp->irq_sync); 7470 7471 tp->irq_sync = 1; 7472 smp_mb(); 7473 7474 spin_unlock_bh(&tp->lock); 7475 7476 for (i = 0; i < tp->irq_cnt; i++) 7477 synchronize_irq(tp->napi[i].irq_vec); 7478 7479 spin_lock_bh(&tp->lock); 7480 } 7481 7482 /* Fully shutdown all tg3 driver activity elsewhere in the system. 7483 * If irq_sync is non-zero, then the IRQ handler must be synchronized 7484 * with as well. Most of the time, this is not necessary except when 7485 * shutting down the device. 7486 */ 7487 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync) 7488 { 7489 spin_lock_bh(&tp->lock); 7490 if (irq_sync) 7491 tg3_irq_quiesce(tp); 7492 } 7493 7494 static inline void tg3_full_unlock(struct tg3 *tp) 7495 { 7496 spin_unlock_bh(&tp->lock); 7497 } 7498 7499 /* One-shot MSI handler - Chip automatically disables interrupt 7500 * after sending MSI so driver doesn't have to do it. 7501 */ 7502 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) 7503 { 7504 struct tg3_napi *tnapi = dev_id; 7505 struct tg3 *tp = tnapi->tp; 7506 7507 prefetch(tnapi->hw_status); 7508 if (tnapi->rx_rcb) 7509 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); 7510 7511 if (likely(!tg3_irq_sync(tp))) 7512 napi_schedule(&tnapi->napi); 7513 7514 return IRQ_HANDLED; 7515 } 7516 7517 /* MSI ISR - No need to check for interrupt sharing and no need to 7518 * flush status block and interrupt mailbox. PCI ordering rules 7519 * guarantee that MSI will arrive after the status block. 
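 * (Both are posted writes from the device, so the status block DMA
 * becomes visible to the CPU before the MSI does.)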
7520 */ 7521 static irqreturn_t tg3_msi(int irq, void *dev_id) 7522 { 7523 struct tg3_napi *tnapi = dev_id; 7524 struct tg3 *tp = tnapi->tp; 7525 7526 prefetch(tnapi->hw_status); 7527 if (tnapi->rx_rcb) 7528 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); 7529 /* 7530 * Writing any value to intr-mbox-0 clears PCI INTA# and 7531 * chip-internal interrupt pending events. 7532 * Writing non-zero to intr-mbox-0 additional tells the 7533 * NIC to stop sending us irqs, engaging "in-intr-handler" 7534 * event coalescing. 7535 */ 7536 tw32_mailbox(tnapi->int_mbox, 0x00000001); 7537 if (likely(!tg3_irq_sync(tp))) 7538 napi_schedule(&tnapi->napi); 7539 7540 return IRQ_RETVAL(1); 7541 } 7542 7543 static irqreturn_t tg3_interrupt(int irq, void *dev_id) 7544 { 7545 struct tg3_napi *tnapi = dev_id; 7546 struct tg3 *tp = tnapi->tp; 7547 struct tg3_hw_status *sblk = tnapi->hw_status; 7548 unsigned int handled = 1; 7549 7550 /* In INTx mode, it is possible for the interrupt to arrive at 7551 * the CPU before the status block posted prior to the interrupt. 7552 * Reading the PCI State register will confirm whether the 7553 * interrupt is ours and will flush the status block. 7554 */ 7555 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) { 7556 if (tg3_flag(tp, CHIP_RESETTING) || 7557 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { 7558 handled = 0; 7559 goto out; 7560 } 7561 } 7562 7563 /* 7564 * Writing any value to intr-mbox-0 clears PCI INTA# and 7565 * chip-internal interrupt pending events. 7566 * Writing non-zero to intr-mbox-0 additional tells the 7567 * NIC to stop sending us irqs, engaging "in-intr-handler" 7568 * event coalescing. 7569 * 7570 * Flush the mailbox to de-assert the IRQ immediately to prevent 7571 * spurious interrupts. The flush impacts performance but 7572 * excessive spurious interrupts can be worse in some cases. 7573 */ 7574 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); 7575 if (tg3_irq_sync(tp)) 7576 goto out; 7577 sblk->status &= ~SD_STATUS_UPDATED; 7578 if (likely(tg3_has_work(tnapi))) { 7579 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); 7580 napi_schedule(&tnapi->napi); 7581 } else { 7582 /* No work, shared interrupt perhaps? re-enable 7583 * interrupts, and flush that PCI write 7584 */ 7585 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 7586 0x00000000); 7587 } 7588 out: 7589 return IRQ_RETVAL(handled); 7590 } 7591 7592 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) 7593 { 7594 struct tg3_napi *tnapi = dev_id; 7595 struct tg3 *tp = tnapi->tp; 7596 struct tg3_hw_status *sblk = tnapi->hw_status; 7597 unsigned int handled = 1; 7598 7599 /* In INTx mode, it is possible for the interrupt to arrive at 7600 * the CPU before the status block posted prior to the interrupt. 7601 * Reading the PCI State register will confirm whether the 7602 * interrupt is ours and will flush the status block. 7603 */ 7604 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) { 7605 if (tg3_flag(tp, CHIP_RESETTING) || 7606 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { 7607 handled = 0; 7608 goto out; 7609 } 7610 } 7611 7612 /* 7613 * writing any value to intr-mbox-0 clears PCI INTA# and 7614 * chip-internal interrupt pending events. 7615 * writing non-zero to intr-mbox-0 additional tells the 7616 * NIC to stop sending us irqs, engaging "in-intr-handler" 7617 * event coalescing. 7618 * 7619 * Flush the mailbox to de-assert the IRQ immediately to prevent 7620 * spurious interrupts. 
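 * (tw32_mailbox_f() is the flushing variant of the mailbox write.)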
The flush impacts performance but 7621 * excessive spurious interrupts can be worse in some cases. 7622 */ 7623 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); 7624 7625 /* 7626 * In a shared interrupt configuration, sometimes other devices' 7627 * interrupts will scream. We record the current status tag here 7628 * so that the above check can report that the screaming interrupts 7629 * are unhandled. Eventually they will be silenced. 7630 */ 7631 tnapi->last_irq_tag = sblk->status_tag; 7632 7633 if (tg3_irq_sync(tp)) 7634 goto out; 7635 7636 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); 7637 7638 napi_schedule(&tnapi->napi); 7639 7640 out: 7641 return IRQ_RETVAL(handled); 7642 } 7643 7644 /* ISR for interrupt test */ 7645 static irqreturn_t tg3_test_isr(int irq, void *dev_id) 7646 { 7647 struct tg3_napi *tnapi = dev_id; 7648 struct tg3 *tp = tnapi->tp; 7649 struct tg3_hw_status *sblk = tnapi->hw_status; 7650 7651 if ((sblk->status & SD_STATUS_UPDATED) || 7652 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { 7653 tg3_disable_ints(tp); 7654 return IRQ_RETVAL(1); 7655 } 7656 return IRQ_RETVAL(0); 7657 } 7658 7659 #ifdef CONFIG_NET_POLL_CONTROLLER 7660 static void tg3_poll_controller(struct net_device *dev) 7661 { 7662 int i; 7663 struct tg3 *tp = netdev_priv(dev); 7664 7665 if (tg3_irq_sync(tp)) 7666 return; 7667 7668 for (i = 0; i < tp->irq_cnt; i++) 7669 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]); 7670 } 7671 #endif 7672 7673 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue) 7674 { 7675 struct tg3 *tp = netdev_priv(dev); 7676 7677 if (netif_msg_tx_err(tp)) { 7678 netdev_err(dev, "transmit timed out, resetting\n"); 7679 tg3_dump_state(tp); 7680 } 7681 7682 tg3_reset_task_schedule(tp); 7683 } 7684 7685 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */ 7686 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) 7687 { 7688 u32 base = (u32) mapping & 0xffffffff; 7689 7690 return base + len + 8 < base; 7691 } 7692 7693 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes 7694 * of any 4GB boundaries: 4G, 8G, etc 7695 */ 7696 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping, 7697 u32 len, u32 mss) 7698 { 7699 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) { 7700 u32 base = (u32) mapping & 0xffffffff; 7701 7702 return ((base + len + (mss & 0x3fff)) < base); 7703 } 7704 return 0; 7705 } 7706 7707 /* Test for DMA addresses > 40-bit */ 7708 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, 7709 int len) 7710 { 7711 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) 7712 if (tg3_flag(tp, 40BIT_DMA_BUG)) 7713 return ((u64) mapping + len) > DMA_BIT_MASK(40); 7714 return 0; 7715 #else 7716 return 0; 7717 #endif 7718 } 7719 7720 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd, 7721 dma_addr_t mapping, u32 len, u32 flags, 7722 u32 mss, u32 vlan) 7723 { 7724 txbd->addr_hi = ((u64) mapping >> 32); 7725 txbd->addr_lo = ((u64) mapping & 0xffffffff); 7726 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff); 7727 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT); 7728 } 7729 7730 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, 7731 dma_addr_t map, u32 len, u32 flags, 7732 u32 mss, u32 vlan) 7733 { 7734 struct tg3 *tp = tnapi->tp; 7735 bool hwbug = false; 7736 7737 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8) 7738 hwbug = true; 7739 7740 if (tg3_4g_overflow_test(map, 
len)) 7741 hwbug = true; 7742 7743 if (tg3_4g_tso_overflow_test(tp, map, len, mss)) 7744 hwbug = true; 7745 7746 if (tg3_40bit_overflow_test(tp, map, len)) 7747 hwbug = true; 7748 7749 if (tp->dma_limit) { 7750 u32 prvidx = *entry; 7751 u32 tmp_flag = flags & ~TXD_FLAG_END; 7752 while (len > tp->dma_limit && *budget) { 7753 u32 frag_len = tp->dma_limit; 7754 len -= tp->dma_limit; 7755 7756 /* Avoid the 8byte DMA problem */ 7757 if (len <= 8) { 7758 len += tp->dma_limit / 2; 7759 frag_len = tp->dma_limit / 2; 7760 } 7761 7762 tnapi->tx_buffers[*entry].fragmented = true; 7763 7764 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, 7765 frag_len, tmp_flag, mss, vlan); 7766 *budget -= 1; 7767 prvidx = *entry; 7768 *entry = NEXT_TX(*entry); 7769 7770 map += frag_len; 7771 } 7772 7773 if (len) { 7774 if (*budget) { 7775 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, 7776 len, flags, mss, vlan); 7777 *budget -= 1; 7778 *entry = NEXT_TX(*entry); 7779 } else { 7780 hwbug = true; 7781 tnapi->tx_buffers[prvidx].fragmented = false; 7782 } 7783 } 7784 } else { 7785 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, 7786 len, flags, mss, vlan); 7787 *entry = NEXT_TX(*entry); 7788 } 7789 7790 return hwbug; 7791 } 7792 7793 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last) 7794 { 7795 int i; 7796 struct sk_buff *skb; 7797 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry]; 7798 7799 skb = txb->skb; 7800 txb->skb = NULL; 7801 7802 dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping), 7803 skb_headlen(skb), DMA_TO_DEVICE); 7804 7805 while (txb->fragmented) { 7806 txb->fragmented = false; 7807 entry = NEXT_TX(entry); 7808 txb = &tnapi->tx_buffers[entry]; 7809 } 7810 7811 for (i = 0; i <= last; i++) { 7812 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 7813 7814 entry = NEXT_TX(entry); 7815 txb = &tnapi->tx_buffers[entry]; 7816 7817 dma_unmap_page(&tnapi->tp->pdev->dev, 7818 dma_unmap_addr(txb, mapping), 7819 skb_frag_size(frag), DMA_TO_DEVICE); 7820 7821 while (txb->fragmented) { 7822 txb->fragmented = false; 7823 entry = NEXT_TX(entry); 7824 txb = &tnapi->tx_buffers[entry]; 7825 } 7826 } 7827 } 7828 7829 /* Workaround 4GB and 40-bit hardware DMA bugs. */ 7830 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, 7831 struct sk_buff **pskb, 7832 u32 *entry, u32 *budget, 7833 u32 base_flags, u32 mss, u32 vlan) 7834 { 7835 struct tg3 *tp = tnapi->tp; 7836 struct sk_buff *new_skb, *skb = *pskb; 7837 dma_addr_t new_addr = 0; 7838 int ret = 0; 7839 7840 if (tg3_asic_rev(tp) != ASIC_REV_5701) 7841 new_skb = skb_copy(skb, GFP_ATOMIC); 7842 else { 7843 int more_headroom = 4 - ((unsigned long)skb->data & 3); 7844 7845 new_skb = skb_copy_expand(skb, 7846 skb_headroom(skb) + more_headroom, 7847 skb_tailroom(skb), GFP_ATOMIC); 7848 } 7849 7850 if (!new_skb) { 7851 ret = -1; 7852 } else { 7853 /* New SKB is guaranteed to be linear. 
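 * (both skb_copy() and skb_copy_expand() return a copy with all data
 * in the head area), so the single dma_map_single() below covers the
 * whole frame.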
 */
		new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
					  new_skb->len, DMA_TO_DEVICE);
		/* Make sure the mapping succeeded */
		if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
			dev_kfree_skb_any(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb_any(new_skb);
				ret = -1;
			}
		}
	}

	dev_consume_skb_any(skb);
	*pskb = new_skb;
	return ret;
}

static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
{
	/* Check if we will never have enough descriptors,
	 * as gso_segs can be more than current ring size
	 */
	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
}

static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to work around all TSO packets that meet HW bug conditions
 * indicated in tg3_tx_frag_set()
 */
static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
		       struct netdev_queue *txq, struct sk_buff *skb)
{
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
	struct sk_buff *segs, *seg, *next;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_tx_wake_queue(txq);
	}

	segs = skb_gso_segment(skb, tp->dev->features &
				    ~(NETIF_F_TSO | NETIF_F_TSO6));
	if (IS_ERR(segs) || !segs)
		goto tg3_tso_bug_end;

	skb_list_walk_safe(segs, seg, next) {
		skb_mark_not_on_list(seg);
		tg3_start_xmit(seg, tp->dev);
	}

tg3_tso_bug_end:
	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}

/* hard_start_xmit for all devices */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;
	struct iphdr *iph = NULL;
	struct tcphdr *tcph = NULL;
	__sum16 tcp_csum = 0, ip_csum = 0;
	__be16 ip_tot_len = 0;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it.
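 * The queue claimed to be awake, yet there is no room for this
 * frame's descriptors, so the stop/wake accounting is out of sync.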
*/ 7968 netdev_err(dev, 7969 "BUG! Tx Ring full when queue awake!\n"); 7970 } 7971 return NETDEV_TX_BUSY; 7972 } 7973 7974 entry = tnapi->tx_prod; 7975 base_flags = 0; 7976 7977 mss = skb_shinfo(skb)->gso_size; 7978 if (mss) { 7979 u32 tcp_opt_len, hdr_len; 7980 7981 if (skb_cow_head(skb, 0)) 7982 goto drop; 7983 7984 iph = ip_hdr(skb); 7985 tcp_opt_len = tcp_optlen(skb); 7986 7987 hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN; 7988 7989 /* HW/FW can not correctly segment packets that have been 7990 * vlan encapsulated. 7991 */ 7992 if (skb->protocol == htons(ETH_P_8021Q) || 7993 skb->protocol == htons(ETH_P_8021AD)) { 7994 if (tg3_tso_bug_gso_check(tnapi, skb)) 7995 return tg3_tso_bug(tp, tnapi, txq, skb); 7996 goto drop; 7997 } 7998 7999 if (!skb_is_gso_v6(skb)) { 8000 if (unlikely((ETH_HLEN + hdr_len) > 80) && 8001 tg3_flag(tp, TSO_BUG)) { 8002 if (tg3_tso_bug_gso_check(tnapi, skb)) 8003 return tg3_tso_bug(tp, tnapi, txq, skb); 8004 goto drop; 8005 } 8006 ip_csum = iph->check; 8007 ip_tot_len = iph->tot_len; 8008 iph->check = 0; 8009 iph->tot_len = htons(mss + hdr_len); 8010 } 8011 8012 base_flags |= (TXD_FLAG_CPU_PRE_DMA | 8013 TXD_FLAG_CPU_POST_DMA); 8014 8015 tcph = tcp_hdr(skb); 8016 tcp_csum = tcph->check; 8017 8018 if (tg3_flag(tp, HW_TSO_1) || 8019 tg3_flag(tp, HW_TSO_2) || 8020 tg3_flag(tp, HW_TSO_3)) { 8021 tcph->check = 0; 8022 base_flags &= ~TXD_FLAG_TCPUDP_CSUM; 8023 } else { 8024 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 8025 0, IPPROTO_TCP, 0); 8026 } 8027 8028 if (tg3_flag(tp, HW_TSO_3)) { 8029 mss |= (hdr_len & 0xc) << 12; 8030 if (hdr_len & 0x10) 8031 base_flags |= 0x00000010; 8032 base_flags |= (hdr_len & 0x3e0) << 5; 8033 } else if (tg3_flag(tp, HW_TSO_2)) 8034 mss |= hdr_len << 9; 8035 else if (tg3_flag(tp, HW_TSO_1) || 8036 tg3_asic_rev(tp) == ASIC_REV_5705) { 8037 if (tcp_opt_len || iph->ihl > 5) { 8038 int tsflags; 8039 8040 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); 8041 mss |= (tsflags << 11); 8042 } 8043 } else { 8044 if (tcp_opt_len || iph->ihl > 5) { 8045 int tsflags; 8046 8047 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); 8048 base_flags |= tsflags << 12; 8049 } 8050 } 8051 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 8052 /* HW/FW can not correctly checksum packets that have been 8053 * vlan encapsulated. 
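 * Fall back to a software checksum via skb_checksum_help() for those.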
8054 */ 8055 if (skb->protocol == htons(ETH_P_8021Q) || 8056 skb->protocol == htons(ETH_P_8021AD)) { 8057 if (skb_checksum_help(skb)) 8058 goto drop; 8059 } else { 8060 base_flags |= TXD_FLAG_TCPUDP_CSUM; 8061 } 8062 } 8063 8064 if (tg3_flag(tp, USE_JUMBO_BDFLAG) && 8065 !mss && skb->len > VLAN_ETH_FRAME_LEN) 8066 base_flags |= TXD_FLAG_JMB_PKT; 8067 8068 if (skb_vlan_tag_present(skb)) { 8069 base_flags |= TXD_FLAG_VLAN; 8070 vlan = skb_vlan_tag_get(skb); 8071 } 8072 8073 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) && 8074 tg3_flag(tp, TX_TSTAMP_EN)) { 8075 tg3_full_lock(tp, 0); 8076 if (!tp->pre_tx_ts) { 8077 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 8078 base_flags |= TXD_FLAG_HWTSTAMP; 8079 tg3_read_tx_tstamp(tp, &tp->pre_tx_ts); 8080 } 8081 tg3_full_unlock(tp); 8082 } 8083 8084 len = skb_headlen(skb); 8085 8086 mapping = dma_map_single(&tp->pdev->dev, skb->data, len, 8087 DMA_TO_DEVICE); 8088 if (dma_mapping_error(&tp->pdev->dev, mapping)) 8089 goto drop; 8090 8091 8092 tnapi->tx_buffers[entry].skb = skb; 8093 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); 8094 8095 would_hit_hwbug = 0; 8096 8097 if (tg3_flag(tp, 5701_DMA_BUG)) 8098 would_hit_hwbug = 1; 8099 8100 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags | 8101 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0), 8102 mss, vlan)) { 8103 would_hit_hwbug = 1; 8104 } else if (skb_shinfo(skb)->nr_frags > 0) { 8105 u32 tmp_mss = mss; 8106 8107 if (!tg3_flag(tp, HW_TSO_1) && 8108 !tg3_flag(tp, HW_TSO_2) && 8109 !tg3_flag(tp, HW_TSO_3)) 8110 tmp_mss = 0; 8111 8112 /* Now loop through additional data 8113 * fragments, and queue them. 8114 */ 8115 last = skb_shinfo(skb)->nr_frags - 1; 8116 for (i = 0; i <= last; i++) { 8117 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 8118 8119 len = skb_frag_size(frag); 8120 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0, 8121 len, DMA_TO_DEVICE); 8122 8123 tnapi->tx_buffers[entry].skb = NULL; 8124 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, 8125 mapping); 8126 if (dma_mapping_error(&tp->pdev->dev, mapping)) 8127 goto dma_error; 8128 8129 if (!budget || 8130 tg3_tx_frag_set(tnapi, &entry, &budget, mapping, 8131 len, base_flags | 8132 ((i == last) ? TXD_FLAG_END : 0), 8133 tmp_mss, vlan)) { 8134 would_hit_hwbug = 1; 8135 break; 8136 } 8137 } 8138 } 8139 8140 if (would_hit_hwbug) { 8141 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); 8142 8143 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) { 8144 /* If it's a TSO packet, do GSO instead of 8145 * allocating and copying to a large linear SKB 8146 */ 8147 if (ip_tot_len) { 8148 iph->check = ip_csum; 8149 iph->tot_len = ip_tot_len; 8150 } 8151 tcph->check = tcp_csum; 8152 return tg3_tso_bug(tp, tnapi, txq, skb); 8153 } 8154 8155 /* If the workaround fails due to memory/mapping 8156 * failure, silently drop this packet. 
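 * (tigon3_dma_hwbug_workaround() consumes the original skb on every
 * path, which is why the error exit below is drop_nofree.)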
8157 */ 8158 entry = tnapi->tx_prod; 8159 budget = tg3_tx_avail(tnapi); 8160 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget, 8161 base_flags, mss, vlan)) 8162 goto drop_nofree; 8163 } 8164 8165 skb_tx_timestamp(skb); 8166 netdev_tx_sent_queue(txq, skb->len); 8167 8168 /* Sync BD data before updating mailbox */ 8169 wmb(); 8170 8171 tnapi->tx_prod = entry; 8172 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { 8173 netif_tx_stop_queue(txq); 8174 8175 /* netif_tx_stop_queue() must be done before checking 8176 * checking tx index in tg3_tx_avail() below, because in 8177 * tg3_tx(), we update tx index before checking for 8178 * netif_tx_queue_stopped(). 8179 */ 8180 smp_mb(); 8181 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) 8182 netif_tx_wake_queue(txq); 8183 } 8184 8185 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) { 8186 /* Packets are ready, update Tx producer idx on card. */ 8187 tw32_tx_mbox(tnapi->prodmbox, entry); 8188 } 8189 8190 return NETDEV_TX_OK; 8191 8192 dma_error: 8193 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i); 8194 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; 8195 drop: 8196 dev_kfree_skb_any(skb); 8197 drop_nofree: 8198 tp->tx_dropped++; 8199 return NETDEV_TX_OK; 8200 } 8201 8202 static void tg3_mac_loopback(struct tg3 *tp, bool enable) 8203 { 8204 if (enable) { 8205 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX | 8206 MAC_MODE_PORT_MODE_MASK); 8207 8208 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK; 8209 8210 if (!tg3_flag(tp, 5705_PLUS)) 8211 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 8212 8213 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) 8214 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 8215 else 8216 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 8217 } else { 8218 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK; 8219 8220 if (tg3_flag(tp, 5705_PLUS) || 8221 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) || 8222 tg3_asic_rev(tp) == ASIC_REV_5700) 8223 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; 8224 } 8225 8226 tw32(MAC_MODE, tp->mac_mode); 8227 udelay(40); 8228 } 8229 8230 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk) 8231 { 8232 u32 val, bmcr, mac_mode, ptest = 0; 8233 8234 tg3_phy_toggle_apd(tp, false); 8235 tg3_phy_toggle_automdix(tp, false); 8236 8237 if (extlpbk && tg3_phy_set_extloopbk(tp)) 8238 return -EIO; 8239 8240 bmcr = BMCR_FULLDPLX; 8241 switch (speed) { 8242 case SPEED_10: 8243 break; 8244 case SPEED_100: 8245 bmcr |= BMCR_SPEED100; 8246 break; 8247 case SPEED_1000: 8248 default: 8249 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 8250 speed = SPEED_100; 8251 bmcr |= BMCR_SPEED100; 8252 } else { 8253 speed = SPEED_1000; 8254 bmcr |= BMCR_SPEED1000; 8255 } 8256 } 8257 8258 if (extlpbk) { 8259 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 8260 tg3_readphy(tp, MII_CTRL1000, &val); 8261 val |= CTL1000_AS_MASTER | 8262 CTL1000_ENABLE_MASTER; 8263 tg3_writephy(tp, MII_CTRL1000, val); 8264 } else { 8265 ptest = MII_TG3_FET_PTEST_TRIM_SEL | 8266 MII_TG3_FET_PTEST_TRIM_2; 8267 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest); 8268 } 8269 } else 8270 bmcr |= BMCR_LOOPBACK; 8271 8272 tg3_writephy(tp, MII_BMCR, bmcr); 8273 8274 /* The write needs to be flushed for the FETs */ 8275 if (tp->phy_flags & TG3_PHYFLG_IS_FET) 8276 tg3_readphy(tp, MII_BMCR, &bmcr); 8277 8278 udelay(40); 8279 8280 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && 8281 tg3_asic_rev(tp) == ASIC_REV_5785) { 8282 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest | 8283 MII_TG3_FET_PTEST_FRC_TX_LINK | 8284 MII_TG3_FET_PTEST_FRC_TX_LOCK); 8285 8286 /* The write needs to be flushed for the AC131 */ 8287 
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}

static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, true);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}

static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}

static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}

static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}

/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
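 * Buffers posted by a previous run have already been reclaimed
 * by tg3_free_rings().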
 * tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}

static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}

static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_consume_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs.
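	 * Everything still posted from a previous run is reclaimed.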
	 */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tnapi->prodring.rx_std &&
		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}

static void tg3_mem_tx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;
	}
}

static int tg3_mem_tx_acquire(struct tg3 *tp)
{
	int i;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* If multivector TSS is enabled, vector 0 does not handle
	 * tx interrupts.  Don't allocate any resources for it.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
		tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
					    sizeof(struct tg3_tx_ring_info),
					    GFP_KERNEL);
		if (!tnapi->tx_buffers)
			goto err_out;

		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
						    TG3_TX_RING_BYTES,
						    &tnapi->tx_desc_mapping,
						    GFP_KERNEL);
		if (!tnapi->tx_ring)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_tx_release(tp);
	return -ENOMEM;
}

static void tg3_mem_rx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (!tnapi->rx_rcb)
			continue;

		dma_free_coherent(&tp->pdev->dev,
				  TG3_RX_RCB_RING_BYTES(tp),
				  tnapi->rx_rcb,
				  tnapi->rx_rcb_mapping);
		tnapi->rx_rcb = NULL;
	}
}

static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero.  This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
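 * Releases the status blocks, rx/tx ring memory and the hw stats block.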
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	/* tp->hw_stats can be referenced safely:
	 * 1. under rtnl_lock
	 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
	 */
	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping, GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}

#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
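			 * Callers then do not mistake this for a timeout.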
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		if (pci_channel_offline(tp->pdev)) {
			dev_err(&tp->pdev->dev,
				"tg3_stop_block device offline, "
				"ofs=%lx enable_bit=%x\n",
				ofs, enable_bit);
			return -ENODEV;
		}

		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}

/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, bool silent)
{
	int i, err;

	tg3_disable_ints(tp);

	if (pci_channel_offline(tp->pdev)) {
		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
		err = -ENODEV;
		goto err_no_dev;
	}

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

err_no_dev:
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}

/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}

/* Restore PCI state after chip reset */
static void
tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so we need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}

static void tg3_override_clk(struct tg3 *tp)
{
	u32 val;

	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5717:
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
		     TG3_CPMU_MAC_ORIDE_ENABLE);
		break;

	case ASIC_REV_5719:
	case ASIC_REV_5720:
		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
		break;

	default:
		return;
	}
}

static void tg3_restore_clk(struct tg3 *tp)
{
	u32 val;

	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5717:
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
		break;

	case ASIC_REV_5719:
	case ASIC_REV_5720:
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
		break;

	default:
		return;
	}
}

/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	if (!pci_device_is_present(tp->pdev))
		return -ENODEV;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
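	 * tg3_restore_pci_state() writes them back once the reset completes.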
	 */
	tg3_save_pci_state(tp);

	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	tg3_full_unlock(tp);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	tg3_full_lock(tp, 0);

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Set the clock to the highest frequency to avoid timeouts.  With link
	 * aware mode, the clock speed could be slow and bootcode does not
	 * complete within the expected time.  Override the clock to allow the
	 * bootcode to finish sooner and then restore it.
	 */
	tg3_override_clk(tp);

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.
 The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to do this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete. */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
					   PCI_EXP_DEVSTA_CED |
					   PCI_EXP_DEVSTA_NFED |
					   PCI_EXP_DEVSTA_FED |
					   PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tw32(GRC_MODE, tp->grc_mode);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	tg3_restore_clk(tp);

	/* Increase the core clock speed to fix tx timeout issue for 5762
	 * with 100Mbps link speed.
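	 * (the same MAC clock override bit tg3_override_clk() uses on the 5717)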
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
		     TG3_CPMU_MAC_ORIDE_ENABLE);
	}

	/* Reprobe ASF enable state. */
	tg3_flag_clear(tp, ENABLE_ASF);
	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
		}
	}

	return 0;
}

static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
static void __tg3_set_rx_mode(struct net_device *);

/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, false);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	return err;
}

static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0;
	bool skip_mac_1 = false;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(dev, addr->sa_data);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = true;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	__tg3_set_rx_mode(dev);
	spin_unlock_bh(&tp->lock);

	return err;
}

/* tp->lock is held.
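 * Writes one TG3_BDINFO ring control block into NIC SRAM.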
 */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}

static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

		for (; i < tp->txq_cnt; i++) {
			u32 reg;

			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}

static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;
	u32 limit = tp->rxq_cnt;

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
		limit--;
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	for (; i < limit; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}

static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	tg3_coal_tx_init(tp, ec);
	tg3_coal_rx_init(tp, ec);

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!tp->link_up)
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}

/* tp->lock is held. */
static void tg3_tx_rcbs_disable(struct tg3 *tp)
{
	u32 txrcb, limit;

	/* Disable all transmit rings but the first.
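	 * The number of send BDINFO blocks depends on the chip family.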
	 */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}

/* tp->lock is held. */
static void tg3_tx_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 txrcb = NIC_SRAM_SEND_RCB;

	if (tg3_flag(tp, ENABLE_TSS))
		i++;

	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->tx_ring)
			continue;

		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
	}
}

/* tp->lock is held. */
static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
{
	u32 rxrcb, limit;

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}

/* tp->lock is held. */
static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;

	if (tg3_flag(tp, ENABLE_RSS))
		i++;

	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->rx_rcb)
			continue;

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
			       BDINFO_FLAGS_MAXLEN_SHIFT, 0);
	}
}

/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk;
	struct tg3_napi *tnapi = &tp->napi[0];

	tg3_tx_rcbs_disable(tp);

	tg3_rx_ret_rcbs_disable(tp);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers.
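	 * Stale producer and consumer indices must not survive the reset.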
	 */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	tg3_tx_rcbs_init(tp);
	tg3_rx_ret_rcbs_init(tp);
}

static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}

/* Bit-by-bit little-endian CRC-32, used for the multicast hash filter. */
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= CRC32_POLY_LE;
		}
	}

	return ~reg;
}

static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}

static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
		rx_mode |= RX_MODE_PROMISC;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries to the mac addr filter list */
		int i = 0;
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, dev) {
			__tg3_set_one_mac_addr(tp, ha->addr,
					       i + TG3_UCAST_ADDR_IDX(tp));
			i++;
		}
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}

static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}

static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->rxq_cnt == 1) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}

/* Pack the 4-bit indirection table entries, eight per 32-bit register. */
static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}

static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
	else
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
}

/* tp->lock is held.
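 * tg3_reset_hw() performs the full bring-up: chip reset, ring and
 * buffer manager setup, and MAC/DMA engine configuration.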
 */
static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
		tg3_phy_pull_config(tp);
		tg3_eee_pull_config(tp, NULL);
		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	}

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
		tg3_setup_eee(tp);

	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (tg3_flag(tp, 57765_CLASS)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
			u32 grc_mode;

			/* Fix transmit hangs */
			val = tr32(TG3_CPMU_PADRNG_CTL);
			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
			tw32(TG3_CPMU_PADRNG_CTL, val);

			grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers.
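			 * GRC_MODE_PCIE_DL_SEL maps that block into the window.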
			 */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}

	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
		/* Enable some hw fixes. */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
		    tg3_asic_rev(tp) != ASIC_REV_5762)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
		   tg3_asic_rev(tp) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
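	 * (with CHECKSUM_PARTIAL skbs the stack stores the pseudo-header
	 * sum in the checksum field before the skb reaches the driver)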
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
	if (tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (tg3_flag(tp, PTP_CAPABLE))
		val |= GRC_MODE_TIME_SYNC_ENABLE;

	tw32(GRC_MODE, tp->grc_mode | val);

	/* On one AMD platform, MRRS is restricted to 4000 because of
	 * a south bridge limitation.  As a workaround, the driver sets
	 * MRRS to 2048 instead of the default 4096.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
	}

	/* Setup the timer prescaler register.  Clock is always 66 MHz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);

	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing. */
	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (tg3_asic_rev(tp) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);

	/* Initialize TG3_BDINFO's at:
	 *	RCVDBDI_STD_BD:		standard eth size rx ring
RCVDBDI_JUMBO_BD: jumbo frame rx ring 10201 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work) 10202 * 10203 * like so: 10204 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring 10205 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) | 10206 * ring attribute flags 10207 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM 10208 * 10209 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries. 10210 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries. 10211 * 10212 * The size of each ring is fixed in the firmware, but the location is 10213 * configurable. 10214 */ 10215 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 10216 ((u64) tpr->rx_std_mapping >> 32)); 10217 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 10218 ((u64) tpr->rx_std_mapping & 0xffffffff)); 10219 if (!tg3_flag(tp, 5717_PLUS)) 10220 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, 10221 NIC_SRAM_RX_BUFFER_DESC); 10222 10223 /* Disable the mini ring */ 10224 if (!tg3_flag(tp, 5705_PLUS)) 10225 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, 10226 BDINFO_FLAGS_DISABLED); 10227 10228 /* Program the jumbo buffer descriptor ring control 10229 * blocks on those devices that have them. 10230 */ 10231 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 10232 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) { 10233 10234 if (tg3_flag(tp, JUMBO_RING_ENABLE)) { 10235 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 10236 ((u64) tpr->rx_jmb_mapping >> 32)); 10237 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 10238 ((u64) tpr->rx_jmb_mapping & 0xffffffff)); 10239 val = TG3_RX_JMB_RING_SIZE(tp) << 10240 BDINFO_FLAGS_MAXLEN_SHIFT; 10241 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 10242 val | BDINFO_FLAGS_USE_EXT_RECV); 10243 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) || 10244 tg3_flag(tp, 57765_CLASS) || 10245 tg3_asic_rev(tp) == ASIC_REV_5762) 10246 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, 10247 NIC_SRAM_RX_JUMBO_BUFFER_DESC); 10248 } else { 10249 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 10250 BDINFO_FLAGS_DISABLED); 10251 } 10252 10253 if (tg3_flag(tp, 57765_PLUS)) { 10254 val = TG3_RX_STD_RING_SIZE(tp); 10255 val <<= BDINFO_FLAGS_MAXLEN_SHIFT; 10256 val |= (TG3_RX_STD_DMA_SZ << 2); 10257 } else 10258 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT; 10259 } else 10260 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT; 10261 10262 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); 10263 10264 tpr->rx_std_prod_idx = tp->rx_pending; 10265 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); 10266 10267 tpr->rx_jmb_prod_idx = 10268 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0; 10269 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); 10270 10271 tg3_rings_reset(tp); 10272 10273 /* Initialize MAC address and backoff seed. */ 10274 __tg3_set_mac_addr(tp, false); 10275 10276 /* MTU + ethernet header + FCS + optional VLAN tag */ 10277 tw32(MAC_RX_MTU_SIZE, 10278 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); 10279 10280 /* The slot time is changed by tg3_setup_phy if we 10281 * run at gigabit with half duplex. 
10282 */ 10283 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | 10284 (6 << TX_LENGTHS_IPG_SHIFT) | 10285 (32 << TX_LENGTHS_SLOT_TIME_SHIFT); 10286 10287 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10288 tg3_asic_rev(tp) == ASIC_REV_5762) 10289 val |= tr32(MAC_TX_LENGTHS) & 10290 (TX_LENGTHS_JMB_FRM_LEN_MSK | 10291 TX_LENGTHS_CNT_DWN_VAL_MSK); 10292 10293 tw32(MAC_TX_LENGTHS, val); 10294 10295 /* Receive rules. */ 10296 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); 10297 tw32(RCVLPC_CONFIG, 0x0181); 10298 10299 /* Calculate RDMAC_MODE setting early, we need it to determine 10300 * the RCVLPC_STATE_ENABLE mask. 10301 */ 10302 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB | 10303 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB | 10304 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB | 10305 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | 10306 RDMAC_MODE_LNGREAD_ENAB); 10307 10308 if (tg3_asic_rev(tp) == ASIC_REV_5717) 10309 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; 10310 10311 if (tg3_asic_rev(tp) == ASIC_REV_5784 || 10312 tg3_asic_rev(tp) == ASIC_REV_5785 || 10313 tg3_asic_rev(tp) == ASIC_REV_57780) 10314 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB | 10315 RDMAC_MODE_MBUF_RBD_CRPT_ENAB | 10316 RDMAC_MODE_MBUF_SBD_CRPT_ENAB; 10317 10318 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 10319 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 10320 if (tg3_flag(tp, TSO_CAPABLE)) { 10321 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; 10322 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 10323 !tg3_flag(tp, IS_5788)) { 10324 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 10325 } 10326 } 10327 10328 if (tg3_flag(tp, PCI_EXPRESS)) 10329 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 10330 10331 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 10332 tp->dma_limit = 0; 10333 if (tp->dev->mtu <= ETH_DATA_LEN) { 10334 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR; 10335 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K; 10336 } 10337 } 10338 10339 if (tg3_flag(tp, HW_TSO_1) || 10340 tg3_flag(tp, HW_TSO_2) || 10341 tg3_flag(tp, HW_TSO_3)) 10342 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; 10343 10344 if (tg3_flag(tp, 57765_PLUS) || 10345 tg3_asic_rev(tp) == ASIC_REV_5785 || 10346 tg3_asic_rev(tp) == ASIC_REV_57780) 10347 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; 10348 10349 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10350 tg3_asic_rev(tp) == ASIC_REV_5762) 10351 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET; 10352 10353 if (tg3_asic_rev(tp) == ASIC_REV_5761 || 10354 tg3_asic_rev(tp) == ASIC_REV_5784 || 10355 tg3_asic_rev(tp) == ASIC_REV_5785 || 10356 tg3_asic_rev(tp) == ASIC_REV_57780 || 10357 tg3_flag(tp, 57765_PLUS)) { 10358 u32 tgtreg; 10359 10360 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10361 tgtreg = TG3_RDMA_RSRVCTRL_REG2; 10362 else 10363 tgtreg = TG3_RDMA_RSRVCTRL_REG; 10364 10365 val = tr32(tgtreg); 10366 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 10367 tg3_asic_rev(tp) == ASIC_REV_5762) { 10368 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK | 10369 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK | 10370 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK); 10371 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B | 10372 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K | 10373 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K; 10374 } 10375 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 10376 } 10377 10378 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 10379 tg3_asic_rev(tp) == ASIC_REV_5720 || 10380 tg3_asic_rev(tp) == ASIC_REV_5762) { 10381 u32 tgtreg; 10382 10383 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10384 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2; 10385 else 10386 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL; 
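/* The write just below is a read-modify-write: judging by the names of the TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_*_4K bits, it caps the read DMA burst length at 4K for both plain buffer-descriptor fetches and LSO reads, while preserving whatever corruption-workaround bits are already set in the selected register. */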
10387 10388 val = tr32(tgtreg); 10389 tw32(tgtreg, val | 10390 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K | 10391 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K); 10392 } 10393 10394 /* Receive/send statistics. */ 10395 if (tg3_flag(tp, 5750_PLUS)) { 10396 val = tr32(RCVLPC_STATS_ENABLE); 10397 val &= ~RCVLPC_STATSENAB_DACK_FIX; 10398 tw32(RCVLPC_STATS_ENABLE, val); 10399 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && 10400 tg3_flag(tp, TSO_CAPABLE)) { 10401 val = tr32(RCVLPC_STATS_ENABLE); 10402 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; 10403 tw32(RCVLPC_STATS_ENABLE, val); 10404 } else { 10405 tw32(RCVLPC_STATS_ENABLE, 0xffffff); 10406 } 10407 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE); 10408 tw32(SNDDATAI_STATSENAB, 0xffffff); 10409 tw32(SNDDATAI_STATSCTRL, 10410 (SNDDATAI_SCTRL_ENABLE | 10411 SNDDATAI_SCTRL_FASTUPD)); 10412 10413 /* Setup host coalescing engine. */ 10414 tw32(HOSTCC_MODE, 0); 10415 for (i = 0; i < 2000; i++) { 10416 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE)) 10417 break; 10418 udelay(10); 10419 } 10420 10421 __tg3_set_coalesce(tp, &tp->coal); 10422 10423 if (!tg3_flag(tp, 5705_PLUS)) { 10424 /* Status/statistics block address. See tg3_timer, 10425 * the tg3_periodic_fetch_stats call there, and 10426 * tg3_get_stats to see how this works for 5705/5750 chips. 10427 */ 10428 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 10429 ((u64) tp->stats_mapping >> 32)); 10430 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, 10431 ((u64) tp->stats_mapping & 0xffffffff)); 10432 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK); 10433 10434 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK); 10435 10436 /* Clear statistics and status block memory areas */ 10437 for (i = NIC_SRAM_STATS_BLK; 10438 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE; 10439 i += sizeof(u32)) { 10440 tg3_write_mem(tp, i, 0); 10441 udelay(40); 10442 } 10443 } 10444 10445 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode); 10446 10447 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); 10448 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); 10449 if (!tg3_flag(tp, 5705_PLUS)) 10450 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); 10451 10452 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { 10453 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 10454 /* reset to prevent losing 1st rx packet intermittently */ 10455 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 10456 udelay(10); 10457 } 10458 10459 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | 10460 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | 10461 MAC_MODE_FHDE_ENABLE; 10462 if (tg3_flag(tp, ENABLE_APE)) 10463 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 10464 if (!tg3_flag(tp, 5705_PLUS) && 10465 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 10466 tg3_asic_rev(tp) != ASIC_REV_5700) 10467 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 10468 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); 10469 udelay(40); 10470 10471 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). 10472 * If TG3_FLAG_IS_NIC is zero, we should read the 10473 * register to preserve the GPIO settings for LOMs. The GPIOs, 10474 * whether used as inputs or outputs, are set by boot code after 10475 * reset. 
10476 */ 10477 if (!tg3_flag(tp, IS_NIC)) { 10478 u32 gpio_mask; 10479 10480 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | 10481 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 | 10482 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2; 10483 10484 if (tg3_asic_rev(tp) == ASIC_REV_5752) 10485 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 | 10486 GRC_LCLCTRL_GPIO_OUTPUT3; 10487 10488 if (tg3_asic_rev(tp) == ASIC_REV_5755) 10489 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL; 10490 10491 tp->grc_local_ctrl &= ~gpio_mask; 10492 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; 10493 10494 /* GPIO1 must be driven high for eeprom write protect */ 10495 if (tg3_flag(tp, EEPROM_WRITE_PROT)) 10496 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | 10497 GRC_LCLCTRL_GPIO_OUTPUT1); 10498 } 10499 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 10500 udelay(100); 10501 10502 if (tg3_flag(tp, USING_MSIX)) { 10503 val = tr32(MSGINT_MODE); 10504 val |= MSGINT_MODE_ENABLE; 10505 if (tp->irq_cnt > 1) 10506 val |= MSGINT_MODE_MULTIVEC_EN; 10507 if (!tg3_flag(tp, 1SHOT_MSI)) 10508 val |= MSGINT_MODE_ONE_SHOT_DISABLE; 10509 tw32(MSGINT_MODE, val); 10510 } 10511 10512 if (!tg3_flag(tp, 5705_PLUS)) { 10513 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); 10514 udelay(40); 10515 } 10516 10517 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB | 10518 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB | 10519 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB | 10520 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | 10521 WDMAC_MODE_LNGREAD_ENAB); 10522 10523 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 10524 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 10525 if (tg3_flag(tp, TSO_CAPABLE) && 10526 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 || 10527 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) { 10528 /* nothing */ 10529 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 10530 !tg3_flag(tp, IS_5788)) { 10531 val |= WDMAC_MODE_RX_ACCEL; 10532 } 10533 } 10534 10535 /* Enable host coalescing bug fix */ 10536 if (tg3_flag(tp, 5755_PLUS)) 10537 val |= WDMAC_MODE_STATUS_TAG_FIX; 10538 10539 if (tg3_asic_rev(tp) == ASIC_REV_5785) 10540 val |= WDMAC_MODE_BURST_ALL_DATA; 10541 10542 tw32_f(WDMAC_MODE, val); 10543 udelay(40); 10544 10545 if (tg3_flag(tp, PCIX_MODE)) { 10546 u16 pcix_cmd; 10547 10548 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 10549 &pcix_cmd); 10550 if (tg3_asic_rev(tp) == ASIC_REV_5703) { 10551 pcix_cmd &= ~PCI_X_CMD_MAX_READ; 10552 pcix_cmd |= PCI_X_CMD_READ_2K; 10553 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) { 10554 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ); 10555 pcix_cmd |= PCI_X_CMD_READ_2K; 10556 } 10557 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 10558 pcix_cmd); 10559 } 10560 10561 tw32_f(RDMAC_MODE, rdmac_mode); 10562 udelay(40); 10563 10564 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 10565 tg3_asic_rev(tp) == ASIC_REV_5720) { 10566 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) { 10567 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp)) 10568 break; 10569 } 10570 if (i < TG3_NUM_RDMA_CHANNELS) { 10571 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); 10572 val |= tg3_lso_rd_dma_workaround_bit(tp); 10573 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); 10574 tg3_flag_set(tp, 5719_5720_RDMA_BUG); 10575 } 10576 } 10577 10578 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); 10579 if (!tg3_flag(tp, 5705_PLUS)) 10580 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); 10581 10582 if (tg3_asic_rev(tp) == ASIC_REV_5761) 10583 tw32(SNDDATAC_MODE, 10584 SNDDATAC_MODE_ENABLE | 
SNDDATAC_MODE_CDELAY); 10585 else 10586 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE); 10587 10588 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); 10589 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); 10590 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ; 10591 if (tg3_flag(tp, LRG_PROD_RING_CAP)) 10592 val |= RCVDBDI_MODE_LRG_RING_SZ; 10593 tw32(RCVDBDI_MODE, val); 10594 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); 10595 if (tg3_flag(tp, HW_TSO_1) || 10596 tg3_flag(tp, HW_TSO_2) || 10597 tg3_flag(tp, HW_TSO_3)) 10598 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); 10599 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; 10600 if (tg3_flag(tp, ENABLE_TSS)) 10601 val |= SNDBDI_MODE_MULTI_TXQ_EN; 10602 tw32(SNDBDI_MODE, val); 10603 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); 10604 10605 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { 10606 err = tg3_load_5701_a0_firmware_fix(tp); 10607 if (err) 10608 return err; 10609 } 10610 10611 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 10612 /* Ignore any errors for the firmware download. If download 10613 * fails, the device will operate with EEE disabled 10614 */ 10615 tg3_load_57766_firmware(tp); 10616 } 10617 10618 if (tg3_flag(tp, TSO_CAPABLE)) { 10619 err = tg3_load_tso_firmware(tp); 10620 if (err) 10621 return err; 10622 } 10623 10624 tp->tx_mode = TX_MODE_ENABLE; 10625 10626 if (tg3_flag(tp, 5755_PLUS) || 10627 tg3_asic_rev(tp) == ASIC_REV_5906) 10628 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX; 10629 10630 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10631 tg3_asic_rev(tp) == ASIC_REV_5762) { 10632 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE; 10633 tp->tx_mode &= ~val; 10634 tp->tx_mode |= tr32(MAC_TX_MODE) & val; 10635 } 10636 10637 tw32_f(MAC_TX_MODE, tp->tx_mode); 10638 udelay(100); 10639 10640 if (tg3_flag(tp, ENABLE_RSS)) { 10641 u32 rss_key[10]; 10642 10643 tg3_rss_write_indir_tbl(tp); 10644 10645 netdev_rss_key_fill(rss_key, 10 * sizeof(u32)); 10646 10647 for (i = 0; i < 10 ; i++) 10648 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]); 10649 } 10650 10651 tp->rx_mode = RX_MODE_ENABLE; 10652 if (tg3_flag(tp, 5755_PLUS)) 10653 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; 10654 10655 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10656 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX; 10657 10658 if (tg3_flag(tp, ENABLE_RSS)) 10659 tp->rx_mode |= RX_MODE_RSS_ENABLE | 10660 RX_MODE_RSS_ITBL_HASH_BITS_7 | 10661 RX_MODE_RSS_IPV6_HASH_EN | 10662 RX_MODE_RSS_TCP_IPV6_HASH_EN | 10663 RX_MODE_RSS_IPV4_HASH_EN | 10664 RX_MODE_RSS_TCP_IPV4_HASH_EN; 10665 10666 tw32_f(MAC_RX_MODE, tp->rx_mode); 10667 udelay(10); 10668 10669 tw32(MAC_LED_CTRL, tp->led_ctrl); 10670 10671 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 10672 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 10673 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 10674 udelay(10); 10675 } 10676 tw32_f(MAC_RX_MODE, tp->rx_mode); 10677 udelay(10); 10678 10679 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 10680 if ((tg3_asic_rev(tp) == ASIC_REV_5704) && 10681 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) { 10682 /* Set drive transmission level to 1.2V */ 10683 /* only if the signal pre-emphasis bit is not set */ 10684 val = tr32(MAC_SERDES_CFG); 10685 val &= 0xfffff000; 10686 val |= 0x880; 10687 tw32(MAC_SERDES_CFG, val); 10688 } 10689 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) 10690 tw32(MAC_SERDES_CFG, 0x616000); 10691 } 10692 10693 /* Prevent chip from dropping frames when flow control 10694 * is enabled. 
10695 */ 10696 if (tg3_flag(tp, 57765_CLASS)) 10697 val = 1; 10698 else 10699 val = 2; 10700 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val); 10701 10702 if (tg3_asic_rev(tp) == ASIC_REV_5704 && 10703 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 10704 /* Use hardware link auto-negotiation */ 10705 tg3_flag_set(tp, HW_AUTONEG); 10706 } 10707 10708 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 10709 tg3_asic_rev(tp) == ASIC_REV_5714) { 10710 u32 tmp; 10711 10712 tmp = tr32(SERDES_RX_CTRL); 10713 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT); 10714 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT; 10715 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT; 10716 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 10717 } 10718 10719 if (!tg3_flag(tp, USE_PHYLIB)) { 10720 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 10721 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; 10722 10723 err = tg3_setup_phy(tp, false); 10724 if (err) 10725 return err; 10726 10727 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 10728 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 10729 u32 tmp; 10730 10731 /* Clear CRC stats. */ 10732 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { 10733 tg3_writephy(tp, MII_TG3_TEST1, 10734 tmp | MII_TG3_TEST1_CRC_EN); 10735 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp); 10736 } 10737 } 10738 } 10739 10740 __tg3_set_rx_mode(tp->dev); 10741 10742 /* Initialize receive rules. */ 10743 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK); 10744 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK); 10745 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK); 10746 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); 10747 10748 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) 10749 limit = 8; 10750 else 10751 limit = 16; 10752 if (tg3_flag(tp, ENABLE_ASF)) 10753 limit -= 4; 10754 switch (limit) { 10755 case 16: 10756 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); 10757 fallthrough; 10758 case 15: 10759 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); 10760 fallthrough; 10761 case 14: 10762 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); 10763 fallthrough; 10764 case 13: 10765 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); 10766 fallthrough; 10767 case 12: 10768 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); 10769 fallthrough; 10770 case 11: 10771 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); 10772 fallthrough; 10773 case 10: 10774 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); 10775 fallthrough; 10776 case 9: 10777 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); 10778 fallthrough; 10779 case 8: 10780 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); 10781 fallthrough; 10782 case 7: 10783 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); 10784 fallthrough; 10785 case 6: 10786 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); 10787 fallthrough; 10788 case 5: 10789 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0); 10790 fallthrough; 10791 case 4: 10792 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ 10793 case 3: 10794 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */ 10795 case 2: 10796 case 1: 10797 10798 default: 10799 break; 10800 } 10801 10802 if (tg3_flag(tp, ENABLE_APE)) 10803 /* Write our heartbeat update interval to APE. */ 10804 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, 10805 APE_HOST_HEARTBEAT_INT_5SEC); 10806 10807 tg3_write_sig_post_reset(tp, RESET_KIND_INIT); 10808 10809 return 0; 10810 } 10811 10812 /* Called at device open time to get the chip ready for 10813 * packet processing. Invoked with tp->lock held. 
10814 */ 10815 static int tg3_init_hw(struct tg3 *tp, bool reset_phy) 10816 { 10817 /* Chip may have been just powered on. If so, the boot code may still 10818 * be running initialization. Wait for it to finish to avoid races in 10819 * accessing the hardware. 10820 */ 10821 tg3_enable_register_access(tp); 10822 tg3_poll_fw(tp); 10823 10824 tg3_switch_clocks(tp); 10825 10826 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 10827 10828 return tg3_reset_hw(tp, reset_phy); 10829 } 10830 10831 #ifdef CONFIG_TIGON3_HWMON 10832 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir) 10833 { 10834 u32 off, len = TG3_OCIR_LEN; 10835 int i; 10836 10837 for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) { 10838 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len); 10839 10840 if (ocir->signature != TG3_OCIR_SIG_MAGIC || 10841 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE)) 10842 memset(ocir, 0, len); 10843 } 10844 } 10845 10846 /* sysfs attributes for hwmon */ 10847 static ssize_t tg3_show_temp(struct device *dev, 10848 struct device_attribute *devattr, char *buf) 10849 { 10850 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 10851 struct tg3 *tp = dev_get_drvdata(dev); 10852 u32 temperature; 10853 10854 spin_lock_bh(&tp->lock); 10855 tg3_ape_scratchpad_read(tp, &temperature, attr->index, 10856 sizeof(temperature)); 10857 spin_unlock_bh(&tp->lock); 10858 return sprintf(buf, "%u\n", temperature * 1000); 10859 } 10860 10861 10862 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL, 10863 TG3_TEMP_SENSOR_OFFSET); 10864 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL, 10865 TG3_TEMP_CAUTION_OFFSET); 10866 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL, 10867 TG3_TEMP_MAX_OFFSET); 10868 10869 static struct attribute *tg3_attrs[] = { 10870 &sensor_dev_attr_temp1_input.dev_attr.attr, 10871 &sensor_dev_attr_temp1_crit.dev_attr.attr, 10872 &sensor_dev_attr_temp1_max.dev_attr.attr, 10873 NULL 10874 }; 10875 ATTRIBUTE_GROUPS(tg3); 10876 10877 static void tg3_hwmon_close(struct tg3 *tp) 10878 { 10879 if (tp->hwmon_dev) { 10880 hwmon_device_unregister(tp->hwmon_dev); 10881 tp->hwmon_dev = NULL; 10882 } 10883 } 10884 10885 static void tg3_hwmon_open(struct tg3 *tp) 10886 { 10887 int i; 10888 u32 size = 0; 10889 struct pci_dev *pdev = tp->pdev; 10890 struct tg3_ocir ocirs[TG3_SD_NUM_RECS]; 10891 10892 tg3_sd_scan_scratchpad(tp, ocirs); 10893 10894 for (i = 0; i < TG3_SD_NUM_RECS; i++) { 10895 if (!ocirs[i].src_data_length) 10896 continue; 10897 10898 size += ocirs[i].src_hdr_length; 10899 size += ocirs[i].src_data_length; 10900 } 10901 10902 if (!size) 10903 return; 10904 10905 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3", 10906 tp, tg3_groups); 10907 if (IS_ERR(tp->hwmon_dev)) { 10908 tp->hwmon_dev = NULL; 10909 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n"); 10910 } 10911 } 10912 #else 10913 static inline void tg3_hwmon_close(struct tg3 *tp) { } 10914 static inline void tg3_hwmon_open(struct tg3 *tp) { } 10915 #endif /* CONFIG_TIGON3_HWMON */ 10916 10917 10918 #define TG3_STAT_ADD32(PSTAT, REG) \ 10919 do { u32 __val = tr32(REG); \ 10920 (PSTAT)->low += __val; \ 10921 if ((PSTAT)->low < __val) \ 10922 (PSTAT)->high += 1; \ 10923 } while (0) 10924 10925 static void tg3_periodic_fetch_stats(struct tg3 *tp) 10926 { 10927 struct tg3_hw_stats *sp = tp->hw_stats; 10928 10929 if (!tp->link_up) 10930 return; 10931 10932 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS); 10933 
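/* Each TG3_STAT_ADD32() here folds a 32-bit hardware counter into a 64-bit {high, low} software counter. After "low += val", low can only be smaller than val if the 32-bit addition wrapped, and that is exactly when high must be carried: for example, low = 0xffffff00 plus val = 0x200 leaves low = 0x100, which is less than 0x200, so high is incremented by one. */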
TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS); 10934 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT); 10935 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT); 10936 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS); 10937 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS); 10938 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS); 10939 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED); 10940 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL); 10941 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL); 10942 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST); 10943 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST); 10944 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST); 10945 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) && 10946 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low + 10947 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) { 10948 u32 val; 10949 10950 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); 10951 val &= ~tg3_lso_rd_dma_workaround_bit(tp); 10952 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); 10953 tg3_flag_clear(tp, 5719_5720_RDMA_BUG); 10954 } 10955 10956 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS); 10957 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS); 10958 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST); 10959 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST); 10960 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST); 10961 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS); 10962 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS); 10963 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD); 10964 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD); 10965 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD); 10966 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED); 10967 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG); 10968 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS); 10969 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); 10970 10971 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); 10972 if (tg3_asic_rev(tp) != ASIC_REV_5717 && 10973 tg3_asic_rev(tp) != ASIC_REV_5762 && 10974 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 && 10975 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) { 10976 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); 10977 } else { 10978 u32 val = tr32(HOSTCC_FLOW_ATTN); 10979 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 
1 : 0; 10980 if (val) { 10981 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM); 10982 sp->rx_discards.low += val; 10983 if (sp->rx_discards.low < val) 10984 sp->rx_discards.high += 1; 10985 } 10986 sp->mbuf_lwm_thresh_hit = sp->rx_discards; 10987 } 10988 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); 10989 } 10990 10991 static void tg3_chk_missed_msi(struct tg3 *tp) 10992 { 10993 u32 i; 10994 10995 for (i = 0; i < tp->irq_cnt; i++) { 10996 struct tg3_napi *tnapi = &tp->napi[i]; 10997 10998 if (tg3_has_work(tnapi)) { 10999 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr && 11000 tnapi->last_tx_cons == tnapi->tx_cons) { 11001 if (tnapi->chk_msi_cnt < 1) { 11002 tnapi->chk_msi_cnt++; 11003 return; 11004 } 11005 tg3_msi(0, tnapi); 11006 } 11007 } 11008 tnapi->chk_msi_cnt = 0; 11009 tnapi->last_rx_cons = tnapi->rx_rcb_ptr; 11010 tnapi->last_tx_cons = tnapi->tx_cons; 11011 } 11012 } 11013 11014 static void tg3_timer(struct timer_list *t) 11015 { 11016 struct tg3 *tp = from_timer(tp, t, timer); 11017 11018 spin_lock(&tp->lock); 11019 11020 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) { 11021 spin_unlock(&tp->lock); 11022 goto restart_timer; 11023 } 11024 11025 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 11026 tg3_flag(tp, 57765_CLASS)) 11027 tg3_chk_missed_msi(tp); 11028 11029 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) { 11030 /* BCM4785: Flush posted writes from GbE to host memory. */ 11031 tr32(HOSTCC_MODE); 11032 } 11033 11034 if (!tg3_flag(tp, TAGGED_STATUS)) { 11035 /* All of this garbage is needed because, when using non-tagged 11036 * IRQ status, the mailbox/status_block protocol the chip 11037 * uses with the CPU is race prone. 11038 */ 11039 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) { 11040 tw32(GRC_LOCAL_CTRL, 11041 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); 11042 } else { 11043 tw32(HOSTCC_MODE, tp->coalesce_mode | 11044 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW); 11045 } 11046 11047 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { 11048 spin_unlock(&tp->lock); 11049 tg3_reset_task_schedule(tp); 11050 goto restart_timer; 11051 } 11052 } 11053 11054 /* This part only runs once per second.
*/ 11055 if (!--tp->timer_counter) { 11056 if (tg3_flag(tp, 5705_PLUS)) 11057 tg3_periodic_fetch_stats(tp); 11058 11059 if (tp->setlpicnt && !--tp->setlpicnt) 11060 tg3_phy_eee_enable(tp); 11061 11062 if (tg3_flag(tp, USE_LINKCHG_REG)) { 11063 u32 mac_stat; 11064 int phy_event; 11065 11066 mac_stat = tr32(MAC_STATUS); 11067 11068 phy_event = 0; 11069 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) { 11070 if (mac_stat & MAC_STATUS_MI_INTERRUPT) 11071 phy_event = 1; 11072 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED) 11073 phy_event = 1; 11074 11075 if (phy_event) 11076 tg3_setup_phy(tp, false); 11077 } else if (tg3_flag(tp, POLL_SERDES)) { 11078 u32 mac_stat = tr32(MAC_STATUS); 11079 int need_setup = 0; 11080 11081 if (tp->link_up && 11082 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) { 11083 need_setup = 1; 11084 } 11085 if (!tp->link_up && 11086 (mac_stat & (MAC_STATUS_PCS_SYNCED | 11087 MAC_STATUS_SIGNAL_DET))) { 11088 need_setup = 1; 11089 } 11090 if (need_setup) { 11091 if (!tp->serdes_counter) { 11092 tw32_f(MAC_MODE, 11093 (tp->mac_mode & 11094 ~MAC_MODE_PORT_MODE_MASK)); 11095 udelay(40); 11096 tw32_f(MAC_MODE, tp->mac_mode); 11097 udelay(40); 11098 } 11099 tg3_setup_phy(tp, false); 11100 } 11101 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 11102 tg3_flag(tp, 5780_CLASS)) { 11103 tg3_serdes_parallel_detect(tp); 11104 } else if (tg3_flag(tp, POLL_CPMU_LINK)) { 11105 u32 cpmu = tr32(TG3_CPMU_STATUS); 11106 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) == 11107 TG3_CPMU_STATUS_LINK_MASK); 11108 11109 if (link_up != tp->link_up) 11110 tg3_setup_phy(tp, false); 11111 } 11112 11113 tp->timer_counter = tp->timer_multiplier; 11114 } 11115 11116 /* Heartbeat is only sent once every 2 seconds. 11117 * 11118 * The heartbeat is to tell the ASF firmware that the host 11119 * driver is still alive. In the event that the OS crashes, 11120 * ASF needs to reset the hardware to free up the FIFO space 11121 * that may be filled with rx packets destined for the host. 11122 * If the FIFO is full, ASF will no longer function properly. 11123 * 11124 * Unintended resets have been reported on real time kernels 11125 * where the timer doesn't run on time. Netpoll will also have 11126 * the same problem. 11127 * 11128 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware 11129 * to check the ring condition when the heartbeat is expiring 11130 * before doing the reset. This will prevent most unintended 11131 * resets.
11132 */ 11133 if (!--tp->asf_counter) { 11134 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { 11135 tg3_wait_for_event_ack(tp); 11136 11137 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, 11138 FWCMD_NICDRV_ALIVE3); 11139 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); 11140 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 11141 TG3_FW_UPDATE_TIMEOUT_SEC); 11142 11143 tg3_generate_fw_event(tp); 11144 } 11145 tp->asf_counter = tp->asf_multiplier; 11146 } 11147 11148 /* Update the APE heartbeat every 5 seconds.*/ 11149 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL); 11150 11151 spin_unlock(&tp->lock); 11152 11153 restart_timer: 11154 tp->timer.expires = jiffies + tp->timer_offset; 11155 add_timer(&tp->timer); 11156 } 11157 11158 static void tg3_timer_init(struct tg3 *tp) 11159 { 11160 if (tg3_flag(tp, TAGGED_STATUS) && 11161 tg3_asic_rev(tp) != ASIC_REV_5717 && 11162 !tg3_flag(tp, 57765_CLASS)) 11163 tp->timer_offset = HZ; 11164 else 11165 tp->timer_offset = HZ / 10; 11166 11167 BUG_ON(tp->timer_offset > HZ); 11168 11169 tp->timer_multiplier = (HZ / tp->timer_offset); 11170 tp->asf_multiplier = (HZ / tp->timer_offset) * 11171 TG3_FW_UPDATE_FREQ_SEC; 11172 11173 timer_setup(&tp->timer, tg3_timer, 0); 11174 } 11175 11176 static void tg3_timer_start(struct tg3 *tp) 11177 { 11178 tp->asf_counter = tp->asf_multiplier; 11179 tp->timer_counter = tp->timer_multiplier; 11180 11181 tp->timer.expires = jiffies + tp->timer_offset; 11182 add_timer(&tp->timer); 11183 } 11184 11185 static void tg3_timer_stop(struct tg3 *tp) 11186 { 11187 del_timer_sync(&tp->timer); 11188 } 11189 11190 /* Restart hardware after configuration changes, self-test, etc. 11191 * Invoked with tp->lock held. 11192 */ 11193 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy) 11194 __releases(tp->lock) 11195 __acquires(tp->lock) 11196 { 11197 int err; 11198 11199 err = tg3_init_hw(tp, reset_phy); 11200 if (err) { 11201 netdev_err(tp->dev, 11202 "Failed to re-initialize device, aborting\n"); 11203 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11204 tg3_full_unlock(tp); 11205 tg3_timer_stop(tp); 11206 tp->irq_sync = 0; 11207 tg3_napi_enable(tp); 11208 dev_close(tp->dev); 11209 tg3_full_lock(tp, 0); 11210 } 11211 return err; 11212 } 11213 11214 static void tg3_reset_task(struct work_struct *work) 11215 { 11216 struct tg3 *tp = container_of(work, struct tg3, reset_task); 11217 int err; 11218 11219 rtnl_lock(); 11220 tg3_full_lock(tp, 0); 11221 11222 if (tp->pcierr_recovery || !netif_running(tp->dev)) { 11223 tg3_flag_clear(tp, RESET_TASK_PENDING); 11224 tg3_full_unlock(tp); 11225 rtnl_unlock(); 11226 return; 11227 } 11228 11229 tg3_full_unlock(tp); 11230 11231 tg3_phy_stop(tp); 11232 11233 tg3_netif_stop(tp); 11234 11235 tg3_full_lock(tp, 1); 11236 11237 if (tg3_flag(tp, TX_RECOVERY_PENDING)) { 11238 tp->write32_tx_mbox = tg3_write32_tx_mbox; 11239 tp->write32_rx_mbox = tg3_write_flush_reg32; 11240 tg3_flag_set(tp, MBOX_WRITE_REORDER); 11241 tg3_flag_clear(tp, TX_RECOVERY_PENDING); 11242 } 11243 11244 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); 11245 err = tg3_init_hw(tp, true); 11246 if (err) { 11247 tg3_full_unlock(tp); 11248 tp->irq_sync = 0; 11249 tg3_napi_enable(tp); 11250 /* Clear this flag so that tg3_reset_task_cancel() will not 11251 * call cancel_work_sync() and wait forever. 
11252 */ 11253 tg3_flag_clear(tp, RESET_TASK_PENDING); 11254 dev_close(tp->dev); 11255 goto out; 11256 } 11257 11258 tg3_netif_start(tp); 11259 tg3_full_unlock(tp); 11260 tg3_phy_start(tp); 11261 tg3_flag_clear(tp, RESET_TASK_PENDING); 11262 out: 11263 rtnl_unlock(); 11264 } 11265 11266 static int tg3_request_irq(struct tg3 *tp, int irq_num) 11267 { 11268 irq_handler_t fn; 11269 unsigned long flags; 11270 char *name; 11271 struct tg3_napi *tnapi = &tp->napi[irq_num]; 11272 11273 if (tp->irq_cnt == 1) 11274 name = tp->dev->name; 11275 else { 11276 name = &tnapi->irq_lbl[0]; 11277 if (tnapi->tx_buffers && tnapi->rx_rcb) 11278 snprintf(name, IFNAMSIZ, 11279 "%s-txrx-%d", tp->dev->name, irq_num); 11280 else if (tnapi->tx_buffers) 11281 snprintf(name, IFNAMSIZ, 11282 "%s-tx-%d", tp->dev->name, irq_num); 11283 else if (tnapi->rx_rcb) 11284 snprintf(name, IFNAMSIZ, 11285 "%s-rx-%d", tp->dev->name, irq_num); 11286 else 11287 snprintf(name, IFNAMSIZ, 11288 "%s-%d", tp->dev->name, irq_num); 11289 name[IFNAMSIZ-1] = 0; 11290 } 11291 11292 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { 11293 fn = tg3_msi; 11294 if (tg3_flag(tp, 1SHOT_MSI)) 11295 fn = tg3_msi_1shot; 11296 flags = 0; 11297 } else { 11298 fn = tg3_interrupt; 11299 if (tg3_flag(tp, TAGGED_STATUS)) 11300 fn = tg3_interrupt_tagged; 11301 flags = IRQF_SHARED; 11302 } 11303 11304 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); 11305 } 11306 11307 static int tg3_test_interrupt(struct tg3 *tp) 11308 { 11309 struct tg3_napi *tnapi = &tp->napi[0]; 11310 struct net_device *dev = tp->dev; 11311 int err, i, intr_ok = 0; 11312 u32 val; 11313 11314 if (!netif_running(dev)) 11315 return -ENODEV; 11316 11317 tg3_disable_ints(tp); 11318 11319 free_irq(tnapi->irq_vec, tnapi); 11320 11321 /* 11322 * Turn off MSI one shot mode. Otherwise this test has no 11323 * observable way to know whether the interrupt was delivered. 11324 */ 11325 if (tg3_flag(tp, 57765_PLUS)) { 11326 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; 11327 tw32(MSGINT_MODE, val); 11328 } 11329 11330 err = request_irq(tnapi->irq_vec, tg3_test_isr, 11331 IRQF_SHARED, dev->name, tnapi); 11332 if (err) 11333 return err; 11334 11335 tnapi->hw_status->status &= ~SD_STATUS_UPDATED; 11336 tg3_enable_ints(tp); 11337 11338 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 11339 tnapi->coal_now); 11340 11341 for (i = 0; i < 5; i++) { 11342 u32 int_mbox, misc_host_ctrl; 11343 11344 int_mbox = tr32_mailbox(tnapi->int_mbox); 11345 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 11346 11347 if ((int_mbox != 0) || 11348 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) { 11349 intr_ok = 1; 11350 break; 11351 } 11352 11353 if (tg3_flag(tp, 57765_PLUS) && 11354 tnapi->hw_status->status_tag != tnapi->last_tag) 11355 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); 11356 11357 msleep(10); 11358 } 11359 11360 tg3_disable_ints(tp); 11361 11362 free_irq(tnapi->irq_vec, tnapi); 11363 11364 err = tg3_request_irq(tp, 0); 11365 11366 if (err) 11367 return err; 11368 11369 if (intr_ok) { 11370 /* Reenable MSI one shot mode. 
*/ 11371 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) { 11372 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; 11373 tw32(MSGINT_MODE, val); 11374 } 11375 return 0; 11376 } 11377 11378 return -EIO; 11379 } 11380 11381 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but 11382 * INTx mode is successfully restored. 11383 */ 11384 static int tg3_test_msi(struct tg3 *tp) 11385 { 11386 int err; 11387 u16 pci_cmd; 11388 11389 if (!tg3_flag(tp, USING_MSI)) 11390 return 0; 11391 11392 /* Turn off SERR reporting in case MSI terminates with Master 11393 * Abort. 11394 */ 11395 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 11396 pci_write_config_word(tp->pdev, PCI_COMMAND, 11397 pci_cmd & ~PCI_COMMAND_SERR); 11398 11399 err = tg3_test_interrupt(tp); 11400 11401 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 11402 11403 if (!err) 11404 return 0; 11405 11406 /* other failures */ 11407 if (err != -EIO) 11408 return err; 11409 11410 /* MSI test failed, go back to INTx mode */ 11411 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching " 11412 "to INTx mode. Please report this failure to the PCI " 11413 "maintainer and include system chipset information\n"); 11414 11415 free_irq(tp->napi[0].irq_vec, &tp->napi[0]); 11416 11417 pci_disable_msi(tp->pdev); 11418 11419 tg3_flag_clear(tp, USING_MSI); 11420 tp->napi[0].irq_vec = tp->pdev->irq; 11421 11422 err = tg3_request_irq(tp, 0); 11423 if (err) 11424 return err; 11425 11426 /* Need to reset the chip because the MSI cycle may have terminated 11427 * with Master Abort. 11428 */ 11429 tg3_full_lock(tp, 1); 11430 11431 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11432 err = tg3_init_hw(tp, true); 11433 11434 tg3_full_unlock(tp); 11435 11436 if (err) 11437 free_irq(tp->napi[0].irq_vec, &tp->napi[0]); 11438 11439 return err; 11440 } 11441 11442 static int tg3_request_firmware(struct tg3 *tp) 11443 { 11444 const struct tg3_firmware_hdr *fw_hdr; 11445 11446 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) { 11447 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n", 11448 tp->fw_needed); 11449 return -ENOENT; 11450 } 11451 11452 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 11453 11454 /* The firmware blob starts with the version numbers, followed by 11455 * the start address and the _full_ length including BSS sections 11456 * (which must be longer than the actual data, of course). 11457 */ 11458 11459 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */ 11460 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) { 11461 netdev_err(tp->dev, "bogus length %d in \"%s\"\n", 11462 tp->fw_len, tp->fw_needed); 11463 release_firmware(tp->fw); 11464 tp->fw = NULL; 11465 return -EINVAL; 11466 } 11467 11468 /* We no longer need firmware; we have it. */ 11469 tp->fw_needed = NULL; 11470 return 0; 11471 } 11472 11473 static u32 tg3_irq_count(struct tg3 *tp) 11474 { 11475 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt); 11476 11477 if (irq_cnt > 1) { 11478 /* We want as many rx rings enabled as there are cpus. 11479 * In multiqueue MSI-X mode, the first MSI-X vector 11480 * only deals with link interrupts, etc, so we add 11481 * one to the number of vectors we are requesting.
11482 */ 11483 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max); 11484 } 11485 11486 return irq_cnt; 11487 } 11488 11489 static bool tg3_enable_msix(struct tg3 *tp) 11490 { 11491 int i, rc; 11492 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS]; 11493 11494 tp->txq_cnt = tp->txq_req; 11495 tp->rxq_cnt = tp->rxq_req; 11496 if (!tp->rxq_cnt) 11497 tp->rxq_cnt = netif_get_num_default_rss_queues(); 11498 if (tp->rxq_cnt > tp->rxq_max) 11499 tp->rxq_cnt = tp->rxq_max; 11500 11501 /* Disable multiple TX rings by default. Simple round-robin hardware 11502 * scheduling of the TX rings can cause starvation of rings with 11503 * small packets when other rings have TSO or jumbo packets. 11504 */ 11505 if (!tp->txq_req) 11506 tp->txq_cnt = 1; 11507 11508 tp->irq_cnt = tg3_irq_count(tp); 11509 11510 for (i = 0; i < tp->irq_max; i++) { 11511 msix_ent[i].entry = i; 11512 msix_ent[i].vector = 0; 11513 } 11514 11515 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt); 11516 if (rc < 0) { 11517 return false; 11518 } else if (rc < tp->irq_cnt) { 11519 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n", 11520 tp->irq_cnt, rc); 11521 tp->irq_cnt = rc; 11522 tp->rxq_cnt = max(rc - 1, 1); 11523 if (tp->txq_cnt) 11524 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max); 11525 } 11526 11527 for (i = 0; i < tp->irq_max; i++) 11528 tp->napi[i].irq_vec = msix_ent[i].vector; 11529 11530 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) { 11531 pci_disable_msix(tp->pdev); 11532 return false; 11533 } 11534 11535 if (tp->irq_cnt == 1) 11536 return true; 11537 11538 tg3_flag_set(tp, ENABLE_RSS); 11539 11540 if (tp->txq_cnt > 1) 11541 tg3_flag_set(tp, ENABLE_TSS); 11542 11543 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt); 11544 11545 return true; 11546 } 11547 11548 static void tg3_ints_init(struct tg3 *tp) 11549 { 11550 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) && 11551 !tg3_flag(tp, TAGGED_STATUS)) { 11552 /* All MSI supporting chips should support tagged 11553 * status. Assert that this is the case. 11554 */ 11555 netdev_warn(tp->dev, 11556 "MSI without TAGGED_STATUS? 
Not using MSI\n"); 11557 goto defcfg; 11558 } 11559 11560 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp)) 11561 tg3_flag_set(tp, USING_MSIX); 11562 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0) 11563 tg3_flag_set(tp, USING_MSI); 11564 11565 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { 11566 u32 msi_mode = tr32(MSGINT_MODE); 11567 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) 11568 msi_mode |= MSGINT_MODE_MULTIVEC_EN; 11569 if (!tg3_flag(tp, 1SHOT_MSI)) 11570 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE; 11571 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); 11572 } 11573 defcfg: 11574 if (!tg3_flag(tp, USING_MSIX)) { 11575 tp->irq_cnt = 1; 11576 tp->napi[0].irq_vec = tp->pdev->irq; 11577 } 11578 11579 if (tp->irq_cnt == 1) { 11580 tp->txq_cnt = 1; 11581 tp->rxq_cnt = 1; 11582 netif_set_real_num_tx_queues(tp->dev, 1); 11583 netif_set_real_num_rx_queues(tp->dev, 1); 11584 } 11585 } 11586 11587 static void tg3_ints_fini(struct tg3 *tp) 11588 { 11589 if (tg3_flag(tp, USING_MSIX)) 11590 pci_disable_msix(tp->pdev); 11591 else if (tg3_flag(tp, USING_MSI)) 11592 pci_disable_msi(tp->pdev); 11593 tg3_flag_clear(tp, USING_MSI); 11594 tg3_flag_clear(tp, USING_MSIX); 11595 tg3_flag_clear(tp, ENABLE_RSS); 11596 tg3_flag_clear(tp, ENABLE_TSS); 11597 } 11598 11599 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, 11600 bool init) 11601 { 11602 struct net_device *dev = tp->dev; 11603 int i, err; 11604 11605 /* 11606 * Setup interrupts first so we know how 11607 * many NAPI resources to allocate 11608 */ 11609 tg3_ints_init(tp); 11610 11611 tg3_rss_check_indir_tbl(tp); 11612 11613 /* The placement of this call is tied 11614 * to the setup and use of Host TX descriptors. 11615 */ 11616 err = tg3_alloc_consistent(tp); 11617 if (err) 11618 goto out_ints_fini; 11619 11620 tg3_napi_init(tp); 11621 11622 tg3_napi_enable(tp); 11623 11624 for (i = 0; i < tp->irq_cnt; i++) { 11625 err = tg3_request_irq(tp, i); 11626 if (err) { 11627 for (i--; i >= 0; i--) { 11628 struct tg3_napi *tnapi = &tp->napi[i]; 11629 11630 free_irq(tnapi->irq_vec, tnapi); 11631 } 11632 goto out_napi_fini; 11633 } 11634 } 11635 11636 tg3_full_lock(tp, 0); 11637 11638 if (init) 11639 tg3_ape_driver_state_change(tp, RESET_KIND_INIT); 11640 11641 err = tg3_init_hw(tp, reset_phy); 11642 if (err) { 11643 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11644 tg3_free_rings(tp); 11645 } 11646 11647 tg3_full_unlock(tp); 11648 11649 if (err) 11650 goto out_free_irq; 11651 11652 if (test_irq && tg3_flag(tp, USING_MSI)) { 11653 err = tg3_test_msi(tp); 11654 11655 if (err) { 11656 tg3_full_lock(tp, 0); 11657 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11658 tg3_free_rings(tp); 11659 tg3_full_unlock(tp); 11660 11661 goto out_napi_fini; 11662 } 11663 11664 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) { 11665 u32 val = tr32(PCIE_TRANSACTION_CFG); 11666 11667 tw32(PCIE_TRANSACTION_CFG, 11668 val | PCIE_TRANS_CFG_1SHOT_MSI); 11669 } 11670 } 11671 11672 tg3_phy_start(tp); 11673 11674 tg3_hwmon_open(tp); 11675 11676 tg3_full_lock(tp, 0); 11677 11678 tg3_timer_start(tp); 11679 tg3_flag_set(tp, INIT_COMPLETE); 11680 tg3_enable_ints(tp); 11681 11682 tg3_ptp_resume(tp); 11683 11684 tg3_full_unlock(tp); 11685 11686 netif_tx_start_all_queues(dev); 11687 11688 /* 11689 * Reset the loopback feature if it was turned on while the device was 11690 * down; make sure that it is configured properly now.
11691 */ 11692 if (dev->features & NETIF_F_LOOPBACK) 11693 tg3_set_loopback(dev, dev->features); 11694 11695 return 0; 11696 11697 out_free_irq: 11698 for (i = tp->irq_cnt - 1; i >= 0; i--) { 11699 struct tg3_napi *tnapi = &tp->napi[i]; 11700 free_irq(tnapi->irq_vec, tnapi); 11701 } 11702 11703 out_napi_fini: 11704 tg3_napi_disable(tp); 11705 tg3_napi_fini(tp); 11706 tg3_free_consistent(tp); 11707 11708 out_ints_fini: 11709 tg3_ints_fini(tp); 11710 11711 return err; 11712 } 11713 11714 static void tg3_stop(struct tg3 *tp) 11715 { 11716 int i; 11717 11718 tg3_reset_task_cancel(tp); 11719 tg3_netif_stop(tp); 11720 11721 tg3_timer_stop(tp); 11722 11723 tg3_hwmon_close(tp); 11724 11725 tg3_phy_stop(tp); 11726 11727 tg3_full_lock(tp, 1); 11728 11729 tg3_disable_ints(tp); 11730 11731 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11732 tg3_free_rings(tp); 11733 tg3_flag_clear(tp, INIT_COMPLETE); 11734 11735 tg3_full_unlock(tp); 11736 11737 for (i = tp->irq_cnt - 1; i >= 0; i--) { 11738 struct tg3_napi *tnapi = &tp->napi[i]; 11739 free_irq(tnapi->irq_vec, tnapi); 11740 } 11741 11742 tg3_ints_fini(tp); 11743 11744 tg3_napi_fini(tp); 11745 11746 tg3_free_consistent(tp); 11747 } 11748 11749 static int tg3_open(struct net_device *dev) 11750 { 11751 struct tg3 *tp = netdev_priv(dev); 11752 int err; 11753 11754 if (tp->pcierr_recovery) { 11755 netdev_err(dev, "Failed to open device. PCI error recovery " 11756 "in progress\n"); 11757 return -EAGAIN; 11758 } 11759 11760 if (tp->fw_needed) { 11761 err = tg3_request_firmware(tp); 11762 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 11763 if (err) { 11764 netdev_warn(tp->dev, "EEE capability disabled\n"); 11765 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; 11766 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { 11767 netdev_warn(tp->dev, "EEE capability restored\n"); 11768 tp->phy_flags |= TG3_PHYFLG_EEE_CAP; 11769 } 11770 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { 11771 if (err) 11772 return err; 11773 } else if (err) { 11774 netdev_warn(tp->dev, "TSO capability disabled\n"); 11775 tg3_flag_clear(tp, TSO_CAPABLE); 11776 } else if (!tg3_flag(tp, TSO_CAPABLE)) { 11777 netdev_notice(tp->dev, "TSO capability restored\n"); 11778 tg3_flag_set(tp, TSO_CAPABLE); 11779 } 11780 } 11781 11782 tg3_carrier_off(tp); 11783 11784 err = tg3_power_up(tp); 11785 if (err) 11786 return err; 11787 11788 tg3_full_lock(tp, 0); 11789 11790 tg3_disable_ints(tp); 11791 tg3_flag_clear(tp, INIT_COMPLETE); 11792 11793 tg3_full_unlock(tp); 11794 11795 err = tg3_start(tp, 11796 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN), 11797 true, true); 11798 if (err) { 11799 tg3_frob_aux_power(tp, false); 11800 pci_set_power_state(tp->pdev, PCI_D3hot); 11801 } 11802 11803 return err; 11804 } 11805 11806 static int tg3_close(struct net_device *dev) 11807 { 11808 struct tg3 *tp = netdev_priv(dev); 11809 11810 if (tp->pcierr_recovery) { 11811 netdev_err(dev, "Failed to close device. 
PCI error recovery " 11812 "in progress\n"); 11813 return -EAGAIN; 11814 } 11815 11816 tg3_stop(tp); 11817 11818 if (pci_device_is_present(tp->pdev)) { 11819 tg3_power_down_prepare(tp); 11820 11821 tg3_carrier_off(tp); 11822 } 11823 return 0; 11824 } 11825 11826 static inline u64 get_stat64(tg3_stat64_t *val) 11827 { 11828 return ((u64)val->high << 32) | ((u64)val->low); 11829 } 11830 11831 static u64 tg3_calc_crc_errors(struct tg3 *tp) 11832 { 11833 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11834 11835 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 11836 (tg3_asic_rev(tp) == ASIC_REV_5700 || 11837 tg3_asic_rev(tp) == ASIC_REV_5701)) { 11838 u32 val; 11839 11840 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) { 11841 tg3_writephy(tp, MII_TG3_TEST1, 11842 val | MII_TG3_TEST1_CRC_EN); 11843 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val); 11844 } else 11845 val = 0; 11846 11847 tp->phy_crc_errors += val; 11848 11849 return tp->phy_crc_errors; 11850 } 11851 11852 return get_stat64(&hw_stats->rx_fcs_errors); 11853 } 11854 11855 #define ESTAT_ADD(member) \ 11856 estats->member = old_estats->member + \ 11857 get_stat64(&hw_stats->member) 11858 11859 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats) 11860 { 11861 struct tg3_ethtool_stats *old_estats = &tp->estats_prev; 11862 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11863 11864 ESTAT_ADD(rx_octets); 11865 ESTAT_ADD(rx_fragments); 11866 ESTAT_ADD(rx_ucast_packets); 11867 ESTAT_ADD(rx_mcast_packets); 11868 ESTAT_ADD(rx_bcast_packets); 11869 ESTAT_ADD(rx_fcs_errors); 11870 ESTAT_ADD(rx_align_errors); 11871 ESTAT_ADD(rx_xon_pause_rcvd); 11872 ESTAT_ADD(rx_xoff_pause_rcvd); 11873 ESTAT_ADD(rx_mac_ctrl_rcvd); 11874 ESTAT_ADD(rx_xoff_entered); 11875 ESTAT_ADD(rx_frame_too_long_errors); 11876 ESTAT_ADD(rx_jabbers); 11877 ESTAT_ADD(rx_undersize_packets); 11878 ESTAT_ADD(rx_in_length_errors); 11879 ESTAT_ADD(rx_out_length_errors); 11880 ESTAT_ADD(rx_64_or_less_octet_packets); 11881 ESTAT_ADD(rx_65_to_127_octet_packets); 11882 ESTAT_ADD(rx_128_to_255_octet_packets); 11883 ESTAT_ADD(rx_256_to_511_octet_packets); 11884 ESTAT_ADD(rx_512_to_1023_octet_packets); 11885 ESTAT_ADD(rx_1024_to_1522_octet_packets); 11886 ESTAT_ADD(rx_1523_to_2047_octet_packets); 11887 ESTAT_ADD(rx_2048_to_4095_octet_packets); 11888 ESTAT_ADD(rx_4096_to_8191_octet_packets); 11889 ESTAT_ADD(rx_8192_to_9022_octet_packets); 11890 11891 ESTAT_ADD(tx_octets); 11892 ESTAT_ADD(tx_collisions); 11893 ESTAT_ADD(tx_xon_sent); 11894 ESTAT_ADD(tx_xoff_sent); 11895 ESTAT_ADD(tx_flow_control); 11896 ESTAT_ADD(tx_mac_errors); 11897 ESTAT_ADD(tx_single_collisions); 11898 ESTAT_ADD(tx_mult_collisions); 11899 ESTAT_ADD(tx_deferred); 11900 ESTAT_ADD(tx_excessive_collisions); 11901 ESTAT_ADD(tx_late_collisions); 11902 ESTAT_ADD(tx_collide_2times); 11903 ESTAT_ADD(tx_collide_3times); 11904 ESTAT_ADD(tx_collide_4times); 11905 ESTAT_ADD(tx_collide_5times); 11906 ESTAT_ADD(tx_collide_6times); 11907 ESTAT_ADD(tx_collide_7times); 11908 ESTAT_ADD(tx_collide_8times); 11909 ESTAT_ADD(tx_collide_9times); 11910 ESTAT_ADD(tx_collide_10times); 11911 ESTAT_ADD(tx_collide_11times); 11912 ESTAT_ADD(tx_collide_12times); 11913 ESTAT_ADD(tx_collide_13times); 11914 ESTAT_ADD(tx_collide_14times); 11915 ESTAT_ADD(tx_collide_15times); 11916 ESTAT_ADD(tx_ucast_packets); 11917 ESTAT_ADD(tx_mcast_packets); 11918 ESTAT_ADD(tx_bcast_packets); 11919 ESTAT_ADD(tx_carrier_sense_errors); 11920 ESTAT_ADD(tx_discards); 11921 ESTAT_ADD(tx_errors); 11922 11923 ESTAT_ADD(dma_writeq_full); 11924 
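/* As with the counter accumulation above, each ESTAT_ADD() rebases the live hardware counter onto the snapshot saved in tp->estats_prev, i.e. total = previously accumulated value + current hardware value, so the ethtool totals keep growing across the counter clears that accompany a chip reset. */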
ESTAT_ADD(dma_write_prioq_full); 11925 ESTAT_ADD(rxbds_empty); 11926 ESTAT_ADD(rx_discards); 11927 ESTAT_ADD(rx_errors); 11928 ESTAT_ADD(rx_threshold_hit); 11929 11930 ESTAT_ADD(dma_readq_full); 11931 ESTAT_ADD(dma_read_prioq_full); 11932 ESTAT_ADD(tx_comp_queue_full); 11933 11934 ESTAT_ADD(ring_set_send_prod_index); 11935 ESTAT_ADD(ring_status_update); 11936 ESTAT_ADD(nic_irqs); 11937 ESTAT_ADD(nic_avoided_irqs); 11938 ESTAT_ADD(nic_tx_threshold_hit); 11939 11940 ESTAT_ADD(mbuf_lwm_thresh_hit); 11941 } 11942 11943 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats) 11944 { 11945 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev; 11946 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11947 11948 stats->rx_packets = old_stats->rx_packets + 11949 get_stat64(&hw_stats->rx_ucast_packets) + 11950 get_stat64(&hw_stats->rx_mcast_packets) + 11951 get_stat64(&hw_stats->rx_bcast_packets); 11952 11953 stats->tx_packets = old_stats->tx_packets + 11954 get_stat64(&hw_stats->tx_ucast_packets) + 11955 get_stat64(&hw_stats->tx_mcast_packets) + 11956 get_stat64(&hw_stats->tx_bcast_packets); 11957 11958 stats->rx_bytes = old_stats->rx_bytes + 11959 get_stat64(&hw_stats->rx_octets); 11960 stats->tx_bytes = old_stats->tx_bytes + 11961 get_stat64(&hw_stats->tx_octets); 11962 11963 stats->rx_errors = old_stats->rx_errors + 11964 get_stat64(&hw_stats->rx_errors); 11965 stats->tx_errors = old_stats->tx_errors + 11966 get_stat64(&hw_stats->tx_errors) + 11967 get_stat64(&hw_stats->tx_mac_errors) + 11968 get_stat64(&hw_stats->tx_carrier_sense_errors) + 11969 get_stat64(&hw_stats->tx_discards); 11970 11971 stats->multicast = old_stats->multicast + 11972 get_stat64(&hw_stats->rx_mcast_packets); 11973 stats->collisions = old_stats->collisions + 11974 get_stat64(&hw_stats->tx_collisions); 11975 11976 stats->rx_length_errors = old_stats->rx_length_errors + 11977 get_stat64(&hw_stats->rx_frame_too_long_errors) + 11978 get_stat64(&hw_stats->rx_undersize_packets); 11979 11980 stats->rx_frame_errors = old_stats->rx_frame_errors + 11981 get_stat64(&hw_stats->rx_align_errors); 11982 stats->tx_aborted_errors = old_stats->tx_aborted_errors + 11983 get_stat64(&hw_stats->tx_discards); 11984 stats->tx_carrier_errors = old_stats->tx_carrier_errors + 11985 get_stat64(&hw_stats->tx_carrier_sense_errors); 11986 11987 stats->rx_crc_errors = old_stats->rx_crc_errors + 11988 tg3_calc_crc_errors(tp); 11989 11990 stats->rx_missed_errors = old_stats->rx_missed_errors + 11991 get_stat64(&hw_stats->rx_discards); 11992 11993 stats->rx_dropped = tp->rx_dropped; 11994 stats->tx_dropped = tp->tx_dropped; 11995 } 11996 11997 static int tg3_get_regs_len(struct net_device *dev) 11998 { 11999 return TG3_REG_BLK_SIZE; 12000 } 12001 12002 static void tg3_get_regs(struct net_device *dev, 12003 struct ethtool_regs *regs, void *_p) 12004 { 12005 struct tg3 *tp = netdev_priv(dev); 12006 12007 regs->version = 0; 12008 12009 memset(_p, 0, TG3_REG_BLK_SIZE); 12010 12011 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 12012 return; 12013 12014 tg3_full_lock(tp, 0); 12015 12016 tg3_dump_legacy_regs(tp, (u32 *)_p); 12017 12018 tg3_full_unlock(tp); 12019 } 12020 12021 static int tg3_get_eeprom_len(struct net_device *dev) 12022 { 12023 struct tg3 *tp = netdev_priv(dev); 12024 12025 return tp->nvram_size; 12026 } 12027 12028 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 12029 { 12030 struct tg3 *tp = netdev_priv(dev); 12031 int ret, cpmu_restore = 0; 12032 u8 *pd; 12033 u32 i, offset, len, 
b_offset, b_count, cpmu_val = 0; 12034 __be32 val; 12035 12036 if (tg3_flag(tp, NO_NVRAM)) 12037 return -EINVAL; 12038 12039 offset = eeprom->offset; 12040 len = eeprom->len; 12041 eeprom->len = 0; 12042 12043 eeprom->magic = TG3_EEPROM_MAGIC; 12044 12045 /* Override clock, link aware and link idle modes */ 12046 if (tg3_flag(tp, CPMU_PRESENT)) { 12047 cpmu_val = tr32(TG3_CPMU_CTRL); 12048 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE | 12049 CPMU_CTRL_LINK_IDLE_MODE)) { 12050 tw32(TG3_CPMU_CTRL, cpmu_val & 12051 ~(CPMU_CTRL_LINK_AWARE_MODE | 12052 CPMU_CTRL_LINK_IDLE_MODE)); 12053 cpmu_restore = 1; 12054 } 12055 } 12056 tg3_override_clk(tp); 12057 12058 if (offset & 3) { 12059 /* adjustments to start on required 4 byte boundary */ 12060 b_offset = offset & 3; 12061 b_count = 4 - b_offset; 12062 if (b_count > len) { 12063 /* i.e. offset=1 len=2 */ 12064 b_count = len; 12065 } 12066 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); 12067 if (ret) 12068 goto eeprom_done; 12069 memcpy(data, ((char *)&val) + b_offset, b_count); 12070 len -= b_count; 12071 offset += b_count; 12072 eeprom->len += b_count; 12073 } 12074 12075 /* read bytes up to the last 4 byte boundary */ 12076 pd = &data[eeprom->len]; 12077 for (i = 0; i < (len - (len & 3)); i += 4) { 12078 ret = tg3_nvram_read_be32(tp, offset + i, &val); 12079 if (ret) { 12080 if (i) 12081 i -= 4; 12082 eeprom->len += i; 12083 goto eeprom_done; 12084 } 12085 memcpy(pd + i, &val, 4); 12086 if (need_resched()) { 12087 if (signal_pending(current)) { 12088 eeprom->len += i; 12089 ret = -EINTR; 12090 goto eeprom_done; 12091 } 12092 cond_resched(); 12093 } 12094 } 12095 eeprom->len += i; 12096 12097 if (len & 3) { 12098 /* read last bytes not ending on 4 byte boundary */ 12099 pd = &data[eeprom->len]; 12100 b_count = len & 3; 12101 b_offset = offset + len - b_count; 12102 ret = tg3_nvram_read_be32(tp, b_offset, &val); 12103 if (ret) 12104 goto eeprom_done; 12105 memcpy(pd, &val, b_count); 12106 eeprom->len += b_count; 12107 } 12108 ret = 0; 12109 12110 eeprom_done: 12111 /* Restore clock, link aware and link idle modes */ 12112 tg3_restore_clk(tp); 12113 if (cpmu_restore) 12114 tw32(TG3_CPMU_CTRL, cpmu_val); 12115 12116 return ret; 12117 } 12118 12119 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 12120 { 12121 struct tg3 *tp = netdev_priv(dev); 12122 int ret; 12123 u32 offset, len, b_offset, odd_len; 12124 u8 *buf; 12125 __be32 start = 0, end; 12126 12127 if (tg3_flag(tp, NO_NVRAM) || 12128 eeprom->magic != TG3_EEPROM_MAGIC) 12129 return -EINVAL; 12130 12131 offset = eeprom->offset; 12132 len = eeprom->len; 12133 12134 if ((b_offset = (offset & 3))) { 12135 /* adjustments to start on required 4 byte boundary */ 12136 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start); 12137 if (ret) 12138 return ret; 12139 len += b_offset; 12140 offset &= ~3; 12141 if (len < 4) 12142 len = 4; 12143 } 12144 12145 odd_len = 0; 12146 if (len & 3) { 12147 /* adjustments to end on required 4 byte boundary */ 12148 odd_len = 1; 12149 len = (len + 3) & ~3; 12150 ret = tg3_nvram_read_be32(tp, offset+len-4, &end); 12151 if (ret) 12152 return ret; 12153 } 12154 12155 buf = data; 12156 if (b_offset || odd_len) { 12157 buf = kmalloc(len, GFP_KERNEL); 12158 if (!buf) 12159 return -ENOMEM; 12160 if (b_offset) 12161 memcpy(buf, &start, 4); 12162 if (odd_len) 12163 memcpy(buf+len-4, &end, 4); 12164 memcpy(buf + b_offset, data, eeprom->len); 12165 } 12166 12167 ret = tg3_nvram_write_block(tp, offset, len, buf); 12168 12169 if (buf != 
data) 12170 kfree(buf); 12171 12172 return ret; 12173 } 12174 12175 static int tg3_get_link_ksettings(struct net_device *dev, 12176 struct ethtool_link_ksettings *cmd) 12177 { 12178 struct tg3 *tp = netdev_priv(dev); 12179 u32 supported, advertising; 12180 12181 if (tg3_flag(tp, USE_PHYLIB)) { 12182 struct phy_device *phydev; 12183 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12184 return -EAGAIN; 12185 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12186 phy_ethtool_ksettings_get(phydev, cmd); 12187 12188 return 0; 12189 } 12190 12191 supported = (SUPPORTED_Autoneg); 12192 12193 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) 12194 supported |= (SUPPORTED_1000baseT_Half | 12195 SUPPORTED_1000baseT_Full); 12196 12197 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 12198 supported |= (SUPPORTED_100baseT_Half | 12199 SUPPORTED_100baseT_Full | 12200 SUPPORTED_10baseT_Half | 12201 SUPPORTED_10baseT_Full | 12202 SUPPORTED_TP); 12203 cmd->base.port = PORT_TP; 12204 } else { 12205 supported |= SUPPORTED_FIBRE; 12206 cmd->base.port = PORT_FIBRE; 12207 } 12208 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 12209 supported); 12210 12211 advertising = tp->link_config.advertising; 12212 if (tg3_flag(tp, PAUSE_AUTONEG)) { 12213 if (tp->link_config.flowctrl & FLOW_CTRL_RX) { 12214 if (tp->link_config.flowctrl & FLOW_CTRL_TX) { 12215 advertising |= ADVERTISED_Pause; 12216 } else { 12217 advertising |= ADVERTISED_Pause | 12218 ADVERTISED_Asym_Pause; 12219 } 12220 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) { 12221 advertising |= ADVERTISED_Asym_Pause; 12222 } 12223 } 12224 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 12225 advertising); 12226 12227 if (netif_running(dev) && tp->link_up) { 12228 cmd->base.speed = tp->link_config.active_speed; 12229 cmd->base.duplex = tp->link_config.active_duplex; 12230 ethtool_convert_legacy_u32_to_link_mode( 12231 cmd->link_modes.lp_advertising, 12232 tp->link_config.rmt_adv); 12233 12234 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 12235 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE) 12236 cmd->base.eth_tp_mdix = ETH_TP_MDI_X; 12237 else 12238 cmd->base.eth_tp_mdix = ETH_TP_MDI; 12239 } 12240 } else { 12241 cmd->base.speed = SPEED_UNKNOWN; 12242 cmd->base.duplex = DUPLEX_UNKNOWN; 12243 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; 12244 } 12245 cmd->base.phy_address = tp->phy_addr; 12246 cmd->base.autoneg = tp->link_config.autoneg; 12247 return 0; 12248 } 12249 12250 static int tg3_set_link_ksettings(struct net_device *dev, 12251 const struct ethtool_link_ksettings *cmd) 12252 { 12253 struct tg3 *tp = netdev_priv(dev); 12254 u32 speed = cmd->base.speed; 12255 u32 advertising; 12256 12257 if (tg3_flag(tp, USE_PHYLIB)) { 12258 struct phy_device *phydev; 12259 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12260 return -EAGAIN; 12261 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12262 return phy_ethtool_ksettings_set(phydev, cmd); 12263 } 12264 12265 if (cmd->base.autoneg != AUTONEG_ENABLE && 12266 cmd->base.autoneg != AUTONEG_DISABLE) 12267 return -EINVAL; 12268 12269 if (cmd->base.autoneg == AUTONEG_DISABLE && 12270 cmd->base.duplex != DUPLEX_FULL && 12271 cmd->base.duplex != DUPLEX_HALF) 12272 return -EINVAL; 12273 12274 ethtool_convert_link_mode_to_legacy_u32(&advertising, 12275 cmd->link_modes.advertising); 12276 12277 if (cmd->base.autoneg == AUTONEG_ENABLE) { 12278 u32 mask = ADVERTISED_Autoneg | 12279 ADVERTISED_Pause | 12280 ADVERTISED_Asym_Pause; 12281 12282 if (!(tp->phy_flags & 
TG3_PHYFLG_10_100_ONLY)) 12283 mask |= ADVERTISED_1000baseT_Half | 12284 ADVERTISED_1000baseT_Full; 12285 12286 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 12287 mask |= ADVERTISED_100baseT_Half | 12288 ADVERTISED_100baseT_Full | 12289 ADVERTISED_10baseT_Half | 12290 ADVERTISED_10baseT_Full | 12291 ADVERTISED_TP; 12292 else 12293 mask |= ADVERTISED_FIBRE; 12294 12295 if (advertising & ~mask) 12296 return -EINVAL; 12297 12298 mask &= (ADVERTISED_1000baseT_Half | 12299 ADVERTISED_1000baseT_Full | 12300 ADVERTISED_100baseT_Half | 12301 ADVERTISED_100baseT_Full | 12302 ADVERTISED_10baseT_Half | 12303 ADVERTISED_10baseT_Full); 12304 12305 advertising &= mask; 12306 } else { 12307 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) { 12308 if (speed != SPEED_1000) 12309 return -EINVAL; 12310 12311 if (cmd->base.duplex != DUPLEX_FULL) 12312 return -EINVAL; 12313 } else { 12314 if (speed != SPEED_100 && 12315 speed != SPEED_10) 12316 return -EINVAL; 12317 } 12318 } 12319 12320 tg3_full_lock(tp, 0); 12321 12322 tp->link_config.autoneg = cmd->base.autoneg; 12323 if (cmd->base.autoneg == AUTONEG_ENABLE) { 12324 tp->link_config.advertising = (advertising | 12325 ADVERTISED_Autoneg); 12326 tp->link_config.speed = SPEED_UNKNOWN; 12327 tp->link_config.duplex = DUPLEX_UNKNOWN; 12328 } else { 12329 tp->link_config.advertising = 0; 12330 tp->link_config.speed = speed; 12331 tp->link_config.duplex = cmd->base.duplex; 12332 } 12333 12334 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 12335 12336 tg3_warn_mgmt_link_flap(tp); 12337 12338 if (netif_running(dev)) 12339 tg3_setup_phy(tp, true); 12340 12341 tg3_full_unlock(tp); 12342 12343 return 0; 12344 } 12345 12346 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 12347 { 12348 struct tg3 *tp = netdev_priv(dev); 12349 12350 strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); 12351 strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version)); 12352 strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info)); 12353 } 12354 12355 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 12356 { 12357 struct tg3 *tp = netdev_priv(dev); 12358 12359 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev)) 12360 wol->supported = WAKE_MAGIC; 12361 else 12362 wol->supported = 0; 12363 wol->wolopts = 0; 12364 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev)) 12365 wol->wolopts = WAKE_MAGIC; 12366 memset(&wol->sopass, 0, sizeof(wol->sopass)); 12367 } 12368 12369 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 12370 { 12371 struct tg3 *tp = netdev_priv(dev); 12372 struct device *dp = &tp->pdev->dev; 12373 12374 if (wol->wolopts & ~WAKE_MAGIC) 12375 return -EINVAL; 12376 if ((wol->wolopts & WAKE_MAGIC) && 12377 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp))) 12378 return -EINVAL; 12379 12380 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC); 12381 12382 if (device_may_wakeup(dp)) 12383 tg3_flag_set(tp, WOL_ENABLE); 12384 else 12385 tg3_flag_clear(tp, WOL_ENABLE); 12386 12387 return 0; 12388 } 12389 12390 static u32 tg3_get_msglevel(struct net_device *dev) 12391 { 12392 struct tg3 *tp = netdev_priv(dev); 12393 return tp->msg_enable; 12394 } 12395 12396 static void tg3_set_msglevel(struct net_device *dev, u32 value) 12397 { 12398 struct tg3 *tp = netdev_priv(dev); 12399 tp->msg_enable = value; 12400 } 12401 12402 static int tg3_nway_reset(struct net_device *dev) 12403 { 12404 struct tg3 *tp = netdev_priv(dev); 12405 int r; 12406 12407 if 
(!netif_running(dev)) 12408 return -EAGAIN; 12409 12410 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 12411 return -EINVAL; 12412 12413 tg3_warn_mgmt_link_flap(tp); 12414 12415 if (tg3_flag(tp, USE_PHYLIB)) { 12416 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12417 return -EAGAIN; 12418 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 12419 } else { 12420 u32 bmcr; 12421 12422 spin_lock_bh(&tp->lock); 12423 r = -EINVAL; 12424 tg3_readphy(tp, MII_BMCR, &bmcr); 12425 if (!tg3_readphy(tp, MII_BMCR, &bmcr) && 12426 ((bmcr & BMCR_ANENABLE) || 12427 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) { 12428 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | 12429 BMCR_ANENABLE); 12430 r = 0; 12431 } 12432 spin_unlock_bh(&tp->lock); 12433 } 12434 12435 return r; 12436 } 12437 12438 static void tg3_get_ringparam(struct net_device *dev, 12439 struct ethtool_ringparam *ering, 12440 struct kernel_ethtool_ringparam *kernel_ering, 12441 struct netlink_ext_ack *extack) 12442 { 12443 struct tg3 *tp = netdev_priv(dev); 12444 12445 ering->rx_max_pending = tp->rx_std_ring_mask; 12446 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12447 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask; 12448 else 12449 ering->rx_jumbo_max_pending = 0; 12450 12451 ering->tx_max_pending = TG3_TX_RING_SIZE - 1; 12452 12453 ering->rx_pending = tp->rx_pending; 12454 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12455 ering->rx_jumbo_pending = tp->rx_jumbo_pending; 12456 else 12457 ering->rx_jumbo_pending = 0; 12458 12459 ering->tx_pending = tp->napi[0].tx_pending; 12460 } 12461 12462 static int tg3_set_ringparam(struct net_device *dev, 12463 struct ethtool_ringparam *ering, 12464 struct kernel_ethtool_ringparam *kernel_ering, 12465 struct netlink_ext_ack *extack) 12466 { 12467 struct tg3 *tp = netdev_priv(dev); 12468 int i, irq_sync = 0, err = 0; 12469 bool reset_phy = false; 12470 12471 if ((ering->rx_pending > tp->rx_std_ring_mask) || 12472 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || 12473 (ering->tx_pending > TG3_TX_RING_SIZE - 1) || 12474 (ering->tx_pending <= MAX_SKB_FRAGS) || 12475 (tg3_flag(tp, TSO_BUG) && 12476 (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) 12477 return -EINVAL; 12478 12479 if (netif_running(dev)) { 12480 tg3_phy_stop(tp); 12481 tg3_netif_stop(tp); 12482 irq_sync = 1; 12483 } 12484 12485 tg3_full_lock(tp, irq_sync); 12486 12487 tp->rx_pending = ering->rx_pending; 12488 12489 if (tg3_flag(tp, MAX_RXPEND_64) && 12490 tp->rx_pending > 63) 12491 tp->rx_pending = 63; 12492 12493 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12494 tp->rx_jumbo_pending = ering->rx_jumbo_pending; 12495 12496 for (i = 0; i < tp->irq_max; i++) 12497 tp->napi[i].tx_pending = ering->tx_pending; 12498 12499 if (netif_running(dev)) { 12500 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 12501 /* Reset PHY to avoid PHY lock up */ 12502 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 12503 tg3_asic_rev(tp) == ASIC_REV_5719 || 12504 tg3_asic_rev(tp) == ASIC_REV_5720) 12505 reset_phy = true; 12506 12507 err = tg3_restart_hw(tp, reset_phy); 12508 if (!err) 12509 tg3_netif_start(tp); 12510 } 12511 12512 tg3_full_unlock(tp); 12513 12514 if (irq_sync && !err) 12515 tg3_phy_start(tp); 12516 12517 return err; 12518 } 12519 12520 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 12521 { 12522 struct tg3 *tp = netdev_priv(dev); 12523 12524 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG); 12525 12526 if (tp->link_config.flowctrl & FLOW_CTRL_RX) 12527 epause->rx_pause = 1; 12528 else 12529 epause->rx_pause = 0; 12530 12531 if 
(tp->link_config.flowctrl & FLOW_CTRL_TX) 12532 epause->tx_pause = 1; 12533 else 12534 epause->tx_pause = 0; 12535 } 12536 12537 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 12538 { 12539 struct tg3 *tp = netdev_priv(dev); 12540 int err = 0; 12541 bool reset_phy = false; 12542 12543 if (tp->link_config.autoneg == AUTONEG_ENABLE) 12544 tg3_warn_mgmt_link_flap(tp); 12545 12546 if (tg3_flag(tp, USE_PHYLIB)) { 12547 struct phy_device *phydev; 12548 12549 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12550 12551 if (!phy_validate_pause(phydev, epause)) 12552 return -EINVAL; 12553 12554 tp->link_config.flowctrl = 0; 12555 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause); 12556 if (epause->rx_pause) { 12557 tp->link_config.flowctrl |= FLOW_CTRL_RX; 12558 12559 if (epause->tx_pause) { 12560 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12561 } 12562 } else if (epause->tx_pause) { 12563 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12564 } 12565 12566 if (epause->autoneg) 12567 tg3_flag_set(tp, PAUSE_AUTONEG); 12568 else 12569 tg3_flag_clear(tp, PAUSE_AUTONEG); 12570 12571 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { 12572 if (phydev->autoneg) { 12573 /* phy_set_asym_pause() will 12574 * renegotiate the link to inform our 12575 * link partner of our flow control 12576 * settings, even if the flow control 12577 * is forced. Let tg3_adjust_link() 12578 * do the final flow control setup. 12579 */ 12580 return 0; 12581 } 12582 12583 if (!epause->autoneg) 12584 tg3_setup_flow_control(tp, 0, 0); 12585 } 12586 } else { 12587 int irq_sync = 0; 12588 12589 if (netif_running(dev)) { 12590 tg3_netif_stop(tp); 12591 irq_sync = 1; 12592 } 12593 12594 tg3_full_lock(tp, irq_sync); 12595 12596 if (epause->autoneg) 12597 tg3_flag_set(tp, PAUSE_AUTONEG); 12598 else 12599 tg3_flag_clear(tp, PAUSE_AUTONEG); 12600 if (epause->rx_pause) 12601 tp->link_config.flowctrl |= FLOW_CTRL_RX; 12602 else 12603 tp->link_config.flowctrl &= ~FLOW_CTRL_RX; 12604 if (epause->tx_pause) 12605 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12606 else 12607 tp->link_config.flowctrl &= ~FLOW_CTRL_TX; 12608 12609 if (netif_running(dev)) { 12610 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 12611 /* Reset PHY to avoid PHY lock up */ 12612 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 12613 tg3_asic_rev(tp) == ASIC_REV_5719 || 12614 tg3_asic_rev(tp) == ASIC_REV_5720) 12615 reset_phy = true; 12616 12617 err = tg3_restart_hw(tp, reset_phy); 12618 if (!err) 12619 tg3_netif_start(tp); 12620 } 12621 12622 tg3_full_unlock(tp); 12623 } 12624 12625 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 12626 12627 return err; 12628 } 12629 12630 static int tg3_get_sset_count(struct net_device *dev, int sset) 12631 { 12632 switch (sset) { 12633 case ETH_SS_TEST: 12634 return TG3_NUM_TEST; 12635 case ETH_SS_STATS: 12636 return TG3_NUM_STATS; 12637 default: 12638 return -EOPNOTSUPP; 12639 } 12640 } 12641 12642 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, 12643 u32 *rules __always_unused) 12644 { 12645 struct tg3 *tp = netdev_priv(dev); 12646 12647 if (!tg3_flag(tp, SUPPORT_MSIX)) 12648 return -EOPNOTSUPP; 12649 12650 switch (info->cmd) { 12651 case ETHTOOL_GRXRINGS: 12652 if (netif_running(tp->dev)) 12653 info->data = tp->rxq_cnt; 12654 else { 12655 info->data = num_online_cpus(); 12656 if (info->data > TG3_RSS_MAX_NUM_QS) 12657 info->data = TG3_RSS_MAX_NUM_QS; 12658 } 12659 12660 return 0; 12661 12662 default: 12663 return -EOPNOTSUPP; 12664 } 12665 } 12666 12667 static u32 
tg3_get_rxfh_indir_size(struct net_device *dev) 12668 { 12669 u32 size = 0; 12670 struct tg3 *tp = netdev_priv(dev); 12671 12672 if (tg3_flag(tp, SUPPORT_MSIX)) 12673 size = TG3_RSS_INDIR_TBL_SIZE; 12674 12675 return size; 12676 } 12677 12678 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) 12679 { 12680 struct tg3 *tp = netdev_priv(dev); 12681 int i; 12682 12683 if (hfunc) 12684 *hfunc = ETH_RSS_HASH_TOP; 12685 if (!indir) 12686 return 0; 12687 12688 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) 12689 indir[i] = tp->rss_ind_tbl[i]; 12690 12691 return 0; 12692 } 12693 12694 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, 12695 const u8 hfunc) 12696 { 12697 struct tg3 *tp = netdev_priv(dev); 12698 size_t i; 12699 12700 /* We require at least one supported parameter to be changed and no 12701 * change in any of the unsupported parameters 12702 */ 12703 if (key || 12704 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) 12705 return -EOPNOTSUPP; 12706 12707 if (!indir) 12708 return 0; 12709 12710 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) 12711 tp->rss_ind_tbl[i] = indir[i]; 12712 12713 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS)) 12714 return 0; 12715 12716 /* It is legal to write the indirection 12717 * table while the device is running. 12718 */ 12719 tg3_full_lock(tp, 0); 12720 tg3_rss_write_indir_tbl(tp); 12721 tg3_full_unlock(tp); 12722 12723 return 0; 12724 } 12725 12726 static void tg3_get_channels(struct net_device *dev, 12727 struct ethtool_channels *channel) 12728 { 12729 struct tg3 *tp = netdev_priv(dev); 12730 u32 deflt_qs = netif_get_num_default_rss_queues(); 12731 12732 channel->max_rx = tp->rxq_max; 12733 channel->max_tx = tp->txq_max; 12734 12735 if (netif_running(dev)) { 12736 channel->rx_count = tp->rxq_cnt; 12737 channel->tx_count = tp->txq_cnt; 12738 } else { 12739 if (tp->rxq_req) 12740 channel->rx_count = tp->rxq_req; 12741 else 12742 channel->rx_count = min(deflt_qs, tp->rxq_max); 12743 12744 if (tp->txq_req) 12745 channel->tx_count = tp->txq_req; 12746 else 12747 channel->tx_count = min(deflt_qs, tp->txq_max); 12748 } 12749 } 12750 12751 static int tg3_set_channels(struct net_device *dev, 12752 struct ethtool_channels *channel) 12753 { 12754 struct tg3 *tp = netdev_priv(dev); 12755 12756 if (!tg3_flag(tp, SUPPORT_MSIX)) 12757 return -EOPNOTSUPP; 12758 12759 if (channel->rx_count > tp->rxq_max || 12760 channel->tx_count > tp->txq_max) 12761 return -EINVAL; 12762 12763 tp->rxq_req = channel->rx_count; 12764 tp->txq_req = channel->tx_count; 12765 12766 if (!netif_running(dev)) 12767 return 0; 12768 12769 tg3_stop(tp); 12770 12771 tg3_carrier_off(tp); 12772 12773 tg3_start(tp, true, false, false); 12774 12775 return 0; 12776 } 12777 12778 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 12779 { 12780 switch (stringset) { 12781 case ETH_SS_STATS: 12782 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); 12783 break; 12784 case ETH_SS_TEST: 12785 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys)); 12786 break; 12787 default: 12788 WARN_ON(1); /* we need a WARN() */ 12789 break; 12790 } 12791 } 12792 12793 static int tg3_set_phys_id(struct net_device *dev, 12794 enum ethtool_phys_id_state state) 12795 { 12796 struct tg3 *tp = netdev_priv(dev); 12797 12798 switch (state) { 12799 case ETHTOOL_ID_ACTIVE: 12800 return 1; /* cycle on/off once per second */ 12801 12802 case ETHTOOL_ID_ON: 12803 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | 12804
LED_CTRL_1000MBPS_ON | 12805 LED_CTRL_100MBPS_ON | 12806 LED_CTRL_10MBPS_ON | 12807 LED_CTRL_TRAFFIC_OVERRIDE | 12808 LED_CTRL_TRAFFIC_BLINK | 12809 LED_CTRL_TRAFFIC_LED); 12810 break; 12811 12812 case ETHTOOL_ID_OFF: 12813 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | 12814 LED_CTRL_TRAFFIC_OVERRIDE); 12815 break; 12816 12817 case ETHTOOL_ID_INACTIVE: 12818 tw32(MAC_LED_CTRL, tp->led_ctrl); 12819 break; 12820 } 12821 12822 return 0; 12823 } 12824 12825 static void tg3_get_ethtool_stats(struct net_device *dev, 12826 struct ethtool_stats *estats, u64 *tmp_stats) 12827 { 12828 struct tg3 *tp = netdev_priv(dev); 12829 12830 if (tp->hw_stats) 12831 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats); 12832 else 12833 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats)); 12834 } 12835 12836 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen) 12837 { 12838 int i; 12839 __be32 *buf; 12840 u32 offset = 0, len = 0; 12841 u32 magic, val; 12842 12843 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic)) 12844 return NULL; 12845 12846 if (magic == TG3_EEPROM_MAGIC) { 12847 for (offset = TG3_NVM_DIR_START; 12848 offset < TG3_NVM_DIR_END; 12849 offset += TG3_NVM_DIRENT_SIZE) { 12850 if (tg3_nvram_read(tp, offset, &val)) 12851 return NULL; 12852 12853 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == 12854 TG3_NVM_DIRTYPE_EXTVPD) 12855 break; 12856 } 12857 12858 if (offset != TG3_NVM_DIR_END) { 12859 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4; 12860 if (tg3_nvram_read(tp, offset + 4, &offset)) 12861 return NULL; 12862 12863 offset = tg3_nvram_logical_addr(tp, offset); 12864 } 12865 12866 if (!offset || !len) { 12867 offset = TG3_NVM_VPD_OFF; 12868 len = TG3_NVM_VPD_LEN; 12869 } 12870 12871 buf = kmalloc(len, GFP_KERNEL); 12872 if (!buf) 12873 return NULL; 12874 12875 for (i = 0; i < len; i += 4) { 12876 /* The data is in little-endian format in NVRAM. 12877 * Use the big-endian read routines to preserve 12878 * the byte order as it exists in NVRAM. 
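 * (Illustration: if NVRAM holds the bytes b0 b1 b2 b3 at this offset,
 * the __be32 stored into buf[] keeps those bytes in that exact order in
 * memory on both little- and big-endian hosts, so buf can be handed to
 * the VPD parser as a raw byte-for-byte image of NVRAM.)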
12879 */ 12880 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4])) 12881 goto error; 12882 } 12883 *vpdlen = len; 12884 } else { 12885 buf = pci_vpd_alloc(tp->pdev, vpdlen); 12886 if (IS_ERR(buf)) 12887 return NULL; 12888 } 12889 12890 return buf; 12891 12892 error: 12893 kfree(buf); 12894 return NULL; 12895 } 12896 12897 #define NVRAM_TEST_SIZE 0x100 12898 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14 12899 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18 12900 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c 12901 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20 12902 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24 12903 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50 12904 #define NVRAM_SELFBOOT_HW_SIZE 0x20 12905 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c 12906 12907 static int tg3_test_nvram(struct tg3 *tp) 12908 { 12909 u32 csum, magic; 12910 __be32 *buf; 12911 int i, j, k, err = 0, size; 12912 unsigned int len; 12913 12914 if (tg3_flag(tp, NO_NVRAM)) 12915 return 0; 12916 12917 if (tg3_nvram_read(tp, 0, &magic) != 0) 12918 return -EIO; 12919 12920 if (magic == TG3_EEPROM_MAGIC) 12921 size = NVRAM_TEST_SIZE; 12922 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) { 12923 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) == 12924 TG3_EEPROM_SB_FORMAT_1) { 12925 switch (magic & TG3_EEPROM_SB_REVISION_MASK) { 12926 case TG3_EEPROM_SB_REVISION_0: 12927 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE; 12928 break; 12929 case TG3_EEPROM_SB_REVISION_2: 12930 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE; 12931 break; 12932 case TG3_EEPROM_SB_REVISION_3: 12933 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE; 12934 break; 12935 case TG3_EEPROM_SB_REVISION_4: 12936 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE; 12937 break; 12938 case TG3_EEPROM_SB_REVISION_5: 12939 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE; 12940 break; 12941 case TG3_EEPROM_SB_REVISION_6: 12942 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE; 12943 break; 12944 default: 12945 return -EIO; 12946 } 12947 } else 12948 return 0; 12949 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) 12950 size = NVRAM_SELFBOOT_HW_SIZE; 12951 else 12952 return -EIO; 12953 12954 buf = kmalloc(size, GFP_KERNEL); 12955 if (buf == NULL) 12956 return -ENOMEM; 12957 12958 err = -EIO; 12959 for (i = 0, j = 0; i < size; i += 4, j++) { 12960 err = tg3_nvram_read_be32(tp, i, &buf[j]); 12961 if (err) 12962 break; 12963 } 12964 if (i < size) 12965 goto out; 12966 12967 /* Selfboot format */ 12968 magic = be32_to_cpu(buf[0]); 12969 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == 12970 TG3_EEPROM_MAGIC_FW) { 12971 u8 *buf8 = (u8 *) buf, csum8 = 0; 12972 12973 if ((magic & TG3_EEPROM_SB_REVISION_MASK) == 12974 TG3_EEPROM_SB_REVISION_2) { 12975 /* For rev 2, the csum doesn't include the MBA. */ 12976 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++) 12977 csum8 += buf8[i]; 12978 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++) 12979 csum8 += buf8[i]; 12980 } else { 12981 for (i = 0; i < size; i++) 12982 csum8 += buf8[i]; 12983 } 12984 12985 if (csum8 == 0) { 12986 err = 0; 12987 goto out; 12988 } 12989 12990 err = -EIO; 12991 goto out; 12992 } 12993 12994 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == 12995 TG3_EEPROM_MAGIC_HW) { 12996 u8 data[NVRAM_SELFBOOT_DATA_SIZE]; 12997 u8 parity[NVRAM_SELFBOOT_DATA_SIZE]; 12998 u8 *buf8 = (u8 *) buf; 12999 13000 /* Separate the parity bits and the data bytes. 
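 * Layout of the 0x20-byte selfboot block: bytes 0 and 8 each carry 7
 * parity bits, byte 16 carries 6 and byte 17 carries 8, giving
 * 7 + 7 + 6 + 8 = 28 parity bits that guard the remaining 28 data
 * bytes (NVRAM_SELFBOOT_DATA_SIZE).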
*/ 13001 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) { 13002 if ((i == 0) || (i == 8)) { 13003 int l; 13004 u8 msk; 13005 13006 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1) 13007 parity[k++] = buf8[i] & msk; 13008 i++; 13009 } else if (i == 16) { 13010 int l; 13011 u8 msk; 13012 13013 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1) 13014 parity[k++] = buf8[i] & msk; 13015 i++; 13016 13017 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1) 13018 parity[k++] = buf8[i] & msk; 13019 i++; 13020 } 13021 data[j++] = buf8[i]; 13022 } 13023 13024 err = -EIO; 13025 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) { 13026 u8 hw8 = hweight8(data[i]); 13027 13028 if ((hw8 & 0x1) && parity[i]) 13029 goto out; 13030 else if (!(hw8 & 0x1) && !parity[i]) 13031 goto out; 13032 } 13033 err = 0; 13034 goto out; 13035 } 13036 13037 err = -EIO; 13038 13039 /* Bootstrap checksum at offset 0x10 */ 13040 csum = calc_crc((unsigned char *) buf, 0x10); 13041 if (csum != le32_to_cpu(buf[0x10/4])) 13042 goto out; 13043 13044 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ 13045 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); 13046 if (csum != le32_to_cpu(buf[0xfc/4])) 13047 goto out; 13048 13049 kfree(buf); 13050 13051 buf = tg3_vpd_readblock(tp, &len); 13052 if (!buf) 13053 return -ENOMEM; 13054 13055 err = pci_vpd_check_csum(buf, len); 13056 /* go on if no checksum found */ 13057 if (err == 1) 13058 err = 0; 13059 out: 13060 kfree(buf); 13061 return err; 13062 } 13063 13064 #define TG3_SERDES_TIMEOUT_SEC 2 13065 #define TG3_COPPER_TIMEOUT_SEC 6 13066 13067 static int tg3_test_link(struct tg3 *tp) 13068 { 13069 int i, max; 13070 13071 if (!netif_running(tp->dev)) 13072 return -ENODEV; 13073 13074 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 13075 max = TG3_SERDES_TIMEOUT_SEC; 13076 else 13077 max = TG3_COPPER_TIMEOUT_SEC; 13078 13079 for (i = 0; i < max; i++) { 13080 if (tp->link_up) 13081 return 0; 13082 13083 if (msleep_interruptible(1000)) 13084 break; 13085 } 13086 13087 return -EIO; 13088 } 13089 13090 /* Only test the commonly used registers */ 13091 static int tg3_test_registers(struct tg3 *tp) 13092 { 13093 int i, is_5705, is_5750; 13094 u32 offset, read_mask, write_mask, val, save_val, read_val; 13095 static struct { 13096 u16 offset; 13097 u16 flags; 13098 #define TG3_FL_5705 0x1 13099 #define TG3_FL_NOT_5705 0x2 13100 #define TG3_FL_NOT_5788 0x4 13101 #define TG3_FL_NOT_5750 0x8 13102 u32 read_mask; 13103 u32 write_mask; 13104 } reg_tbl[] = { 13105 /* MAC Control Registers */ 13106 { MAC_MODE, TG3_FL_NOT_5705, 13107 0x00000000, 0x00ef6f8c }, 13108 { MAC_MODE, TG3_FL_5705, 13109 0x00000000, 0x01ef6b8c }, 13110 { MAC_STATUS, TG3_FL_NOT_5705, 13111 0x03800107, 0x00000000 }, 13112 { MAC_STATUS, TG3_FL_5705, 13113 0x03800100, 0x00000000 }, 13114 { MAC_ADDR_0_HIGH, 0x0000, 13115 0x00000000, 0x0000ffff }, 13116 { MAC_ADDR_0_LOW, 0x0000, 13117 0x00000000, 0xffffffff }, 13118 { MAC_RX_MTU_SIZE, 0x0000, 13119 0x00000000, 0x0000ffff }, 13120 { MAC_TX_MODE, 0x0000, 13121 0x00000000, 0x00000070 }, 13122 { MAC_TX_LENGTHS, 0x0000, 13123 0x00000000, 0x00003fff }, 13124 { MAC_RX_MODE, TG3_FL_NOT_5705, 13125 0x00000000, 0x000007fc }, 13126 { MAC_RX_MODE, TG3_FL_5705, 13127 0x00000000, 0x000007dc }, 13128 { MAC_HASH_REG_0, 0x0000, 13129 0x00000000, 0xffffffff }, 13130 { MAC_HASH_REG_1, 0x0000, 13131 0x00000000, 0xffffffff }, 13132 { MAC_HASH_REG_2, 0x0000, 13133 0x00000000, 0xffffffff }, 13134 { MAC_HASH_REG_3, 0x0000, 13135 0x00000000, 0xffffffff }, 13136 13137 /* Receive Data and Receive BD 
Initiator Control Registers. */ 13138 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705, 13139 0x00000000, 0xffffffff }, 13140 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705, 13141 0x00000000, 0xffffffff }, 13142 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705, 13143 0x00000000, 0x00000003 }, 13144 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705, 13145 0x00000000, 0xffffffff }, 13146 { RCVDBDI_STD_BD+0, 0x0000, 13147 0x00000000, 0xffffffff }, 13148 { RCVDBDI_STD_BD+4, 0x0000, 13149 0x00000000, 0xffffffff }, 13150 { RCVDBDI_STD_BD+8, 0x0000, 13151 0x00000000, 0xffff0002 }, 13152 { RCVDBDI_STD_BD+0xc, 0x0000, 13153 0x00000000, 0xffffffff }, 13154 13155 /* Receive BD Initiator Control Registers. */ 13156 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705, 13157 0x00000000, 0xffffffff }, 13158 { RCVBDI_STD_THRESH, TG3_FL_5705, 13159 0x00000000, 0x000003ff }, 13160 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, 13161 0x00000000, 0xffffffff }, 13162 13163 /* Host Coalescing Control Registers. */ 13164 { HOSTCC_MODE, TG3_FL_NOT_5705, 13165 0x00000000, 0x00000004 }, 13166 { HOSTCC_MODE, TG3_FL_5705, 13167 0x00000000, 0x000000f6 }, 13168 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705, 13169 0x00000000, 0xffffffff }, 13170 { HOSTCC_RXCOL_TICKS, TG3_FL_5705, 13171 0x00000000, 0x000003ff }, 13172 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705, 13173 0x00000000, 0xffffffff }, 13174 { HOSTCC_TXCOL_TICKS, TG3_FL_5705, 13175 0x00000000, 0x000003ff }, 13176 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705, 13177 0x00000000, 0xffffffff }, 13178 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 13179 0x00000000, 0x000000ff }, 13180 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705, 13181 0x00000000, 0xffffffff }, 13182 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 13183 0x00000000, 0x000000ff }, 13184 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, 13185 0x00000000, 0xffffffff }, 13186 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, 13187 0x00000000, 0xffffffff }, 13188 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, 13189 0x00000000, 0xffffffff }, 13190 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 13191 0x00000000, 0x000000ff }, 13192 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, 13193 0x00000000, 0xffffffff }, 13194 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 13195 0x00000000, 0x000000ff }, 13196 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, 13197 0x00000000, 0xffffffff }, 13198 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705, 13199 0x00000000, 0xffffffff }, 13200 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705, 13201 0x00000000, 0xffffffff }, 13202 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, 13203 0x00000000, 0xffffffff }, 13204 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, 13205 0x00000000, 0xffffffff }, 13206 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, 13207 0xffffffff, 0x00000000 }, 13208 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, 13209 0xffffffff, 0x00000000 }, 13210 13211 /* Buffer Manager Control Registers. 
*/ 13212 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750, 13213 0x00000000, 0x007fff80 }, 13214 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750, 13215 0x00000000, 0x007fffff }, 13216 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000, 13217 0x00000000, 0x0000003f }, 13218 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000, 13219 0x00000000, 0x000001ff }, 13220 { BUFMGR_MB_HIGH_WATER, 0x0000, 13221 0x00000000, 0x000001ff }, 13222 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705, 13223 0xffffffff, 0x00000000 }, 13224 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705, 13225 0xffffffff, 0x00000000 }, 13226 13227 /* Mailbox Registers */ 13228 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000, 13229 0x00000000, 0x000001ff }, 13230 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705, 13231 0x00000000, 0x000001ff }, 13232 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000, 13233 0x00000000, 0x000007ff }, 13234 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000, 13235 0x00000000, 0x000001ff }, 13236 13237 { 0xffff, 0x0000, 0x00000000, 0x00000000 }, 13238 }; 13239 13240 is_5705 = is_5750 = 0; 13241 if (tg3_flag(tp, 5705_PLUS)) { 13242 is_5705 = 1; 13243 if (tg3_flag(tp, 5750_PLUS)) 13244 is_5750 = 1; 13245 } 13246 13247 for (i = 0; reg_tbl[i].offset != 0xffff; i++) { 13248 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705)) 13249 continue; 13250 13251 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) 13252 continue; 13253 13254 if (tg3_flag(tp, IS_5788) && 13255 (reg_tbl[i].flags & TG3_FL_NOT_5788)) 13256 continue; 13257 13258 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750)) 13259 continue; 13260 13261 offset = (u32) reg_tbl[i].offset; 13262 read_mask = reg_tbl[i].read_mask; 13263 write_mask = reg_tbl[i].write_mask; 13264 13265 /* Save the original register content */ 13266 save_val = tr32(offset); 13267 13268 /* Determine the read-only value. */ 13269 read_val = save_val & read_mask; 13270 13271 /* Write zero to the register, then make sure the read-only bits 13272 * are not changed and the read/write bits are all zeros. 13273 */ 13274 tw32(offset, 0); 13275 13276 val = tr32(offset); 13277 13278 /* Test the read-only and read/write bits. */ 13279 if (((val & read_mask) != read_val) || (val & write_mask)) 13280 goto out; 13281 13282 /* Write ones to all the bits defined by RdMask and WrMask, then 13283 * make sure the read-only bits are not changed and the 13284 * read/write bits are all ones. 13285 */ 13286 tw32(offset, read_mask | write_mask); 13287 13288 val = tr32(offset); 13289 13290 /* Test the read-only bits. */ 13291 if ((val & read_mask) != read_val) 13292 goto out; 13293 13294 /* Test the read/write bits. 
*/ 13295 if ((val & write_mask) != write_mask) 13296 goto out; 13297 13298 tw32(offset, save_val); 13299 } 13300 13301 return 0; 13302 13303 out: 13304 if (netif_msg_hw(tp)) 13305 netdev_err(tp->dev, 13306 "Register test failed at offset %x\n", offset); 13307 tw32(offset, save_val); 13308 return -EIO; 13309 } 13310 13311 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) 13312 { 13313 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; 13314 int i; 13315 u32 j; 13316 13317 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) { 13318 for (j = 0; j < len; j += 4) { 13319 u32 val; 13320 13321 tg3_write_mem(tp, offset + j, test_pattern[i]); 13322 tg3_read_mem(tp, offset + j, &val); 13323 if (val != test_pattern[i]) 13324 return -EIO; 13325 } 13326 } 13327 return 0; 13328 } 13329 13330 static int tg3_test_memory(struct tg3 *tp) 13331 { 13332 static struct mem_entry { 13333 u32 offset; 13334 u32 len; 13335 } mem_tbl_570x[] = { 13336 { 0x00000000, 0x00b50}, 13337 { 0x00002000, 0x1c000}, 13338 { 0xffffffff, 0x00000} 13339 }, mem_tbl_5705[] = { 13340 { 0x00000100, 0x0000c}, 13341 { 0x00000200, 0x00008}, 13342 { 0x00004000, 0x00800}, 13343 { 0x00006000, 0x01000}, 13344 { 0x00008000, 0x02000}, 13345 { 0x00010000, 0x0e000}, 13346 { 0xffffffff, 0x00000} 13347 }, mem_tbl_5755[] = { 13348 { 0x00000200, 0x00008}, 13349 { 0x00004000, 0x00800}, 13350 { 0x00006000, 0x00800}, 13351 { 0x00008000, 0x02000}, 13352 { 0x00010000, 0x0c000}, 13353 { 0xffffffff, 0x00000} 13354 }, mem_tbl_5906[] = { 13355 { 0x00000200, 0x00008}, 13356 { 0x00004000, 0x00400}, 13357 { 0x00006000, 0x00400}, 13358 { 0x00008000, 0x01000}, 13359 { 0x00010000, 0x01000}, 13360 { 0xffffffff, 0x00000} 13361 }, mem_tbl_5717[] = { 13362 { 0x00000200, 0x00008}, 13363 { 0x00010000, 0x0a000}, 13364 { 0x00020000, 0x13c00}, 13365 { 0xffffffff, 0x00000} 13366 }, mem_tbl_57765[] = { 13367 { 0x00000200, 0x00008}, 13368 { 0x00004000, 0x00800}, 13369 { 0x00006000, 0x09800}, 13370 { 0x00010000, 0x0a000}, 13371 { 0xffffffff, 0x00000} 13372 }; 13373 struct mem_entry *mem_tbl; 13374 int err = 0; 13375 int i; 13376 13377 if (tg3_flag(tp, 5717_PLUS)) 13378 mem_tbl = mem_tbl_5717; 13379 else if (tg3_flag(tp, 57765_CLASS) || 13380 tg3_asic_rev(tp) == ASIC_REV_5762) 13381 mem_tbl = mem_tbl_57765; 13382 else if (tg3_flag(tp, 5755_PLUS)) 13383 mem_tbl = mem_tbl_5755; 13384 else if (tg3_asic_rev(tp) == ASIC_REV_5906) 13385 mem_tbl = mem_tbl_5906; 13386 else if (tg3_flag(tp, 5705_PLUS)) 13387 mem_tbl = mem_tbl_5705; 13388 else 13389 mem_tbl = mem_tbl_570x; 13390 13391 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { 13392 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len); 13393 if (err) 13394 break; 13395 } 13396 13397 return err; 13398 } 13399 13400 #define TG3_TSO_MSS 500 13401 13402 #define TG3_TSO_IP_HDR_LEN 20 13403 #define TG3_TSO_TCP_HDR_LEN 20 13404 #define TG3_TSO_TCP_OPT_LEN 12 13405 13406 static const u8 tg3_tso_header[] = { 13407 0x08, 0x00, 13408 0x45, 0x00, 0x00, 0x00, 13409 0x00, 0x00, 0x40, 0x00, 13410 0x40, 0x06, 0x00, 0x00, 13411 0x0a, 0x00, 0x00, 0x01, 13412 0x0a, 0x00, 0x00, 0x02, 13413 0x0d, 0x00, 0xe0, 0x00, 13414 0x00, 0x00, 0x01, 0x00, 13415 0x00, 0x00, 0x02, 0x00, 13416 0x80, 0x10, 0x10, 0x00, 13417 0x14, 0x09, 0x00, 0x00, 13418 0x01, 0x01, 0x08, 0x0a, 13419 0x11, 0x11, 0x11, 0x11, 13420 0x11, 0x11, 0x11, 0x11, 13421 }; 13422 13423 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) 13424 { 13425 u32 rx_start_idx, rx_idx, tx_idx, opaque_key; 13426 u32 base_flags = 0, mss = 0, 
desc_idx, coal_now, data_off, val; 13427 u32 budget; 13428 struct sk_buff *skb; 13429 u8 *tx_data, *rx_data; 13430 dma_addr_t map; 13431 int num_pkts, tx_len, rx_len, i, err; 13432 struct tg3_rx_buffer_desc *desc; 13433 struct tg3_napi *tnapi, *rnapi; 13434 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; 13435 13436 tnapi = &tp->napi[0]; 13437 rnapi = &tp->napi[0]; 13438 if (tp->irq_cnt > 1) { 13439 if (tg3_flag(tp, ENABLE_RSS)) 13440 rnapi = &tp->napi[1]; 13441 if (tg3_flag(tp, ENABLE_TSS)) 13442 tnapi = &tp->napi[1]; 13443 } 13444 coal_now = tnapi->coal_now | rnapi->coal_now; 13445 13446 err = -EIO; 13447 13448 tx_len = pktsz; 13449 skb = netdev_alloc_skb(tp->dev, tx_len); 13450 if (!skb) 13451 return -ENOMEM; 13452 13453 tx_data = skb_put(skb, tx_len); 13454 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN); 13455 memset(tx_data + ETH_ALEN, 0x0, 8); 13456 13457 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN); 13458 13459 if (tso_loopback) { 13460 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN]; 13461 13462 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN + 13463 TG3_TSO_TCP_OPT_LEN; 13464 13465 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header, 13466 sizeof(tg3_tso_header)); 13467 mss = TG3_TSO_MSS; 13468 13469 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header); 13470 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS); 13471 13472 /* Set the total length field in the IP header */ 13473 iph->tot_len = htons((u16)(mss + hdr_len)); 13474 13475 base_flags = (TXD_FLAG_CPU_PRE_DMA | 13476 TXD_FLAG_CPU_POST_DMA); 13477 13478 if (tg3_flag(tp, HW_TSO_1) || 13479 tg3_flag(tp, HW_TSO_2) || 13480 tg3_flag(tp, HW_TSO_3)) { 13481 struct tcphdr *th; 13482 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN; 13483 th = (struct tcphdr *)&tx_data[val]; 13484 th->check = 0; 13485 } else 13486 base_flags |= TXD_FLAG_TCPUDP_CSUM; 13487 13488 if (tg3_flag(tp, HW_TSO_3)) { 13489 mss |= (hdr_len & 0xc) << 12; 13490 if (hdr_len & 0x10) 13491 base_flags |= 0x00000010; 13492 base_flags |= (hdr_len & 0x3e0) << 5; 13493 } else if (tg3_flag(tp, HW_TSO_2)) 13494 mss |= hdr_len << 9; 13495 else if (tg3_flag(tp, HW_TSO_1) || 13496 tg3_asic_rev(tp) == ASIC_REV_5705) { 13497 mss |= (TG3_TSO_TCP_OPT_LEN << 9); 13498 } else { 13499 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10); 13500 } 13501 13502 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header); 13503 } else { 13504 num_pkts = 1; 13505 data_off = ETH_HLEN; 13506 13507 if (tg3_flag(tp, USE_JUMBO_BDFLAG) && 13508 tx_len > VLAN_ETH_FRAME_LEN) 13509 base_flags |= TXD_FLAG_JMB_PKT; 13510 } 13511 13512 for (i = data_off; i < tx_len; i++) 13513 tx_data[i] = (u8) (i & 0xff); 13514 13515 map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE); 13516 if (dma_mapping_error(&tp->pdev->dev, map)) { 13517 dev_kfree_skb(skb); 13518 return -EIO; 13519 } 13520 13521 val = tnapi->tx_prod; 13522 tnapi->tx_buffers[val].skb = skb; 13523 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map); 13524 13525 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 13526 rnapi->coal_now); 13527 13528 udelay(10); 13529 13530 rx_start_idx = rnapi->hw_status->idx[0].rx_producer; 13531 13532 budget = tg3_tx_avail(tnapi); 13533 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len, 13534 base_flags | TXD_FLAG_END, mss, 0)) { 13535 tnapi->tx_buffers[val].skb = NULL; 13536 dev_kfree_skb(skb); 13537 return -EIO; 13538 } 13539 13540 tnapi->tx_prod++; 13541 13542 /* Sync BD data before updating mailbox */ 13543 wmb(); 13544 13545 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); 13546 
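/* Read the mailbox register back so the posted write is flushed to the
 * chip before we start polling for the transmit completion below.
 */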
tr32_mailbox(tnapi->prodmbox); 13547 13548 udelay(10); 13549 13550 /* 350 usec to allow enough time on some 10/100 Mbps devices. */ 13551 for (i = 0; i < 35; i++) { 13552 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 13553 coal_now); 13554 13555 udelay(10); 13556 13557 tx_idx = tnapi->hw_status->idx[0].tx_consumer; 13558 rx_idx = rnapi->hw_status->idx[0].rx_producer; 13559 if ((tx_idx == tnapi->tx_prod) && 13560 (rx_idx == (rx_start_idx + num_pkts))) 13561 break; 13562 } 13563 13564 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1); 13565 dev_kfree_skb(skb); 13566 13567 if (tx_idx != tnapi->tx_prod) 13568 goto out; 13569 13570 if (rx_idx != rx_start_idx + num_pkts) 13571 goto out; 13572 13573 val = data_off; 13574 while (rx_idx != rx_start_idx) { 13575 desc = &rnapi->rx_rcb[rx_start_idx++]; 13576 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 13577 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 13578 13579 if ((desc->err_vlan & RXD_ERR_MASK) != 0 && 13580 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) 13581 goto out; 13582 13583 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) 13584 - ETH_FCS_LEN; 13585 13586 if (!tso_loopback) { 13587 if (rx_len != tx_len) 13588 goto out; 13589 13590 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) { 13591 if (opaque_key != RXD_OPAQUE_RING_STD) 13592 goto out; 13593 } else { 13594 if (opaque_key != RXD_OPAQUE_RING_JUMBO) 13595 goto out; 13596 } 13597 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && 13598 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK) 13599 >> RXD_TCPCSUM_SHIFT != 0xffff) { 13600 goto out; 13601 } 13602 13603 if (opaque_key == RXD_OPAQUE_RING_STD) { 13604 rx_data = tpr->rx_std_buffers[desc_idx].data; 13605 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], 13606 mapping); 13607 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 13608 rx_data = tpr->rx_jmb_buffers[desc_idx].data; 13609 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx], 13610 mapping); 13611 } else 13612 goto out; 13613 13614 dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len, 13615 DMA_FROM_DEVICE); 13616 13617 rx_data += TG3_RX_OFFSET(tp); 13618 for (i = data_off; i < rx_len; i++, val++) { 13619 if (*(rx_data + i) != (u8) (val & 0xff)) 13620 goto out; 13621 } 13622 } 13623 13624 err = 0; 13625 13626 /* tg3_free_rings will unmap and free the rx_data */ 13627 out: 13628 return err; 13629 } 13630 13631 #define TG3_STD_LOOPBACK_FAILED 1 13632 #define TG3_JMB_LOOPBACK_FAILED 2 13633 #define TG3_TSO_LOOPBACK_FAILED 4 13634 #define TG3_LOOPBACK_FAILED \ 13635 (TG3_STD_LOOPBACK_FAILED | \ 13636 TG3_JMB_LOOPBACK_FAILED | \ 13637 TG3_TSO_LOOPBACK_FAILED) 13638 13639 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk) 13640 { 13641 int err = -EIO; 13642 u32 eee_cap; 13643 u32 jmb_pkt_sz = 9000; 13644 13645 if (tp->dma_limit) 13646 jmb_pkt_sz = tp->dma_limit - ETH_HLEN; 13647 13648 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP; 13649 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; 13650 13651 if (!netif_running(tp->dev)) { 13652 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13653 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13654 if (do_extlpbk) 13655 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13656 goto done; 13657 } 13658 13659 err = tg3_reset_hw(tp, true); 13660 if (err) { 13661 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13662 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13663 if (do_extlpbk) 13664 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13665 goto done; 13666 } 13667 13668 if (tg3_flag(tp, ENABLE_RSS)) { 13669 int 
i; 13670 13671 /* Reroute all rx packets to the 1st queue */ 13672 for (i = MAC_RSS_INDIR_TBL_0; 13673 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4) 13674 tw32(i, 0x0); 13675 } 13676 13677 /* HW errata - mac loopback fails in some cases on 5780. 13678 * Normal traffic and PHY loopback are not affected by 13679 * errata. Also, the MAC loopback test is deprecated for 13680 * all newer ASIC revisions. 13681 */ 13682 if (tg3_asic_rev(tp) != ASIC_REV_5780 && 13683 !tg3_flag(tp, CPMU_PRESENT)) { 13684 tg3_mac_loopback(tp, true); 13685 13686 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13687 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED; 13688 13689 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13690 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13691 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED; 13692 13693 tg3_mac_loopback(tp, false); 13694 } 13695 13696 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 13697 !tg3_flag(tp, USE_PHYLIB)) { 13698 int i; 13699 13700 tg3_phy_lpbk_set(tp, 0, false); 13701 13702 /* Wait for link */ 13703 for (i = 0; i < 100; i++) { 13704 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 13705 break; 13706 mdelay(1); 13707 } 13708 13709 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13710 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED; 13711 if (tg3_flag(tp, TSO_CAPABLE) && 13712 tg3_run_loopback(tp, ETH_FRAME_LEN, true)) 13713 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED; 13714 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13715 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13716 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED; 13717 13718 if (do_extlpbk) { 13719 tg3_phy_lpbk_set(tp, 0, true); 13720 13721 /* All link indications report up, but the hardware 13722 * isn't really ready for about 20 msec. Double it 13723 * to be sure. 13724 */ 13725 mdelay(40); 13726 13727 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13728 data[TG3_EXT_LOOPB_TEST] |= 13729 TG3_STD_LOOPBACK_FAILED; 13730 if (tg3_flag(tp, TSO_CAPABLE) && 13731 tg3_run_loopback(tp, ETH_FRAME_LEN, true)) 13732 data[TG3_EXT_LOOPB_TEST] |= 13733 TG3_TSO_LOOPBACK_FAILED; 13734 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13735 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13736 data[TG3_EXT_LOOPB_TEST] |= 13737 TG3_JMB_LOOPBACK_FAILED; 13738 } 13739 13740 /* Re-enable gphy autopowerdown. */ 13741 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 13742 tg3_phy_toggle_apd(tp, true); 13743 } 13744 13745 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] | 13746 data[TG3_EXT_LOOPB_TEST]) ? 
-EIO : 0; 13747 13748 done: 13749 tp->phy_flags |= eee_cap; 13750 13751 return err; 13752 } 13753 13754 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, 13755 u64 *data) 13756 { 13757 struct tg3 *tp = netdev_priv(dev); 13758 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB; 13759 13760 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { 13761 if (tg3_power_up(tp)) { 13762 etest->flags |= ETH_TEST_FL_FAILED; 13763 memset(data, 1, sizeof(u64) * TG3_NUM_TEST); 13764 return; 13765 } 13766 tg3_ape_driver_state_change(tp, RESET_KIND_INIT); 13767 } 13768 13769 memset(data, 0, sizeof(u64) * TG3_NUM_TEST); 13770 13771 if (tg3_test_nvram(tp) != 0) { 13772 etest->flags |= ETH_TEST_FL_FAILED; 13773 data[TG3_NVRAM_TEST] = 1; 13774 } 13775 if (!doextlpbk && tg3_test_link(tp)) { 13776 etest->flags |= ETH_TEST_FL_FAILED; 13777 data[TG3_LINK_TEST] = 1; 13778 } 13779 if (etest->flags & ETH_TEST_FL_OFFLINE) { 13780 int err, err2 = 0, irq_sync = 0; 13781 13782 if (netif_running(dev)) { 13783 tg3_phy_stop(tp); 13784 tg3_netif_stop(tp); 13785 irq_sync = 1; 13786 } 13787 13788 tg3_full_lock(tp, irq_sync); 13789 tg3_halt(tp, RESET_KIND_SUSPEND, 1); 13790 err = tg3_nvram_lock(tp); 13791 tg3_halt_cpu(tp, RX_CPU_BASE); 13792 if (!tg3_flag(tp, 5705_PLUS)) 13793 tg3_halt_cpu(tp, TX_CPU_BASE); 13794 if (!err) 13795 tg3_nvram_unlock(tp); 13796 13797 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 13798 tg3_phy_reset(tp); 13799 13800 if (tg3_test_registers(tp) != 0) { 13801 etest->flags |= ETH_TEST_FL_FAILED; 13802 data[TG3_REGISTER_TEST] = 1; 13803 } 13804 13805 if (tg3_test_memory(tp) != 0) { 13806 etest->flags |= ETH_TEST_FL_FAILED; 13807 data[TG3_MEMORY_TEST] = 1; 13808 } 13809 13810 if (doextlpbk) 13811 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; 13812 13813 if (tg3_test_loopback(tp, data, doextlpbk)) 13814 etest->flags |= ETH_TEST_FL_FAILED; 13815 13816 tg3_full_unlock(tp); 13817 13818 if (tg3_test_interrupt(tp) != 0) { 13819 etest->flags |= ETH_TEST_FL_FAILED; 13820 data[TG3_INTERRUPT_TEST] = 1; 13821 } 13822 13823 tg3_full_lock(tp, 0); 13824 13825 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 13826 if (netif_running(dev)) { 13827 tg3_flag_set(tp, INIT_COMPLETE); 13828 err2 = tg3_restart_hw(tp, true); 13829 if (!err2) 13830 tg3_netif_start(tp); 13831 } 13832 13833 tg3_full_unlock(tp); 13834 13835 if (irq_sync && !err2) 13836 tg3_phy_start(tp); 13837 } 13838 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 13839 tg3_power_down_prepare(tp); 13840 13841 } 13842 13843 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) 13844 { 13845 struct tg3 *tp = netdev_priv(dev); 13846 struct hwtstamp_config stmpconf; 13847 13848 if (!tg3_flag(tp, PTP_CAPABLE)) 13849 return -EOPNOTSUPP; 13850 13851 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf))) 13852 return -EFAULT; 13853 13854 if (stmpconf.tx_type != HWTSTAMP_TX_ON && 13855 stmpconf.tx_type != HWTSTAMP_TX_OFF) 13856 return -ERANGE; 13857 13858 switch (stmpconf.rx_filter) { 13859 case HWTSTAMP_FILTER_NONE: 13860 tp->rxptpctl = 0; 13861 break; 13862 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 13863 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13864 TG3_RX_PTP_CTL_ALL_V1_EVENTS; 13865 break; 13866 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 13867 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13868 TG3_RX_PTP_CTL_SYNC_EVNT; 13869 break; 13870 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 13871 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13872 TG3_RX_PTP_CTL_DELAY_REQ; 13873 break; 13874 case HWTSTAMP_FILTER_PTP_V2_EVENT: 13875 tp->rxptpctl = 
TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13876 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13877 break; 13878 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 13879 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13880 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13881 break; 13882 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 13883 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13884 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13885 break; 13886 case HWTSTAMP_FILTER_PTP_V2_SYNC: 13887 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13888 TG3_RX_PTP_CTL_SYNC_EVNT; 13889 break; 13890 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 13891 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13892 TG3_RX_PTP_CTL_SYNC_EVNT; 13893 break; 13894 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 13895 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13896 TG3_RX_PTP_CTL_SYNC_EVNT; 13897 break; 13898 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 13899 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13900 TG3_RX_PTP_CTL_DELAY_REQ; 13901 break; 13902 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 13903 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13904 TG3_RX_PTP_CTL_DELAY_REQ; 13905 break; 13906 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 13907 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13908 TG3_RX_PTP_CTL_DELAY_REQ; 13909 break; 13910 default: 13911 return -ERANGE; 13912 } 13913 13914 if (netif_running(dev) && tp->rxptpctl) 13915 tw32(TG3_RX_PTP_CTL, 13916 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); 13917 13918 if (stmpconf.tx_type == HWTSTAMP_TX_ON) 13919 tg3_flag_set(tp, TX_TSTAMP_EN); 13920 else 13921 tg3_flag_clear(tp, TX_TSTAMP_EN); 13922 13923 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 13924 -EFAULT : 0; 13925 } 13926 13927 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) 13928 { 13929 struct tg3 *tp = netdev_priv(dev); 13930 struct hwtstamp_config stmpconf; 13931 13932 if (!tg3_flag(tp, PTP_CAPABLE)) 13933 return -EOPNOTSUPP; 13934 13935 stmpconf.flags = 0; 13936 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ? 
13937 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF); 13938 13939 switch (tp->rxptpctl) { 13940 case 0: 13941 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE; 13942 break; 13943 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS: 13944 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 13945 break; 13946 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13947 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; 13948 break; 13949 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13950 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; 13951 break; 13952 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 13953 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 13954 break; 13955 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 13956 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; 13957 break; 13958 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 13959 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; 13960 break; 13961 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13962 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; 13963 break; 13964 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13965 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC; 13966 break; 13967 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13968 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; 13969 break; 13970 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13971 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; 13972 break; 13973 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13974 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ; 13975 break; 13976 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13977 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; 13978 break; 13979 default: 13980 WARN_ON_ONCE(1); 13981 return -ERANGE; 13982 } 13983 13984 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 
13985 -EFAULT : 0; 13986 } 13987 13988 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 13989 { 13990 struct mii_ioctl_data *data = if_mii(ifr); 13991 struct tg3 *tp = netdev_priv(dev); 13992 int err; 13993 13994 if (tg3_flag(tp, USE_PHYLIB)) { 13995 struct phy_device *phydev; 13996 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 13997 return -EAGAIN; 13998 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 13999 return phy_mii_ioctl(phydev, ifr, cmd); 14000 } 14001 14002 switch (cmd) { 14003 case SIOCGMIIPHY: 14004 data->phy_id = tp->phy_addr; 14005 14006 fallthrough; 14007 case SIOCGMIIREG: { 14008 u32 mii_regval; 14009 14010 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 14011 break; /* We have no PHY */ 14012 14013 if (!netif_running(dev)) 14014 return -EAGAIN; 14015 14016 spin_lock_bh(&tp->lock); 14017 err = __tg3_readphy(tp, data->phy_id & 0x1f, 14018 data->reg_num & 0x1f, &mii_regval); 14019 spin_unlock_bh(&tp->lock); 14020 14021 data->val_out = mii_regval; 14022 14023 return err; 14024 } 14025 14026 case SIOCSMIIREG: 14027 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 14028 break; /* We have no PHY */ 14029 14030 if (!netif_running(dev)) 14031 return -EAGAIN; 14032 14033 spin_lock_bh(&tp->lock); 14034 err = __tg3_writephy(tp, data->phy_id & 0x1f, 14035 data->reg_num & 0x1f, data->val_in); 14036 spin_unlock_bh(&tp->lock); 14037 14038 return err; 14039 14040 case SIOCSHWTSTAMP: 14041 return tg3_hwtstamp_set(dev, ifr); 14042 14043 case SIOCGHWTSTAMP: 14044 return tg3_hwtstamp_get(dev, ifr); 14045 14046 default: 14047 /* do nothing */ 14048 break; 14049 } 14050 return -EOPNOTSUPP; 14051 } 14052 14053 static int tg3_get_coalesce(struct net_device *dev, 14054 struct ethtool_coalesce *ec, 14055 struct kernel_ethtool_coalesce *kernel_coal, 14056 struct netlink_ext_ack *extack) 14057 { 14058 struct tg3 *tp = netdev_priv(dev); 14059 14060 memcpy(ec, &tp->coal, sizeof(*ec)); 14061 return 0; 14062 } 14063 14064 static int tg3_set_coalesce(struct net_device *dev, 14065 struct ethtool_coalesce *ec, 14066 struct kernel_ethtool_coalesce *kernel_coal, 14067 struct netlink_ext_ack *extack) 14068 { 14069 struct tg3 *tp = netdev_priv(dev); 14070 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; 14071 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0; 14072 14073 if (!tg3_flag(tp, 5705_PLUS)) { 14074 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT; 14075 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT; 14076 max_stat_coal_ticks = MAX_STAT_COAL_TICKS; 14077 min_stat_coal_ticks = MIN_STAT_COAL_TICKS; 14078 } 14079 14080 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || 14081 (!ec->rx_coalesce_usecs) || 14082 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || 14083 (!ec->tx_coalesce_usecs) || 14084 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || 14085 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || 14086 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) || 14087 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) || 14088 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) || 14089 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) || 14090 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) || 14091 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks)) 14092 return -EINVAL; 14093 14094 /* Only copy relevant parameters, ignore all others. 
 * (The ethtool core rejects attempts to change fields that are not
 * listed in supported_coalesce_params before this function is reached.)
 */
*/ 14095 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs; 14096 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs; 14097 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames; 14098 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames; 14099 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq; 14100 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq; 14101 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq; 14102 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq; 14103 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs; 14104 14105 if (netif_running(dev)) { 14106 tg3_full_lock(tp, 0); 14107 __tg3_set_coalesce(tp, &tp->coal); 14108 tg3_full_unlock(tp); 14109 } 14110 return 0; 14111 } 14112 14113 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata) 14114 { 14115 struct tg3 *tp = netdev_priv(dev); 14116 14117 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { 14118 netdev_warn(tp->dev, "Board does not support EEE!\n"); 14119 return -EOPNOTSUPP; 14120 } 14121 14122 if (edata->advertised != tp->eee.advertised) { 14123 netdev_warn(tp->dev, 14124 "Direct manipulation of EEE advertisement is not supported\n"); 14125 return -EINVAL; 14126 } 14127 14128 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) { 14129 netdev_warn(tp->dev, 14130 "Maximal Tx Lpi timer supported is %#x(u)\n", 14131 TG3_CPMU_DBTMR1_LNKIDLE_MAX); 14132 return -EINVAL; 14133 } 14134 14135 tp->eee = *edata; 14136 14137 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 14138 tg3_warn_mgmt_link_flap(tp); 14139 14140 if (netif_running(tp->dev)) { 14141 tg3_full_lock(tp, 0); 14142 tg3_setup_eee(tp); 14143 tg3_phy_reset(tp); 14144 tg3_full_unlock(tp); 14145 } 14146 14147 return 0; 14148 } 14149 14150 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata) 14151 { 14152 struct tg3 *tp = netdev_priv(dev); 14153 14154 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { 14155 netdev_warn(tp->dev, 14156 "Board does not support EEE!\n"); 14157 return -EOPNOTSUPP; 14158 } 14159 14160 *edata = tp->eee; 14161 return 0; 14162 } 14163 14164 static const struct ethtool_ops tg3_ethtool_ops = { 14165 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 14166 ETHTOOL_COALESCE_MAX_FRAMES | 14167 ETHTOOL_COALESCE_USECS_IRQ | 14168 ETHTOOL_COALESCE_MAX_FRAMES_IRQ | 14169 ETHTOOL_COALESCE_STATS_BLOCK_USECS, 14170 .get_drvinfo = tg3_get_drvinfo, 14171 .get_regs_len = tg3_get_regs_len, 14172 .get_regs = tg3_get_regs, 14173 .get_wol = tg3_get_wol, 14174 .set_wol = tg3_set_wol, 14175 .get_msglevel = tg3_get_msglevel, 14176 .set_msglevel = tg3_set_msglevel, 14177 .nway_reset = tg3_nway_reset, 14178 .get_link = ethtool_op_get_link, 14179 .get_eeprom_len = tg3_get_eeprom_len, 14180 .get_eeprom = tg3_get_eeprom, 14181 .set_eeprom = tg3_set_eeprom, 14182 .get_ringparam = tg3_get_ringparam, 14183 .set_ringparam = tg3_set_ringparam, 14184 .get_pauseparam = tg3_get_pauseparam, 14185 .set_pauseparam = tg3_set_pauseparam, 14186 .self_test = tg3_self_test, 14187 .get_strings = tg3_get_strings, 14188 .set_phys_id = tg3_set_phys_id, 14189 .get_ethtool_stats = tg3_get_ethtool_stats, 14190 .get_coalesce = tg3_get_coalesce, 14191 .set_coalesce = tg3_set_coalesce, 14192 .get_sset_count = tg3_get_sset_count, 14193 .get_rxnfc = tg3_get_rxnfc, 14194 .get_rxfh_indir_size = tg3_get_rxfh_indir_size, 14195 .get_rxfh = tg3_get_rxfh, 14196 .set_rxfh = tg3_set_rxfh, 14197 .get_channels = tg3_get_channels, 14198 .set_channels = tg3_set_channels, 14199 .get_ts_info 
static const struct ethtool_ops tg3_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS_IRQ |
				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
				     ETHTOOL_COALESCE_STATS_BLOCK_USECS,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size	= tg3_get_rxfh_indir_size,
	.get_rxfh		= tg3_get_rxfh,
	.set_rxfh		= tg3_set_rxfh,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
	.get_eee		= tg3_get_eee,
	.set_eee		= tg3_set_eee,
	.get_link_ksettings	= tg3_get_link_ksettings,
	.set_link_ksettings	= tg3_set_link_ksettings,
};

static void tg3_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
		*stats = tp->net_stats_prev;
		spin_unlock_bh(&tp->lock);
		return;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);
}

static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}

static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}

static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;
	bool reset_phy = false;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_set_mtu(dev, tp, new_mtu);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
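	 * The workaround is applied only on the 57766, 5717, 5719 and
	 * 5720 ASICs, matching the checks just below.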
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		reset_phy = true;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_eth_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};

static void tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}

static void tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing. We want to operate on the
			 * 16-bit value at offset 0xf2. The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses. This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU. The 16-bit
			 * byteswap then brings the data to CPU endianness.
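			 * For example (little-endian CPU): a 512KB part
			 * stores 512 (0x0200) as its size in KB; the read
			 * yields 0x0002 in the low 16 bits, and swab16()
			 * restores 0x0200 before the multiply by 1024.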
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}

static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}

static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}

static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}

static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}

static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}

static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}

static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}

static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}


static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_get_nvram_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_get_nvram_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}

static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp, nv_status;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		switch (nvmpinstrp) {
		case FLASH_5762_MX25L_100:
		case FLASH_5762_MX25L_200:
		case FLASH_5762_MX25L_400:
		case FLASH_5762_MX25L_800:
		case FLASH_5762_MX25L_160_320:
			tp->nvram_pagesize = 4096;
			tp->nvram_jedecnum = JEDEC_MACRONIX;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
			tg3_flag_set(tp, FLASH);
			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
			tp->nvram_size =
				(1 << (nv_status >> AUTOSENSE_DEVID &
						AUTOSENSE_DEVID_MASK)
					<< AUTOSENSE_SIZE_IN_MB);
			return;

		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}

/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}

struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

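/* Boards whose NVRAM carries no usable PHY ID are matched by PCI
 * subsystem IDs here instead; tg3_phy_probe() consults this table as a
 * last resort. An entry with a phy_id of 0 leads the probe code to
 * treat the board as SerDes/fiber.
 */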
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}

static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.
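	 * Both defaults may be overridden below once the device
	 * configuration left behind by bootcode has been examined.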
	 */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
		u32 nic_phy_id, ver, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			if (tg3_flag(tp, 5717_PLUS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
						LED_CTRL_BLINK_RATE_MASK;

			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by
		 * bootcode if bit 18 is set
		 */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);

		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}

static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(10);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}

static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}

/* Read the gphy configuration from the OTP region of the chip. The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
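 * Concretely, bits 15:0 of the word read at OTP_ADDRESS_MAGIC1 become
 * bits 31:16 of the result, and bits 31:16 of the word read at
 * OTP_ADDRESS_MAGIC2 become bits 15:0.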
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}

static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
			adv |= ADVERTISED_1000baseT_Half;
		adv |= ADVERTISED_1000baseT_Full;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}

static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane. If it doesn't look good, we fall back
		 * to the PHY ID obtained from the EEPROM area and, failing
		 * that, to the hard-coded subsystem-ID table.
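		 * (When ASF or APE firmware owns the PHY, the read is
		 * skipped entirely and those fallbacks are used.)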
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature? Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to a BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to an SSB
				 * core for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		tp->eee.supported = SUPPORTED_100baseT_Full |
				    SUPPORTED_1000baseT_Full;
		tp->eee.advertised = ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Full;
		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}

static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int len, vpdlen;
	int i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
					 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
	if (i < 0)
		goto partno;

	if (len != 4 || memcmp(vpd_data + i, "1028", 4))
		goto partno;

	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
					 PCI_VPD_RO_KEYWORD_VENDOR0, &len);
	if (i < 0)
		goto partno;

	memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
	snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);

partno:
	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
					 PCI_VPD_RO_KEYWORD_PARTNO, &len);
	if (i < 0)
		goto out_not_found;

	if (len > TG3_BPN_SIZE)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}

static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    val != 0)
		return 0;

	return 1;
}

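/* Read the bootcode version out of NVRAM and append it to tp->fw_ver.
 * Newer images carry a 16-byte version string; older ones only encode
 * a major/minor pair in the directory entry.
 */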
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}

static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}

static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}

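/* Scan the NVRAM directory for an ASF management firmware image and,
 * if one is found and valid, append its version string to tp->fw_ver.
 */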
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}

static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}

static void tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
		fwtype = "SMASH";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}

static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}

static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}

static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}

static const struct pci_device_id tg3_write_reorder_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};

static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}

static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
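	 * (The 5717 C0 ID is likewise remapped to 5720 A0 just below.)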
16111 */
16112 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16113 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16114
16115 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16116 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16117
16118 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16119 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16120 tg3_asic_rev(tp) == ASIC_REV_5720)
16121 tg3_flag_set(tp, 5717_PLUS);
16122
16123 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16124 tg3_asic_rev(tp) == ASIC_REV_57766)
16125 tg3_flag_set(tp, 57765_CLASS);
16126
16127 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16128 tg3_asic_rev(tp) == ASIC_REV_5762)
16129 tg3_flag_set(tp, 57765_PLUS);
16130
16131 /* Intentionally exclude ASIC_REV_5906 */
16132 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16133 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16134 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16135 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16136 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16137 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16138 tg3_flag(tp, 57765_PLUS))
16139 tg3_flag_set(tp, 5755_PLUS);
16140
16141 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16142 tg3_asic_rev(tp) == ASIC_REV_5714)
16143 tg3_flag_set(tp, 5780_CLASS);
16144
16145 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16146 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16147 tg3_asic_rev(tp) == ASIC_REV_5906 ||
16148 tg3_flag(tp, 5755_PLUS) ||
16149 tg3_flag(tp, 5780_CLASS))
16150 tg3_flag_set(tp, 5750_PLUS);
16151
16152 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16153 tg3_flag(tp, 5750_PLUS))
16154 tg3_flag_set(tp, 5705_PLUS);
16155 }
16156
16157 static bool tg3_10_100_only_device(struct tg3 *tp,
16158 const struct pci_device_id *ent)
16159 {
16160 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16161
16162 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16163 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16164 (tp->phy_flags & TG3_PHYFLG_IS_FET))
16165 return true;
16166
16167 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16168 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16169 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16170 return true;
16171 } else {
16172 return true;
16173 }
16174 }
16175
16176 return false;
16177 }
16178
16179 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16180 {
16181 u32 misc_ctrl_reg;
16182 u32 pci_state_reg, grc_misc_cfg;
16183 u32 val;
16184 u16 pci_cmd;
16185 int err;
16186
16187 /* Force memory write invalidate off. If we leave it on,
16188 * then on 5700_BX chips we have to enable a workaround.
16189 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16190 * to match the cacheline size. The Broadcom driver has this
16191 * workaround but turns MWI off all the time so it never uses
16192 * it. This seems to suggest that the workaround is insufficient.
16193 */
16194 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16195 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16196 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16197
16198 /* Important! -- Make sure register accesses are byteswapped
16199 * correctly. Also, for those chips that require it, make
16200 * sure that indirect register accesses are enabled before
16201 * the first operation.
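 *
 * Note that only the chip revision field of the value read back is
 * merged into tp->misc_host_ctrl below; the swap and indirect-access
 * bits themselves were seeded earlier in tg3_init_one(), before this
 * function was called.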
16202 */ 16203 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 16204 &misc_ctrl_reg); 16205 tp->misc_host_ctrl |= (misc_ctrl_reg & 16206 MISC_HOST_CTRL_CHIPREV); 16207 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 16208 tp->misc_host_ctrl); 16209 16210 tg3_detect_asic_rev(tp, misc_ctrl_reg); 16211 16212 /* If we have 5702/03 A1 or A2 on certain ICH chipsets, 16213 * we need to disable memory and use config. cycles 16214 * only to access all registers. The 5702/03 chips 16215 * can mistakenly decode the special cycles from the 16216 * ICH chipsets as memory write cycles, causing corruption 16217 * of register and memory space. Only certain ICH bridges 16218 * will drive special cycles with non-zero data during the 16219 * address phase which can fall within the 5703's address 16220 * range. This is not an ICH bug as the PCI spec allows 16221 * non-zero address during special cycles. However, only 16222 * these ICH bridges are known to drive non-zero addresses 16223 * during special cycles. 16224 * 16225 * Since special cycles do not cross PCI bridges, we only 16226 * enable this workaround if the 5703 is on the secondary 16227 * bus of these ICH bridges. 16228 */ 16229 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) || 16230 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) { 16231 static struct tg3_dev_id { 16232 u32 vendor; 16233 u32 device; 16234 u32 rev; 16235 } ich_chipsets[] = { 16236 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8, 16237 PCI_ANY_ID }, 16238 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8, 16239 PCI_ANY_ID }, 16240 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11, 16241 0xa }, 16242 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6, 16243 PCI_ANY_ID }, 16244 { }, 16245 }; 16246 struct tg3_dev_id *pci_id = &ich_chipsets[0]; 16247 struct pci_dev *bridge = NULL; 16248 16249 while (pci_id->vendor != 0) { 16250 bridge = pci_get_device(pci_id->vendor, pci_id->device, 16251 bridge); 16252 if (!bridge) { 16253 pci_id++; 16254 continue; 16255 } 16256 if (pci_id->rev != PCI_ANY_ID) { 16257 if (bridge->revision > pci_id->rev) 16258 continue; 16259 } 16260 if (bridge->subordinate && 16261 (bridge->subordinate->number == 16262 tp->pdev->bus->number)) { 16263 tg3_flag_set(tp, ICH_WORKAROUND); 16264 pci_dev_put(bridge); 16265 break; 16266 } 16267 } 16268 } 16269 16270 if (tg3_asic_rev(tp) == ASIC_REV_5701) { 16271 static struct tg3_dev_id { 16272 u32 vendor; 16273 u32 device; 16274 } bridge_chipsets[] = { 16275 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 }, 16276 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 }, 16277 { }, 16278 }; 16279 struct tg3_dev_id *pci_id = &bridge_chipsets[0]; 16280 struct pci_dev *bridge = NULL; 16281 16282 while (pci_id->vendor != 0) { 16283 bridge = pci_get_device(pci_id->vendor, 16284 pci_id->device, 16285 bridge); 16286 if (!bridge) { 16287 pci_id++; 16288 continue; 16289 } 16290 if (bridge->subordinate && 16291 (bridge->subordinate->number <= 16292 tp->pdev->bus->number) && 16293 (bridge->subordinate->busn_res.end >= 16294 tp->pdev->bus->number)) { 16295 tg3_flag_set(tp, 5701_DMA_BUG); 16296 pci_dev_put(bridge); 16297 break; 16298 } 16299 } 16300 } 16301 16302 /* The EPB bridge inside 5714, 5715, and 5780 cannot support 16303 * DMA addresses > 40-bit. This bridge may have other additional 16304 * 57xx devices behind it in some 4-port NIC designs for example. 16305 * Any tg3 device found behind the bridge will also need the 40-bit 16306 * DMA workaround. 
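 *
 * The else-branch below walks every ServerWorks EPB bridge in the
 * system and checks whether our bus number falls within that
 * bridge's subordinate bus range.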
16307 */ 16308 if (tg3_flag(tp, 5780_CLASS)) { 16309 tg3_flag_set(tp, 40BIT_DMA_BUG); 16310 tp->msi_cap = tp->pdev->msi_cap; 16311 } else { 16312 struct pci_dev *bridge = NULL; 16313 16314 do { 16315 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS, 16316 PCI_DEVICE_ID_SERVERWORKS_EPB, 16317 bridge); 16318 if (bridge && bridge->subordinate && 16319 (bridge->subordinate->number <= 16320 tp->pdev->bus->number) && 16321 (bridge->subordinate->busn_res.end >= 16322 tp->pdev->bus->number)) { 16323 tg3_flag_set(tp, 40BIT_DMA_BUG); 16324 pci_dev_put(bridge); 16325 break; 16326 } 16327 } while (bridge); 16328 } 16329 16330 if (tg3_asic_rev(tp) == ASIC_REV_5704 || 16331 tg3_asic_rev(tp) == ASIC_REV_5714) 16332 tp->pdev_peer = tg3_find_peer(tp); 16333 16334 /* Determine TSO capabilities */ 16335 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0) 16336 ; /* Do nothing. HW bug. */ 16337 else if (tg3_flag(tp, 57765_PLUS)) 16338 tg3_flag_set(tp, HW_TSO_3); 16339 else if (tg3_flag(tp, 5755_PLUS) || 16340 tg3_asic_rev(tp) == ASIC_REV_5906) 16341 tg3_flag_set(tp, HW_TSO_2); 16342 else if (tg3_flag(tp, 5750_PLUS)) { 16343 tg3_flag_set(tp, HW_TSO_1); 16344 tg3_flag_set(tp, TSO_BUG); 16345 if (tg3_asic_rev(tp) == ASIC_REV_5750 && 16346 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2) 16347 tg3_flag_clear(tp, TSO_BUG); 16348 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 && 16349 tg3_asic_rev(tp) != ASIC_REV_5701 && 16350 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 16351 tg3_flag_set(tp, FW_TSO); 16352 tg3_flag_set(tp, TSO_BUG); 16353 if (tg3_asic_rev(tp) == ASIC_REV_5705) 16354 tp->fw_needed = FIRMWARE_TG3TSO5; 16355 else 16356 tp->fw_needed = FIRMWARE_TG3TSO; 16357 } 16358 16359 /* Selectively allow TSO based on operating conditions */ 16360 if (tg3_flag(tp, HW_TSO_1) || 16361 tg3_flag(tp, HW_TSO_2) || 16362 tg3_flag(tp, HW_TSO_3) || 16363 tg3_flag(tp, FW_TSO)) { 16364 /* For firmware TSO, assume ASF is disabled. 16365 * We'll disable TSO later if we discover ASF 16366 * is enabled in tg3_get_eeprom_hw_cfg(). 
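 * That check lives further down in this function: once
 * tg3_get_eeprom_hw_cfg() has run, FW_TSO combined with ENABLE_ASF
 * clears TSO_CAPABLE, TSO_BUG and tp->fw_needed again.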
16367 */ 16368 tg3_flag_set(tp, TSO_CAPABLE); 16369 } else { 16370 tg3_flag_clear(tp, TSO_CAPABLE); 16371 tg3_flag_clear(tp, TSO_BUG); 16372 tp->fw_needed = NULL; 16373 } 16374 16375 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) 16376 tp->fw_needed = FIRMWARE_TG3; 16377 16378 if (tg3_asic_rev(tp) == ASIC_REV_57766) 16379 tp->fw_needed = FIRMWARE_TG357766; 16380 16381 tp->irq_max = 1; 16382 16383 if (tg3_flag(tp, 5750_PLUS)) { 16384 tg3_flag_set(tp, SUPPORT_MSI); 16385 if (tg3_chip_rev(tp) == CHIPREV_5750_AX || 16386 tg3_chip_rev(tp) == CHIPREV_5750_BX || 16387 (tg3_asic_rev(tp) == ASIC_REV_5714 && 16388 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 && 16389 tp->pdev_peer == tp->pdev)) 16390 tg3_flag_clear(tp, SUPPORT_MSI); 16391 16392 if (tg3_flag(tp, 5755_PLUS) || 16393 tg3_asic_rev(tp) == ASIC_REV_5906) { 16394 tg3_flag_set(tp, 1SHOT_MSI); 16395 } 16396 16397 if (tg3_flag(tp, 57765_PLUS)) { 16398 tg3_flag_set(tp, SUPPORT_MSIX); 16399 tp->irq_max = TG3_IRQ_MAX_VECS; 16400 } 16401 } 16402 16403 tp->txq_max = 1; 16404 tp->rxq_max = 1; 16405 if (tp->irq_max > 1) { 16406 tp->rxq_max = TG3_RSS_MAX_NUM_QS; 16407 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS); 16408 16409 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 16410 tg3_asic_rev(tp) == ASIC_REV_5720) 16411 tp->txq_max = tp->irq_max - 1; 16412 } 16413 16414 if (tg3_flag(tp, 5755_PLUS) || 16415 tg3_asic_rev(tp) == ASIC_REV_5906) 16416 tg3_flag_set(tp, SHORT_DMA_BUG); 16417 16418 if (tg3_asic_rev(tp) == ASIC_REV_5719) 16419 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K; 16420 16421 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16422 tg3_asic_rev(tp) == ASIC_REV_5719 || 16423 tg3_asic_rev(tp) == ASIC_REV_5720 || 16424 tg3_asic_rev(tp) == ASIC_REV_5762) 16425 tg3_flag_set(tp, LRG_PROD_RING_CAP); 16426 16427 if (tg3_flag(tp, 57765_PLUS) && 16428 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0) 16429 tg3_flag_set(tp, USE_JUMBO_BDFLAG); 16430 16431 if (!tg3_flag(tp, 5705_PLUS) || 16432 tg3_flag(tp, 5780_CLASS) || 16433 tg3_flag(tp, USE_JUMBO_BDFLAG)) 16434 tg3_flag_set(tp, JUMBO_CAPABLE); 16435 16436 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 16437 &pci_state_reg); 16438 16439 if (pci_is_pcie(tp->pdev)) { 16440 u16 lnkctl; 16441 16442 tg3_flag_set(tp, PCI_EXPRESS); 16443 16444 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl); 16445 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) { 16446 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 16447 tg3_flag_clear(tp, HW_TSO_2); 16448 tg3_flag_clear(tp, TSO_CAPABLE); 16449 } 16450 if (tg3_asic_rev(tp) == ASIC_REV_5784 || 16451 tg3_asic_rev(tp) == ASIC_REV_5761 || 16452 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 || 16453 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1) 16454 tg3_flag_set(tp, CLKREQ_BUG); 16455 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) { 16456 tg3_flag_set(tp, L1PLLPD_EN); 16457 } 16458 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) { 16459 /* BCM5785 devices are effectively PCIe devices, and should 16460 * follow PCIe codepaths, but do not have a PCIe capabilities 16461 * section. 
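 * Set the PCI_EXPRESS flag by hand, since the pci_is_pcie() test
 * above cannot identify them.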
16462 */
16463 tg3_flag_set(tp, PCI_EXPRESS);
16464 } else if (!tg3_flag(tp, 5705_PLUS) ||
16465 tg3_flag(tp, 5780_CLASS)) {
16466 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16467 if (!tp->pcix_cap) {
16468 dev_err(&tp->pdev->dev,
16469 "Cannot find PCI-X capability, aborting\n");
16470 return -EIO;
16471 }
16472
16473 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16474 tg3_flag_set(tp, PCIX_MODE);
16475 }
16476
16477 /* If we have an AMD 762 or VIA K8T800 chipset, write
16478 * reordering to the mailbox registers done by the host
16479 * controller can cause major troubles. We read back from
16480 * every mailbox register write to force the writes to be
16481 * posted to the chip in order.
16482 */
16483 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16484 !tg3_flag(tp, PCI_EXPRESS))
16485 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16486
16487 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16488 &tp->pci_cacheline_sz);
16489 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16490 &tp->pci_lat_timer);
16491 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16492 tp->pci_lat_timer < 64) {
16493 tp->pci_lat_timer = 64;
16494 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16495 tp->pci_lat_timer);
16496 }
16497
16498 /* Important! -- It is critical that the PCI-X hw workaround
16499 * situation is decided before the first MMIO register access.
16500 */
16501 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16502 /* 5700 BX chips need to have their TX producer index
16503 * mailboxes written twice to work around a bug.
16504 */
16505 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16506
16507 /* If we are in PCI-X mode, enable register write workaround.
16508 *
16509 * The workaround is to use indirect register accesses
16510 * for all chip writes not to mailbox registers.
16511 */
16512 if (tg3_flag(tp, PCIX_MODE)) {
16513 u32 pm_reg;
16514
16515 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16516
16517 /* The chip can have its power management PCI config
16518 * space registers clobbered due to this bug.
16519 * So explicitly force the chip into D0 here.
16520 */
16521 pci_read_config_dword(tp->pdev,
16522 tp->pdev->pm_cap + PCI_PM_CTRL,
16523 &pm_reg);
16524 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16525 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16526 pci_write_config_dword(tp->pdev,
16527 tp->pdev->pm_cap + PCI_PM_CTRL,
16528 pm_reg);
16529
16530 /* Also, force SERR#/PERR# in PCI command.
*/ 16531 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 16532 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; 16533 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 16534 } 16535 } 16536 16537 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0) 16538 tg3_flag_set(tp, PCI_HIGH_SPEED); 16539 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0) 16540 tg3_flag_set(tp, PCI_32BIT); 16541 16542 /* Chip-specific fixup from Broadcom driver */ 16543 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) && 16544 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) { 16545 pci_state_reg |= PCISTATE_RETRY_SAME_DMA; 16546 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg); 16547 } 16548 16549 /* Default fast path register access methods */ 16550 tp->read32 = tg3_read32; 16551 tp->write32 = tg3_write32; 16552 tp->read32_mbox = tg3_read32; 16553 tp->write32_mbox = tg3_write32; 16554 tp->write32_tx_mbox = tg3_write32; 16555 tp->write32_rx_mbox = tg3_write32; 16556 16557 /* Various workaround register access methods */ 16558 if (tg3_flag(tp, PCIX_TARGET_HWBUG)) 16559 tp->write32 = tg3_write_indirect_reg32; 16560 else if (tg3_asic_rev(tp) == ASIC_REV_5701 || 16561 (tg3_flag(tp, PCI_EXPRESS) && 16562 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) { 16563 /* 16564 * Back to back register writes can cause problems on these 16565 * chips, the workaround is to read back all reg writes 16566 * except those to mailbox regs. 16567 * 16568 * See tg3_write_indirect_reg32(). 16569 */ 16570 tp->write32 = tg3_write_flush_reg32; 16571 } 16572 16573 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) { 16574 tp->write32_tx_mbox = tg3_write32_tx_mbox; 16575 if (tg3_flag(tp, MBOX_WRITE_REORDER)) 16576 tp->write32_rx_mbox = tg3_write_flush_reg32; 16577 } 16578 16579 if (tg3_flag(tp, ICH_WORKAROUND)) { 16580 tp->read32 = tg3_read_indirect_reg32; 16581 tp->write32 = tg3_write_indirect_reg32; 16582 tp->read32_mbox = tg3_read_indirect_mbox; 16583 tp->write32_mbox = tg3_write_indirect_mbox; 16584 tp->write32_tx_mbox = tg3_write_indirect_mbox; 16585 tp->write32_rx_mbox = tg3_write_indirect_mbox; 16586 16587 iounmap(tp->regs); 16588 tp->regs = NULL; 16589 16590 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 16591 pci_cmd &= ~PCI_COMMAND_MEMORY; 16592 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 16593 } 16594 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 16595 tp->read32_mbox = tg3_read32_mbox_5906; 16596 tp->write32_mbox = tg3_write32_mbox_5906; 16597 tp->write32_tx_mbox = tg3_write32_mbox_5906; 16598 tp->write32_rx_mbox = tg3_write32_mbox_5906; 16599 } 16600 16601 if (tp->write32 == tg3_write_indirect_reg32 || 16602 (tg3_flag(tp, PCIX_MODE) && 16603 (tg3_asic_rev(tp) == ASIC_REV_5700 || 16604 tg3_asic_rev(tp) == ASIC_REV_5701))) 16605 tg3_flag_set(tp, SRAM_USE_CONFIG); 16606 16607 /* The memory arbiter has to be enabled in order for SRAM accesses 16608 * to succeed. Normally on powerup the tg3 chip firmware will make 16609 * sure it is enabled, but other entities such as system netboot 16610 * code might disable it. 
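 * The read-modify-write below just ORs MEMARB_MODE_ENABLE back in
 * rather than trusting whatever state the boot code left behind.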
16611 */ 16612 val = tr32(MEMARB_MODE); 16613 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); 16614 16615 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3; 16616 if (tg3_asic_rev(tp) == ASIC_REV_5704 || 16617 tg3_flag(tp, 5780_CLASS)) { 16618 if (tg3_flag(tp, PCIX_MODE)) { 16619 pci_read_config_dword(tp->pdev, 16620 tp->pcix_cap + PCI_X_STATUS, 16621 &val); 16622 tp->pci_fn = val & 0x7; 16623 } 16624 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16625 tg3_asic_rev(tp) == ASIC_REV_5719 || 16626 tg3_asic_rev(tp) == ASIC_REV_5720) { 16627 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val); 16628 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG) 16629 val = tr32(TG3_CPMU_STATUS); 16630 16631 if (tg3_asic_rev(tp) == ASIC_REV_5717) 16632 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0; 16633 else 16634 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >> 16635 TG3_CPMU_STATUS_FSHFT_5719; 16636 } 16637 16638 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) { 16639 tp->write32_tx_mbox = tg3_write_flush_reg32; 16640 tp->write32_rx_mbox = tg3_write_flush_reg32; 16641 } 16642 16643 /* Get eeprom hw config before calling tg3_set_power_state(). 16644 * In particular, the TG3_FLAG_IS_NIC flag must be 16645 * determined before calling tg3_set_power_state() so that 16646 * we know whether or not to switch out of Vaux power. 16647 * When the flag is set, it means that GPIO1 is used for eeprom 16648 * write protect and also implies that it is a LOM where GPIOs 16649 * are not used to switch power. 16650 */ 16651 tg3_get_eeprom_hw_cfg(tp); 16652 16653 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) { 16654 tg3_flag_clear(tp, TSO_CAPABLE); 16655 tg3_flag_clear(tp, TSO_BUG); 16656 tp->fw_needed = NULL; 16657 } 16658 16659 if (tg3_flag(tp, ENABLE_APE)) { 16660 /* Allow reads and writes to the 16661 * APE register and memory space. 16662 */ 16663 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR | 16664 PCISTATE_ALLOW_APE_SHMEM_WR | 16665 PCISTATE_ALLOW_APE_PSPACE_WR; 16666 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, 16667 pci_state_reg); 16668 16669 tg3_ape_lock_init(tp); 16670 tp->ape_hb_interval = 16671 msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC); 16672 } 16673 16674 /* Set up tp->grc_local_ctrl before calling 16675 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high 16676 * will bring 5700's external PHY out of reset. 16677 * It is also used as eeprom write protect on LOMs. 16678 */ 16679 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM; 16680 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 16681 tg3_flag(tp, EEPROM_WRITE_PROT)) 16682 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | 16683 GRC_LCLCTRL_GPIO_OUTPUT1); 16684 /* Unused GPIO3 must be driven as output on 5752 because there 16685 * are no pull-up resistors on unused GPIO pins. 16686 */ 16687 else if (tg3_asic_rev(tp) == ASIC_REV_5752) 16688 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; 16689 16690 if (tg3_asic_rev(tp) == ASIC_REV_5755 || 16691 tg3_asic_rev(tp) == ASIC_REV_57780 || 16692 tg3_flag(tp, 57765_CLASS)) 16693 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; 16694 16695 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 16696 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { 16697 /* Turn off the debug UART. */ 16698 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; 16699 if (tg3_flag(tp, IS_NIC)) 16700 /* Keep VMain power. 
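 * The GPIO_OE0/GPIO_OUTPUT0 bits set here appear to be what holds
 * the board on Vmain; the actual switch away from Vaux happens in
 * tg3_pwrsrc_switch_to_vmain() further down.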
*/ 16701 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | 16702 GRC_LCLCTRL_GPIO_OUTPUT0; 16703 } 16704 16705 if (tg3_asic_rev(tp) == ASIC_REV_5762) 16706 tp->grc_local_ctrl |= 16707 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL; 16708 16709 /* Switch out of Vaux if it is a NIC */ 16710 tg3_pwrsrc_switch_to_vmain(tp); 16711 16712 /* Derive initial jumbo mode from MTU assigned in 16713 * ether_setup() via the alloc_etherdev() call 16714 */ 16715 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS)) 16716 tg3_flag_set(tp, JUMBO_RING_ENABLE); 16717 16718 /* Determine WakeOnLan speed to use. */ 16719 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 16720 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 16721 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 || 16722 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) { 16723 tg3_flag_clear(tp, WOL_SPEED_100MB); 16724 } else { 16725 tg3_flag_set(tp, WOL_SPEED_100MB); 16726 } 16727 16728 if (tg3_asic_rev(tp) == ASIC_REV_5906) 16729 tp->phy_flags |= TG3_PHYFLG_IS_FET; 16730 16731 /* A few boards don't want Ethernet@WireSpeed phy feature */ 16732 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 16733 (tg3_asic_rev(tp) == ASIC_REV_5705 && 16734 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) && 16735 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) || 16736 (tp->phy_flags & TG3_PHYFLG_IS_FET) || 16737 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 16738 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED; 16739 16740 if (tg3_chip_rev(tp) == CHIPREV_5703_AX || 16741 tg3_chip_rev(tp) == CHIPREV_5704_AX) 16742 tp->phy_flags |= TG3_PHYFLG_ADC_BUG; 16743 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) 16744 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG; 16745 16746 if (tg3_flag(tp, 5705_PLUS) && 16747 !(tp->phy_flags & TG3_PHYFLG_IS_FET) && 16748 tg3_asic_rev(tp) != ASIC_REV_5785 && 16749 tg3_asic_rev(tp) != ASIC_REV_57780 && 16750 !tg3_flag(tp, 57765_PLUS)) { 16751 if (tg3_asic_rev(tp) == ASIC_REV_5755 || 16752 tg3_asic_rev(tp) == ASIC_REV_5787 || 16753 tg3_asic_rev(tp) == ASIC_REV_5784 || 16754 tg3_asic_rev(tp) == ASIC_REV_5761) { 16755 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 && 16756 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722) 16757 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG; 16758 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) 16759 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM; 16760 } else 16761 tp->phy_flags |= TG3_PHYFLG_BER_BUG; 16762 } 16763 16764 if (tg3_asic_rev(tp) == ASIC_REV_5784 && 16765 tg3_chip_rev(tp) != CHIPREV_5784_AX) { 16766 tp->phy_otp = tg3_read_otp_phycfg(tp); 16767 if (tp->phy_otp == 0) 16768 tp->phy_otp = TG3_OTP_DEFAULT; 16769 } 16770 16771 if (tg3_flag(tp, CPMU_PRESENT)) 16772 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; 16773 else 16774 tp->mi_mode = MAC_MI_MODE_BASE; 16775 16776 tp->coalesce_mode = 0; 16777 if (tg3_chip_rev(tp) != CHIPREV_5700_AX && 16778 tg3_chip_rev(tp) != CHIPREV_5700_BX) 16779 tp->coalesce_mode |= HOSTCC_MODE_32BYTE; 16780 16781 /* Set these bits to enable statistics workaround. */ 16782 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16783 tg3_asic_rev(tp) == ASIC_REV_5762 || 16784 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 16785 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) { 16786 tp->coalesce_mode |= HOSTCC_MODE_ATTN; 16787 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN; 16788 } 16789 16790 if (tg3_asic_rev(tp) == ASIC_REV_5785 || 16791 tg3_asic_rev(tp) == ASIC_REV_57780) 16792 tg3_flag_set(tp, USE_PHYLIB); 16793 16794 err = tg3_mdio_init(tp); 16795 if (err) 16796 return err; 16797 16798 /* Initialize data/descriptor byte/word swapping. 
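 * Only GRC_MODE_HOST_STACKUP (plus, on 5720/5762, the B2HRX/HTX2B
 * swap and enable bits) is preserved from the current register
 * value below; everything else comes from the tp->grc_mode template
 * built in tg3_init_one().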
*/ 16799 val = tr32(GRC_MODE); 16800 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 16801 tg3_asic_rev(tp) == ASIC_REV_5762) 16802 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA | 16803 GRC_MODE_WORD_SWAP_B2HRX_DATA | 16804 GRC_MODE_B2HRX_ENABLE | 16805 GRC_MODE_HTX2B_ENABLE | 16806 GRC_MODE_HOST_STACKUP); 16807 else 16808 val &= GRC_MODE_HOST_STACKUP; 16809 16810 tw32(GRC_MODE, val | tp->grc_mode); 16811 16812 tg3_switch_clocks(tp); 16813 16814 /* Clear this out for sanity. */ 16815 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 16816 16817 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */ 16818 tw32(TG3PCI_REG_BASE_ADDR, 0); 16819 16820 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 16821 &pci_state_reg); 16822 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && 16823 !tg3_flag(tp, PCIX_TARGET_HWBUG)) { 16824 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 16825 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 || 16826 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 || 16827 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) { 16828 void __iomem *sram_base; 16829 16830 /* Write some dummy words into the SRAM status block 16831 * area, see if it reads back correctly. If the return 16832 * value is bad, force enable the PCIX workaround. 16833 */ 16834 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK; 16835 16836 writel(0x00000000, sram_base); 16837 writel(0x00000000, sram_base + 4); 16838 writel(0xffffffff, sram_base + 4); 16839 if (readl(sram_base) != 0x00000000) 16840 tg3_flag_set(tp, PCIX_TARGET_HWBUG); 16841 } 16842 } 16843 16844 udelay(50); 16845 tg3_nvram_init(tp); 16846 16847 /* If the device has an NVRAM, no need to load patch firmware */ 16848 if (tg3_asic_rev(tp) == ASIC_REV_57766 && 16849 !tg3_flag(tp, NO_NVRAM)) 16850 tp->fw_needed = NULL; 16851 16852 grc_misc_cfg = tr32(GRC_MISC_CFG); 16853 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK; 16854 16855 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 16856 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 || 16857 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) 16858 tg3_flag_set(tp, IS_5788); 16859 16860 if (!tg3_flag(tp, IS_5788) && 16861 tg3_asic_rev(tp) != ASIC_REV_5700) 16862 tg3_flag_set(tp, TAGGED_STATUS); 16863 if (tg3_flag(tp, TAGGED_STATUS)) { 16864 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | 16865 HOSTCC_MODE_CLRTICK_TXBD); 16866 16867 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS; 16868 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 16869 tp->misc_host_ctrl); 16870 } 16871 16872 /* Preserve the APE MAC_MODE bits */ 16873 if (tg3_flag(tp, ENABLE_APE)) 16874 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 16875 else 16876 tp->mac_mode = 0; 16877 16878 if (tg3_10_100_only_device(tp, ent)) 16879 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY; 16880 16881 err = tg3_phy_probe(tp); 16882 if (err) { 16883 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err); 16884 /* ... but do not return immediately ... */ 16885 tg3_mdio_fini(tp); 16886 } 16887 16888 tg3_read_vpd(tp); 16889 tg3_read_fw_ver(tp); 16890 16891 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 16892 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; 16893 } else { 16894 if (tg3_asic_rev(tp) == ASIC_REV_5700) 16895 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; 16896 else 16897 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; 16898 } 16899 16900 /* 5700 {AX,BX} chips have a broken status block link 16901 * change bit implementation, so we must use the 16902 * status register in those cases. 
16903 */ 16904 if (tg3_asic_rev(tp) == ASIC_REV_5700) 16905 tg3_flag_set(tp, USE_LINKCHG_REG); 16906 else 16907 tg3_flag_clear(tp, USE_LINKCHG_REG); 16908 16909 /* The led_ctrl is set during tg3_phy_probe, here we might 16910 * have to force the link status polling mechanism based 16911 * upon subsystem IDs. 16912 */ 16913 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && 16914 tg3_asic_rev(tp) == ASIC_REV_5701 && 16915 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 16916 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; 16917 tg3_flag_set(tp, USE_LINKCHG_REG); 16918 } 16919 16920 /* For all SERDES we poll the MAC status register. */ 16921 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 16922 tg3_flag_set(tp, POLL_SERDES); 16923 else 16924 tg3_flag_clear(tp, POLL_SERDES); 16925 16926 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF)) 16927 tg3_flag_set(tp, POLL_CPMU_LINK); 16928 16929 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN; 16930 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; 16931 if (tg3_asic_rev(tp) == ASIC_REV_5701 && 16932 tg3_flag(tp, PCIX_MODE)) { 16933 tp->rx_offset = NET_SKB_PAD; 16934 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 16935 tp->rx_copy_thresh = ~(u16)0; 16936 #endif 16937 } 16938 16939 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1; 16940 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1; 16941 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1; 16942 16943 tp->rx_std_max_post = tp->rx_std_ring_mask + 1; 16944 16945 /* Increment the rx prod index on the rx std ring by at most 16946 * 8 for these chips to workaround hw errata. 16947 */ 16948 if (tg3_asic_rev(tp) == ASIC_REV_5750 || 16949 tg3_asic_rev(tp) == ASIC_REV_5752 || 16950 tg3_asic_rev(tp) == ASIC_REV_5755) 16951 tp->rx_std_max_post = 8; 16952 16953 if (tg3_flag(tp, ASPM_WORKAROUND)) 16954 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) & 16955 PCIE_PWR_MGMT_L1_THRESH_MSK; 16956 16957 return err; 16958 } 16959 16960 static int tg3_get_device_address(struct tg3 *tp, u8 *addr) 16961 { 16962 u32 hi, lo, mac_offset; 16963 int addr_ok = 0; 16964 int err; 16965 16966 if (!eth_platform_get_mac_address(&tp->pdev->dev, addr)) 16967 return 0; 16968 16969 if (tg3_flag(tp, IS_SSB_CORE)) { 16970 err = ssb_gige_get_macaddr(tp->pdev, addr); 16971 if (!err && is_valid_ether_addr(addr)) 16972 return 0; 16973 } 16974 16975 mac_offset = 0x7c; 16976 if (tg3_asic_rev(tp) == ASIC_REV_5704 || 16977 tg3_flag(tp, 5780_CLASS)) { 16978 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) 16979 mac_offset = 0xcc; 16980 if (tg3_nvram_lock(tp)) 16981 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); 16982 else 16983 tg3_nvram_unlock(tp); 16984 } else if (tg3_flag(tp, 5717_PLUS)) { 16985 if (tp->pci_fn & 1) 16986 mac_offset = 0xcc; 16987 if (tp->pci_fn > 1) 16988 mac_offset += 0x18c; 16989 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) 16990 mac_offset = 0x10; 16991 16992 /* First try to get it from MAC address mailbox. */ 16993 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi); 16994 if ((hi >> 16) == 0x484b) { 16995 addr[0] = (hi >> 8) & 0xff; 16996 addr[1] = (hi >> 0) & 0xff; 16997 16998 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo); 16999 addr[2] = (lo >> 24) & 0xff; 17000 addr[3] = (lo >> 16) & 0xff; 17001 addr[4] = (lo >> 8) & 0xff; 17002 addr[5] = (lo >> 0) & 0xff; 17003 17004 /* Some old bootcode may report a 0 MAC address in SRAM */ 17005 addr_ok = is_valid_ether_addr(addr); 17006 } 17007 if (!addr_ok) { 17008 /* Next, try NVRAM. 
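 * mac_offset was computed above from the chip family and PCI
 * function; the MAC control registers further below are the last
 * resort.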
*/ 17009 if (!tg3_flag(tp, NO_NVRAM) && 17010 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && 17011 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { 17012 memcpy(&addr[0], ((char *)&hi) + 2, 2); 17013 memcpy(&addr[2], (char *)&lo, sizeof(lo)); 17014 } 17015 /* Finally just fetch it out of the MAC control regs. */ 17016 else { 17017 hi = tr32(MAC_ADDR_0_HIGH); 17018 lo = tr32(MAC_ADDR_0_LOW); 17019 17020 addr[5] = lo & 0xff; 17021 addr[4] = (lo >> 8) & 0xff; 17022 addr[3] = (lo >> 16) & 0xff; 17023 addr[2] = (lo >> 24) & 0xff; 17024 addr[1] = hi & 0xff; 17025 addr[0] = (hi >> 8) & 0xff; 17026 } 17027 } 17028 17029 if (!is_valid_ether_addr(addr)) 17030 return -EINVAL; 17031 return 0; 17032 } 17033 17034 #define BOUNDARY_SINGLE_CACHELINE 1 17035 #define BOUNDARY_MULTI_CACHELINE 2 17036 17037 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val) 17038 { 17039 int cacheline_size; 17040 u8 byte; 17041 int goal; 17042 17043 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte); 17044 if (byte == 0) 17045 cacheline_size = 1024; 17046 else 17047 cacheline_size = (int) byte * 4; 17048 17049 /* On 5703 and later chips, the boundary bits have no 17050 * effect. 17051 */ 17052 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 17053 tg3_asic_rev(tp) != ASIC_REV_5701 && 17054 !tg3_flag(tp, PCI_EXPRESS)) 17055 goto out; 17056 17057 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) 17058 goal = BOUNDARY_MULTI_CACHELINE; 17059 #else 17060 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA) 17061 goal = BOUNDARY_SINGLE_CACHELINE; 17062 #else 17063 goal = 0; 17064 #endif 17065 #endif 17066 17067 if (tg3_flag(tp, 57765_PLUS)) { 17068 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; 17069 goto out; 17070 } 17071 17072 if (!goal) 17073 goto out; 17074 17075 /* PCI controllers on most RISC systems tend to disconnect 17076 * when a device tries to burst across a cache-line boundary. 17077 * Therefore, letting tg3 do so just wastes PCI bandwidth. 17078 * 17079 * Unfortunately, for PCI-E there are only limited 17080 * write-side controls for this, and thus for reads 17081 * we will still get the disconnects. We'll also waste 17082 * these PCI cycles for both read and write for chips 17083 * other than 5700 and 5701 which do not implement the 17084 * boundary bits. 
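 *
 * The switch statements below pick the read/write boundary bits per
 * bus type (PCI-X, PCI Express or plain PCI) and cache line size;
 * e.g. a 64-byte cache line with BOUNDARY_SINGLE_CACHELINE on plain
 * PCI yields DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64.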
17085 */
17086 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17087 switch (cacheline_size) {
17088 case 16:
17089 case 32:
17090 case 64:
17091 case 128:
17092 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17093 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17094 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17095 } else {
17096 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17097 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17098 }
17099 break;
17100
17101 case 256:
17102 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17103 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17104 break;
17105
17106 default:
17107 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17108 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17109 break;
17110 }
17111 } else if (tg3_flag(tp, PCI_EXPRESS)) {
17112 switch (cacheline_size) {
17113 case 16:
17114 case 32:
17115 case 64:
17116 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17117 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17118 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17119 break;
17120 }
17121 fallthrough;
17122 case 128:
17123 default:
17124 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17125 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17126 break;
17127 }
17128 } else {
17129 switch (cacheline_size) {
17130 case 16:
17131 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17132 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17133 DMA_RWCTRL_WRITE_BNDRY_16);
17134 break;
17135 }
17136 fallthrough;
17137 case 32:
17138 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17139 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17140 DMA_RWCTRL_WRITE_BNDRY_32);
17141 break;
17142 }
17143 fallthrough;
17144 case 64:
17145 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17146 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17147 DMA_RWCTRL_WRITE_BNDRY_64);
17148 break;
17149 }
17150 fallthrough;
17151 case 128:
17152 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17153 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17154 DMA_RWCTRL_WRITE_BNDRY_128);
17155 break;
17156 }
17157 fallthrough;
17158 case 256:
17159 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17160 DMA_RWCTRL_WRITE_BNDRY_256);
17161 break;
17162 case 512:
17163 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17164 DMA_RWCTRL_WRITE_BNDRY_512);
17165 break;
17166 case 1024:
17167 default:
17168 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17169 DMA_RWCTRL_WRITE_BNDRY_1024);
17170 break;
17171 }
17172 }
17173
17174 out:
17175 return val;
17176 }
17177
17178 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17179 int size, bool to_device)
17180 {
17181 struct tg3_internal_buffer_desc test_desc;
17182 u32 sram_dma_descs;
17183 int i, ret;
17184
17185 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17186
17187 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17188 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17189 tw32(RDMAC_STATUS, 0);
17190 tw32(WDMAC_STATUS, 0);
17191
17192 tw32(BUFMGR_MODE, 0);
17193 tw32(FTQ_RESET, 0);
17194
17195 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17196 test_desc.addr_lo = buf_dma & 0xffffffff;
17197 test_desc.nic_mbuf = 0x00002100;
17198 test_desc.len = size;
17199
17200 /*
17201 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17202 * the *second* time the tg3 driver was getting loaded after an
17203 * initial scan.
17204 *
17205 * Broadcom tells me:
17206 * ...the DMA engine is connected to the GRC block and a DMA
17207 * reset may affect the GRC block in some unpredictable way...
17208 * The behavior of resets to individual blocks has not been tested.
17209 *
17210 * Broadcom noted the GRC reset will also reset all sub-components.
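 *
 * Hence no block-level reset is attempted here; the test only
 * enables the RDMAC/WDMAC engines and polls the FTQ completion
 * FIFOs.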
17211 */ 17212 if (to_device) { 17213 test_desc.cqid_sqid = (13 << 8) | 2; 17214 17215 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE); 17216 udelay(40); 17217 } else { 17218 test_desc.cqid_sqid = (16 << 8) | 7; 17219 17220 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE); 17221 udelay(40); 17222 } 17223 test_desc.flags = 0x00000005; 17224 17225 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) { 17226 u32 val; 17227 17228 val = *(((u32 *)&test_desc) + i); 17229 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 17230 sram_dma_descs + (i * sizeof(u32))); 17231 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); 17232 } 17233 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); 17234 17235 if (to_device) 17236 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs); 17237 else 17238 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs); 17239 17240 ret = -ENODEV; 17241 for (i = 0; i < 40; i++) { 17242 u32 val; 17243 17244 if (to_device) 17245 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ); 17246 else 17247 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ); 17248 if ((val & 0xffff) == sram_dma_descs) { 17249 ret = 0; 17250 break; 17251 } 17252 17253 udelay(100); 17254 } 17255 17256 return ret; 17257 } 17258 17259 #define TEST_BUFFER_SIZE 0x2000 17260 17261 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = { 17262 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, 17263 { }, 17264 }; 17265 17266 static int tg3_test_dma(struct tg3 *tp) 17267 { 17268 dma_addr_t buf_dma; 17269 u32 *buf, saved_dma_rwctrl; 17270 int ret = 0; 17271 17272 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, 17273 &buf_dma, GFP_KERNEL); 17274 if (!buf) { 17275 ret = -ENOMEM; 17276 goto out_nofree; 17277 } 17278 17279 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) | 17280 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT)); 17281 17282 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); 17283 17284 if (tg3_flag(tp, 57765_PLUS)) 17285 goto out; 17286 17287 if (tg3_flag(tp, PCI_EXPRESS)) { 17288 /* DMA read watermark not used on PCIE */ 17289 tp->dma_rwctrl |= 0x00180000; 17290 } else if (!tg3_flag(tp, PCIX_MODE)) { 17291 if (tg3_asic_rev(tp) == ASIC_REV_5705 || 17292 tg3_asic_rev(tp) == ASIC_REV_5750) 17293 tp->dma_rwctrl |= 0x003f0000; 17294 else 17295 tp->dma_rwctrl |= 0x003f000f; 17296 } else { 17297 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 17298 tg3_asic_rev(tp) == ASIC_REV_5704) { 17299 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f); 17300 u32 read_water = 0x7; 17301 17302 /* If the 5704 is behind the EPB bridge, we can 17303 * do the less restrictive ONE_DMA workaround for 17304 * better performance. 
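 * (tg3_flag(tp, 40BIT_DMA_BUG) doubles as the "behind the EPB
 * bridge" indicator in the 5704 test below.)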
17305 */ 17306 if (tg3_flag(tp, 40BIT_DMA_BUG) && 17307 tg3_asic_rev(tp) == ASIC_REV_5704) 17308 tp->dma_rwctrl |= 0x8000; 17309 else if (ccval == 0x6 || ccval == 0x7) 17310 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; 17311 17312 if (tg3_asic_rev(tp) == ASIC_REV_5703) 17313 read_water = 4; 17314 /* Set bit 23 to enable PCIX hw bug fix */ 17315 tp->dma_rwctrl |= 17316 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) | 17317 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) | 17318 (1 << 23); 17319 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) { 17320 /* 5780 always in PCIX mode */ 17321 tp->dma_rwctrl |= 0x00144000; 17322 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) { 17323 /* 5714 always in PCIX mode */ 17324 tp->dma_rwctrl |= 0x00148000; 17325 } else { 17326 tp->dma_rwctrl |= 0x001b000f; 17327 } 17328 } 17329 if (tg3_flag(tp, ONE_DMA_AT_ONCE)) 17330 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; 17331 17332 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 17333 tg3_asic_rev(tp) == ASIC_REV_5704) 17334 tp->dma_rwctrl &= 0xfffffff0; 17335 17336 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 17337 tg3_asic_rev(tp) == ASIC_REV_5701) { 17338 /* Remove this if it causes problems for some boards. */ 17339 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT; 17340 17341 /* On 5700/5701 chips, we need to set this bit. 17342 * Otherwise the chip will issue cacheline transactions 17343 * to streamable DMA memory with not all the byte 17344 * enables turned on. This is an error on several 17345 * RISC PCI controllers, in particular sparc64. 17346 * 17347 * On 5703/5704 chips, this bit has been reassigned 17348 * a different meaning. In particular, it is used 17349 * on those chips to enable a PCI-X workaround. 17350 */ 17351 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE; 17352 } 17353 17354 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17355 17356 17357 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 17358 tg3_asic_rev(tp) != ASIC_REV_5701) 17359 goto out; 17360 17361 /* It is best to perform DMA test with maximum write burst size 17362 * to expose the 5700/5701 write DMA bug. 17363 */ 17364 saved_dma_rwctrl = tp->dma_rwctrl; 17365 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 17366 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17367 17368 while (1) { 17369 u32 *p = buf, i; 17370 17371 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) 17372 p[i] = i; 17373 17374 /* Send the buffer to the chip. */ 17375 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true); 17376 if (ret) { 17377 dev_err(&tp->pdev->dev, 17378 "%s: Buffer write failed. err = %d\n", 17379 __func__, ret); 17380 break; 17381 } 17382 17383 /* Now read it back. */ 17384 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false); 17385 if (ret) { 17386 dev_err(&tp->pdev->dev, "%s: Buffer read failed. " 17387 "err = %d\n", __func__, ret); 17388 break; 17389 } 17390 17391 /* Verify it. */ 17392 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { 17393 if (p[i] == i) 17394 continue; 17395 17396 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != 17397 DMA_RWCTRL_WRITE_BNDRY_16) { 17398 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 17399 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 17400 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17401 break; 17402 } else { 17403 dev_err(&tp->pdev->dev, 17404 "%s: Buffer corrupted on read back! " 17405 "(%d != %d)\n", __func__, p[i], i); 17406 ret = -ENODEV; 17407 goto out; 17408 } 17409 } 17410 17411 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) { 17412 /* Success. 
*/ 17413 ret = 0; 17414 break; 17415 } 17416 } 17417 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != 17418 DMA_RWCTRL_WRITE_BNDRY_16) { 17419 /* DMA test passed without adjusting DMA boundary, 17420 * now look for chipsets that are known to expose the 17421 * DMA bug without failing the test. 17422 */ 17423 if (pci_dev_present(tg3_dma_wait_state_chipsets)) { 17424 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 17425 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 17426 } else { 17427 /* Safe to use the calculated DMA boundary. */ 17428 tp->dma_rwctrl = saved_dma_rwctrl; 17429 } 17430 17431 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17432 } 17433 17434 out: 17435 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma); 17436 out_nofree: 17437 return ret; 17438 } 17439 17440 static void tg3_init_bufmgr_config(struct tg3 *tp) 17441 { 17442 if (tg3_flag(tp, 57765_PLUS)) { 17443 tp->bufmgr_config.mbuf_read_dma_low_water = 17444 DEFAULT_MB_RDMA_LOW_WATER_5705; 17445 tp->bufmgr_config.mbuf_mac_rx_low_water = 17446 DEFAULT_MB_MACRX_LOW_WATER_57765; 17447 tp->bufmgr_config.mbuf_high_water = 17448 DEFAULT_MB_HIGH_WATER_57765; 17449 17450 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 17451 DEFAULT_MB_RDMA_LOW_WATER_5705; 17452 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 17453 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765; 17454 tp->bufmgr_config.mbuf_high_water_jumbo = 17455 DEFAULT_MB_HIGH_WATER_JUMBO_57765; 17456 } else if (tg3_flag(tp, 5705_PLUS)) { 17457 tp->bufmgr_config.mbuf_read_dma_low_water = 17458 DEFAULT_MB_RDMA_LOW_WATER_5705; 17459 tp->bufmgr_config.mbuf_mac_rx_low_water = 17460 DEFAULT_MB_MACRX_LOW_WATER_5705; 17461 tp->bufmgr_config.mbuf_high_water = 17462 DEFAULT_MB_HIGH_WATER_5705; 17463 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 17464 tp->bufmgr_config.mbuf_mac_rx_low_water = 17465 DEFAULT_MB_MACRX_LOW_WATER_5906; 17466 tp->bufmgr_config.mbuf_high_water = 17467 DEFAULT_MB_HIGH_WATER_5906; 17468 } 17469 17470 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 17471 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780; 17472 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 17473 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780; 17474 tp->bufmgr_config.mbuf_high_water_jumbo = 17475 DEFAULT_MB_HIGH_WATER_JUMBO_5780; 17476 } else { 17477 tp->bufmgr_config.mbuf_read_dma_low_water = 17478 DEFAULT_MB_RDMA_LOW_WATER; 17479 tp->bufmgr_config.mbuf_mac_rx_low_water = 17480 DEFAULT_MB_MACRX_LOW_WATER; 17481 tp->bufmgr_config.mbuf_high_water = 17482 DEFAULT_MB_HIGH_WATER; 17483 17484 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 17485 DEFAULT_MB_RDMA_LOW_WATER_JUMBO; 17486 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 17487 DEFAULT_MB_MACRX_LOW_WATER_JUMBO; 17488 tp->bufmgr_config.mbuf_high_water_jumbo = 17489 DEFAULT_MB_HIGH_WATER_JUMBO; 17490 } 17491 17492 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER; 17493 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER; 17494 } 17495 17496 static char *tg3_phy_string(struct tg3 *tp) 17497 { 17498 switch (tp->phy_id & TG3_PHY_ID_MASK) { 17499 case TG3_PHY_ID_BCM5400: return "5400"; 17500 case TG3_PHY_ID_BCM5401: return "5401"; 17501 case TG3_PHY_ID_BCM5411: return "5411"; 17502 case TG3_PHY_ID_BCM5701: return "5701"; 17503 case TG3_PHY_ID_BCM5703: return "5703"; 17504 case TG3_PHY_ID_BCM5704: return "5704"; 17505 case TG3_PHY_ID_BCM5705: return "5705"; 17506 case TG3_PHY_ID_BCM5750: return "5750"; 17507 case TG3_PHY_ID_BCM5752: return "5752"; 17508 case TG3_PHY_ID_BCM5714: return "5714"; 17509 case TG3_PHY_ID_BCM5780: return "5780"; 17510 case 
TG3_PHY_ID_BCM5755: return "5755"; 17511 case TG3_PHY_ID_BCM5787: return "5787"; 17512 case TG3_PHY_ID_BCM5784: return "5784"; 17513 case TG3_PHY_ID_BCM5756: return "5722/5756"; 17514 case TG3_PHY_ID_BCM5906: return "5906"; 17515 case TG3_PHY_ID_BCM5761: return "5761"; 17516 case TG3_PHY_ID_BCM5718C: return "5718C"; 17517 case TG3_PHY_ID_BCM5718S: return "5718S"; 17518 case TG3_PHY_ID_BCM57765: return "57765"; 17519 case TG3_PHY_ID_BCM5719C: return "5719C"; 17520 case TG3_PHY_ID_BCM5720C: return "5720C"; 17521 case TG3_PHY_ID_BCM5762: return "5762C"; 17522 case TG3_PHY_ID_BCM8002: return "8002/serdes"; 17523 case 0: return "serdes"; 17524 default: return "unknown"; 17525 } 17526 } 17527 17528 static char *tg3_bus_string(struct tg3 *tp, char *str) 17529 { 17530 if (tg3_flag(tp, PCI_EXPRESS)) { 17531 strcpy(str, "PCI Express"); 17532 return str; 17533 } else if (tg3_flag(tp, PCIX_MODE)) { 17534 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f; 17535 17536 strcpy(str, "PCIX:"); 17537 17538 if ((clock_ctrl == 7) || 17539 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) == 17540 GRC_MISC_CFG_BOARD_ID_5704CIOBE)) 17541 strcat(str, "133MHz"); 17542 else if (clock_ctrl == 0) 17543 strcat(str, "33MHz"); 17544 else if (clock_ctrl == 2) 17545 strcat(str, "50MHz"); 17546 else if (clock_ctrl == 4) 17547 strcat(str, "66MHz"); 17548 else if (clock_ctrl == 6) 17549 strcat(str, "100MHz"); 17550 } else { 17551 strcpy(str, "PCI:"); 17552 if (tg3_flag(tp, PCI_HIGH_SPEED)) 17553 strcat(str, "66MHz"); 17554 else 17555 strcat(str, "33MHz"); 17556 } 17557 if (tg3_flag(tp, PCI_32BIT)) 17558 strcat(str, ":32-bit"); 17559 else 17560 strcat(str, ":64-bit"); 17561 return str; 17562 } 17563 17564 static void tg3_init_coal(struct tg3 *tp) 17565 { 17566 struct ethtool_coalesce *ec = &tp->coal; 17567 17568 memset(ec, 0, sizeof(*ec)); 17569 ec->cmd = ETHTOOL_GCOALESCE; 17570 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS; 17571 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS; 17572 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES; 17573 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES; 17574 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT; 17575 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT; 17576 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT; 17577 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT; 17578 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS; 17579 17580 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD | 17581 HOSTCC_MODE_CLRTICK_TXBD)) { 17582 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS; 17583 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS; 17584 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS; 17585 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; 17586 } 17587 17588 if (tg3_flag(tp, 5705_PLUS)) { 17589 ec->rx_coalesce_usecs_irq = 0; 17590 ec->tx_coalesce_usecs_irq = 0; 17591 ec->stats_block_coalesce_usecs = 0; 17592 } 17593 } 17594 17595 static int tg3_init_one(struct pci_dev *pdev, 17596 const struct pci_device_id *ent) 17597 { 17598 struct net_device *dev; 17599 struct tg3 *tp; 17600 int i, err; 17601 u32 sndmbx, rcvmbx, intmbx; 17602 char str[40]; 17603 u64 dma_mask, persist_dma_mask; 17604 netdev_features_t features = 0; 17605 u8 addr[ETH_ALEN] __aligned(2); 17606 17607 err = pci_enable_device(pdev); 17608 if (err) { 17609 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 17610 return err; 17611 } 17612 17613 err = pci_request_regions(pdev, DRV_MODULE_NAME); 17614 if (err) { 17615 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 
17616 goto err_out_disable_pdev; 17617 } 17618 17619 pci_set_master(pdev); 17620 17621 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); 17622 if (!dev) { 17623 err = -ENOMEM; 17624 goto err_out_free_res; 17625 } 17626 17627 SET_NETDEV_DEV(dev, &pdev->dev); 17628 17629 tp = netdev_priv(dev); 17630 tp->pdev = pdev; 17631 tp->dev = dev; 17632 tp->rx_mode = TG3_DEF_RX_MODE; 17633 tp->tx_mode = TG3_DEF_TX_MODE; 17634 tp->irq_sync = 1; 17635 tp->pcierr_recovery = false; 17636 17637 if (tg3_debug > 0) 17638 tp->msg_enable = tg3_debug; 17639 else 17640 tp->msg_enable = TG3_DEF_MSG_ENABLE; 17641 17642 if (pdev_is_ssb_gige_core(pdev)) { 17643 tg3_flag_set(tp, IS_SSB_CORE); 17644 if (ssb_gige_must_flush_posted_writes(pdev)) 17645 tg3_flag_set(tp, FLUSH_POSTED_WRITES); 17646 if (ssb_gige_one_dma_at_once(pdev)) 17647 tg3_flag_set(tp, ONE_DMA_AT_ONCE); 17648 if (ssb_gige_have_roboswitch(pdev)) { 17649 tg3_flag_set(tp, USE_PHYLIB); 17650 tg3_flag_set(tp, ROBOSWITCH); 17651 } 17652 if (ssb_gige_is_rgmii(pdev)) 17653 tg3_flag_set(tp, RGMII_MODE); 17654 } 17655 17656 /* The word/byte swap controls here control register access byte 17657 * swapping. DMA data byte swapping is controlled in the GRC_MODE 17658 * setting below. 17659 */ 17660 tp->misc_host_ctrl = 17661 MISC_HOST_CTRL_MASK_PCI_INT | 17662 MISC_HOST_CTRL_WORD_SWAP | 17663 MISC_HOST_CTRL_INDIR_ACCESS | 17664 MISC_HOST_CTRL_PCISTATE_RW; 17665 17666 /* The NONFRM (non-frame) byte/word swap controls take effect 17667 * on descriptor entries, anything which isn't packet data. 17668 * 17669 * The StrongARM chips on the board (one for tx, one for rx) 17670 * are running in big-endian mode. 17671 */ 17672 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA | 17673 GRC_MODE_WSWAP_NONFRM_DATA); 17674 #ifdef __BIG_ENDIAN 17675 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; 17676 #endif 17677 spin_lock_init(&tp->lock); 17678 spin_lock_init(&tp->indirect_lock); 17679 INIT_WORK(&tp->reset_task, tg3_reset_task); 17680 17681 tp->regs = pci_ioremap_bar(pdev, BAR_0); 17682 if (!tp->regs) { 17683 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 17684 err = -ENOMEM; 17685 goto err_out_free_dev; 17686 } 17687 17688 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 17689 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E || 17690 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S || 17691 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE || 17692 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 17693 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C || 17694 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 17695 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || 17696 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || 17697 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || 17698 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 || 17699 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || 17700 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || 17701 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || 17702 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) { 17703 tg3_flag_set(tp, ENABLE_APE); 17704 tp->aperegs = pci_ioremap_bar(pdev, BAR_2); 17705 if (!tp->aperegs) { 17706 dev_err(&pdev->dev, 17707 "Cannot map APE registers, aborting\n"); 17708 err = -ENOMEM; 17709 goto err_out_iounmap; 17710 } 17711 } 17712 17713 tp->rx_pending = TG3_DEF_RX_RING_PENDING; 17714 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; 17715 17716 dev->ethtool_ops = &tg3_ethtool_ops; 17717 dev->watchdog_timeo = TG3_TX_TIMEOUT; 17718 dev->netdev_ops = &tg3_netdev_ops; 17719 dev->irq = 
pdev->irq;
17720
17721 err = tg3_get_invariants(tp, ent);
17722 if (err) {
17723 dev_err(&pdev->dev,
17724 "Problem fetching invariants of chip, aborting\n");
17725 goto err_out_apeunmap;
17726 }
17727
17728 /* The EPB bridge inside 5714, 5715, and 5780 and any
17729 * device behind the EPB cannot support DMA addresses > 40-bit.
17730 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17731 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17732 * do DMA address check in tg3_start_xmit().
17733 */
17734 if (tg3_flag(tp, IS_5788))
17735 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17736 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17737 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17738 #ifdef CONFIG_HIGHMEM
17739 dma_mask = DMA_BIT_MASK(64);
17740 #endif
17741 } else
17742 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17743
17744 /* Configure DMA attributes. */
17745 if (dma_mask > DMA_BIT_MASK(32)) {
17746 err = dma_set_mask(&pdev->dev, dma_mask);
17747 if (!err) {
17748 features |= NETIF_F_HIGHDMA;
17749 err = dma_set_coherent_mask(&pdev->dev,
17750 persist_dma_mask);
17751 if (err < 0) {
17752 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17753 "DMA for consistent allocations\n");
17754 goto err_out_apeunmap;
17755 }
17756 }
17757 }
17758 if (err || dma_mask == DMA_BIT_MASK(32)) {
17759 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17760 if (err) {
17761 dev_err(&pdev->dev,
17762 "No usable DMA configuration, aborting\n");
17763 goto err_out_apeunmap;
17764 }
17765 }
17766
17767 tg3_init_bufmgr_config(tp);
17768
17769 /* 5700 B0 chips do not support checksumming correctly due
17770 * to hardware bugs.
17771 */
17772 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17773 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17774
17775 if (tg3_flag(tp, 5755_PLUS))
17776 features |= NETIF_F_IPV6_CSUM;
17777 }
17778
17779 /* TSO is on by default on chips that support hardware TSO.
17780 * Firmware TSO on older chips gives lower performance, so it
17781 * is off by default, but can be enabled using ethtool.
17782 */
17783 if ((tg3_flag(tp, HW_TSO_1) ||
17784 tg3_flag(tp, HW_TSO_2) ||
17785 tg3_flag(tp, HW_TSO_3)) &&
17786 (features & NETIF_F_IP_CSUM))
17787 features |= NETIF_F_TSO;
17788 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17789 if (features & NETIF_F_IPV6_CSUM)
17790 features |= NETIF_F_TSO6;
17791 if (tg3_flag(tp, HW_TSO_3) ||
17792 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17793 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17794 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17795 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17796 tg3_asic_rev(tp) == ASIC_REV_57780)
17797 features |= NETIF_F_TSO_ECN;
17798 }
17799
17800 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17801 NETIF_F_HW_VLAN_CTAG_RX;
17802 dev->vlan_features |= features;
17803
17804 /*
17805 * Add loopback capability only for a subset of devices that support
17806 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17807 * loopback for the remaining devices.
17808 */
17809 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17810 !tg3_flag(tp, CPMU_PRESENT))
17811 /* Add the loopback capability */
17812 features |= NETIF_F_LOOPBACK;
17813
17814 dev->hw_features |= features;
17815 dev->priv_flags |= IFF_UNICAST_FLT;
17816
17817 /* MTU range: 60 - 9000 or 1500, depending on hardware */
17818 dev->min_mtu = TG3_MIN_MTU;
17819 dev->max_mtu = TG3_MAX_MTU(tp);
17820
17821 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17822 !tg3_flag(tp, TSO_CAPABLE) &&
17823 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17824 tg3_flag_set(tp, MAX_RXPEND_64);
17825 tp->rx_pending = 63;
17826 }
17827
17828 err = tg3_get_device_address(tp, addr);
17829 if (err) {
17830 dev_err(&pdev->dev,
17831 "Could not obtain valid ethernet address, aborting\n");
17832 goto err_out_apeunmap;
17833 }
17834 eth_hw_addr_set(dev, addr);
17835
17836 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17837 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17838 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17839 for (i = 0; i < tp->irq_max; i++) {
17840 struct tg3_napi *tnapi = &tp->napi[i];
17841
17842 tnapi->tp = tp;
17843 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17844
17845 tnapi->int_mbox = intmbx;
17846 intmbx += 0x8;
17847
17848 tnapi->consmbox = rcvmbx;
17849 tnapi->prodmbox = sndmbx;
17850
17851 if (i)
17852 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17853 else
17854 tnapi->coal_now = HOSTCC_MODE_NOW;
17855
17856 if (!tg3_flag(tp, SUPPORT_MSIX))
17857 break;
17858
17859 /*
17860 * If we support MSIX, we'll be using RSS. If we're using
17861 * RSS, the first vector only handles link interrupts and the
17862 * remaining vectors handle rx and tx interrupts. Reuse the
17863 * mailbox values for the next iteration. The values we set up
17864 * above are still useful for the single vectored mode.
17865 */
17866 if (!i)
17867 continue;
17868
17869 rcvmbx += 0x8;
17870
17871 if (sndmbx & 0x4)
17872 sndmbx -= 0x4;
17873 else
17874 sndmbx += 0xc;
17875 }
17876
17877 /*
17878 * Reset chip in case UNDI or EFI driver did not shut it down.
17879 * The DMA self test will enable WDMAC and we'll see (spurious)
17880 * pending DMA on the PCI bus at that point.
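 * A HOSTCC or WDMAC engine left enabled is the tell-tale checked
 * below; if either is set, tg3_halt() with RESET_KIND_SHUTDOWN
 * quiesces the chip before the DMA test runs.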
	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut it
	 * down; otherwise the DMA self test will enable WDMAC and we'll
	 * see (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tg3_full_lock(tp, 0);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	/* PTP clock registration failure is not fatal; the driver simply
	 * runs without a PHC in that case.
	 */
	if (tg3_flag(tp, PTP_CAPABLE)) {
		tg3_ptp_init(tp);
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
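	/* Save config space now so tg3_io_slot_reset() can restore it
	 * with pci_restore_state() after a PCI bus reset.
	 */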
	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	return err;
}

static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		tg3_ptp_fini(tp);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		/* Powering down failed; try to bring the hardware and
		 * the network interface back up again.
		 */
		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}

static int tg3_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
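/* PCI .shutdown callback, invoked at reboot/kexec time: quiesce the
 * device so no DMA is in flight when the kernel hands over control.
 */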
static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	tg3_reset_task_cancel(tp);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	tg3_power_down(tp);

	rtnl_unlock();

	pci_disable_device(pdev);
}

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	/* Make sure the reset task doesn't run concurrently */
	tg3_reset_task_cancel(tp);

	rtnl_lock();

	/* Could be a second call, or we may not have a netdev yet */
	if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
		goto done;

	/* We needn't recover from a permanent error */
	if (state == pci_channel_io_frozen)
		tp->pcierr_recovery = true;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}
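/* Together with tg3_io_error_detected() and tg3_io_slot_reset() above,
 * tg3_io_resume() below completes the three-step PCI error recovery
 * sequence: detect and freeze, reset the slot, then resume traffic.
 */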
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it is OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	tp->pcierr_recovery = false;
	rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

module_pci_driver(tg3_driver);
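/* module_pci_driver() expands to the module_init()/module_exit() pair
 * that registers and unregisters tg3_driver with the PCI core.
 */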