/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)		\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
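
/* Illustrative expansion (added commentary, not part of the original
 * file): a call such as
 *
 *	tg3_flag(tp, JUMBO_CAPABLE)
 *
 * token-pastes to _tg3_flag(TG3_FLAG_JUMBO_CAPABLE, (tp)->tg3_flags),
 * so every feature flag is one bit in tp->tg3_flags, tested and updated
 * atomically through the test_bit()/set_bit()/clear_bit() wrappers above.
 */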

#define DRV_MODULE_NAME		"tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		137

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
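
/* Worked example (illustration only): with TG3_TX_RING_SIZE == 512,
 * NEXT_TX(511) == (511 + 1) & 511 == 0, i.e. the '& (size - 1)' mask
 * wraps the index exactly as '% TG3_TX_RING_SIZE' would, but compiles
 * to a single AND because the ring size is a power of two.
 */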

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
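
/* Added commentary: these key strings are what "ethtool -S" prints.
 * ethtool reports strings and statistic values positionally, so the
 * order here must match the order in which the driver fills in the
 * corresponding values.
 */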
"tx_excessive_collisions" }, 399 { "tx_late_collisions" }, 400 { "tx_collide_2times" }, 401 { "tx_collide_3times" }, 402 { "tx_collide_4times" }, 403 { "tx_collide_5times" }, 404 { "tx_collide_6times" }, 405 { "tx_collide_7times" }, 406 { "tx_collide_8times" }, 407 { "tx_collide_9times" }, 408 { "tx_collide_10times" }, 409 { "tx_collide_11times" }, 410 { "tx_collide_12times" }, 411 { "tx_collide_13times" }, 412 { "tx_collide_14times" }, 413 { "tx_collide_15times" }, 414 { "tx_ucast_packets" }, 415 { "tx_mcast_packets" }, 416 { "tx_bcast_packets" }, 417 { "tx_carrier_sense_errors" }, 418 { "tx_discards" }, 419 { "tx_errors" }, 420 421 { "dma_writeq_full" }, 422 { "dma_write_prioq_full" }, 423 { "rxbds_empty" }, 424 { "rx_discards" }, 425 { "rx_errors" }, 426 { "rx_threshold_hit" }, 427 428 { "dma_readq_full" }, 429 { "dma_read_prioq_full" }, 430 { "tx_comp_queue_full" }, 431 432 { "ring_set_send_prod_index" }, 433 { "ring_status_update" }, 434 { "nic_irqs" }, 435 { "nic_avoided_irqs" }, 436 { "nic_tx_threshold_hit" }, 437 438 { "mbuf_lwm_thresh_hit" }, 439 }; 440 441 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys) 442 #define TG3_NVRAM_TEST 0 443 #define TG3_LINK_TEST 1 444 #define TG3_REGISTER_TEST 2 445 #define TG3_MEMORY_TEST 3 446 #define TG3_MAC_LOOPB_TEST 4 447 #define TG3_PHY_LOOPB_TEST 5 448 #define TG3_EXT_LOOPB_TEST 6 449 #define TG3_INTERRUPT_TEST 7 450 451 452 static const struct { 453 const char string[ETH_GSTRING_LEN]; 454 } ethtool_test_keys[] = { 455 [TG3_NVRAM_TEST] = { "nvram test (online) " }, 456 [TG3_LINK_TEST] = { "link test (online) " }, 457 [TG3_REGISTER_TEST] = { "register test (offline)" }, 458 [TG3_MEMORY_TEST] = { "memory test (offline)" }, 459 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" }, 460 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" }, 461 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" }, 462 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" }, 463 }; 464 465 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys) 466 467 468 static void tg3_write32(struct tg3 *tp, u32 off, u32 val) 469 { 470 writel(val, tp->regs + off); 471 } 472 473 static u32 tg3_read32(struct tg3 *tp, u32 off) 474 { 475 return readl(tp->regs + off); 476 } 477 478 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val) 479 { 480 writel(val, tp->aperegs + off); 481 } 482 483 static u32 tg3_ape_read32(struct tg3 *tp, u32 off) 484 { 485 return readl(tp->aperegs + off); 486 } 487 488 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) 489 { 490 unsigned long flags; 491 492 spin_lock_irqsave(&tp->indirect_lock, flags); 493 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); 494 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); 495 spin_unlock_irqrestore(&tp->indirect_lock, flags); 496 } 497 498 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val) 499 { 500 writel(val, tp->regs + off); 501 readl(tp->regs + off); 502 } 503 504 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off) 505 { 506 unsigned long flags; 507 u32 val; 508 509 spin_lock_irqsave(&tp->indirect_lock, flags); 510 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); 511 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); 512 spin_unlock_irqrestore(&tp->indirect_lock, flags); 513 return val; 514 } 515 516 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val) 517 { 518 unsigned long flags; 519 520 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) { 521 pci_write_config_dword(tp->pdev, 

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
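
/* Added commentary: the tw32*()/tr32() macros above assume a variable
 * named 'tp' in scope.  All register access funnels through the
 * tp->write32/tp->read32 function pointers, which are expected to be
 * pointed at the direct, flushing, or indirect accessors above
 * depending on the chip bugs in effect.  For illustration,
 *
 *	tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
 *
 * expands to _tw32_flush(tp, TG3PCI_MEM_WIN_BASE_ADDR, off, 0).
 */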

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
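
/* Usage sketch for the APE lock primitives (illustration only):
 *
 *	if (!tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) {
 *		... touch APE shared memory ...
 *		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *	}
 *
 * tg3_ape_lock() returns 0 on success, so a non-zero return means the
 * lock is not held and the unlock must be skipped.
 */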

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 milliseconds for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Check if hb interval has exceeded */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000
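
/* Added commentary: the MDIO accessors below hand-roll a clause 22
 * frame in the MAC_MI_COM register, roughly
 *
 *	frame = (phy_addr << MI_COM_PHY_ADDR_SHIFT)	(which PHY)
 *	      | (reg << MI_COM_REG_ADDR_SHIFT)		(which register)
 *	      | MI_COM_CMD_READ/WRITE | MI_COM_START
 *
 * and then poll MI_COM_BUSY, which the MAC clears once it has shifted
 * the frame out on the MDIO bus.
 */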

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
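
/* Added commentary: the two cl45 helpers above implement the standard
 * MMD indirect access sequence (IEEE 802.3 annex 22D) over clause 22
 * registers, roughly:
 *
 *	1. write devad to MII_TG3_MMD_CTRL		(select device)
 *	2. write addr to MII_TG3_MMD_ADDRESS		(select register)
 *	3. write devad | DATA_NOINC to MII_TG3_MMD_CTRL	(switch to data)
 *	4. read or write MII_TG3_MMD_ADDRESS		(move the data)
 */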

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
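
/* Added commentary: tg3_mdio_read()/tg3_mdio_write() below adapt the
 * driver's internal __tg3_readphy()/__tg3_writephy() accessors to the
 * phylib mii_bus interface; tp->lock serializes them against the rest
 * of the driver.
 */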

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		fallthrough;
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
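
/* Added commentary: the driver-to-firmware event flow used below is,
 * in sketch form:
 *
 *	tg3_wait_for_event_ack(tp);	wait out the previous event
 *	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, cmd);
 *	...optional FW_CMD_LEN/FW_CMD_DATA mailbox words...
 *	tg3_generate_fw_event(tp);	raise GRC_RX_CPU_DRIVER_EVENT
 *
 * The RX CPU firmware clears GRC_RX_CPU_DRIVER_EVENT once it has
 * consumed the mailbox contents.
 */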

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}
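
/* Added commentary: the pause resolution below follows IEEE 802.3
 * annex 28B: the PAUSE bit advertises symmetric flow control and
 * ASYM_PAUSE modifies its direction.  Worked example: a local
 * advertisement of ASYM only against a remote (PAUSE | ASYM) resolves
 * to FLOW_CTRL_TX, i.e. we may send PAUSE frames but will ignore any
 * we receive.
 */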
"enabled" : "disabled"); 1892 1893 tg3_ump_link_report(tp); 1894 } 1895 1896 tp->link_up = netif_carrier_ok(tp->dev); 1897 } 1898 1899 static u32 tg3_decode_flowctrl_1000T(u32 adv) 1900 { 1901 u32 flowctrl = 0; 1902 1903 if (adv & ADVERTISE_PAUSE_CAP) { 1904 flowctrl |= FLOW_CTRL_RX; 1905 if (!(adv & ADVERTISE_PAUSE_ASYM)) 1906 flowctrl |= FLOW_CTRL_TX; 1907 } else if (adv & ADVERTISE_PAUSE_ASYM) 1908 flowctrl |= FLOW_CTRL_TX; 1909 1910 return flowctrl; 1911 } 1912 1913 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) 1914 { 1915 u16 miireg; 1916 1917 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) 1918 miireg = ADVERTISE_1000XPAUSE; 1919 else if (flow_ctrl & FLOW_CTRL_TX) 1920 miireg = ADVERTISE_1000XPSE_ASYM; 1921 else if (flow_ctrl & FLOW_CTRL_RX) 1922 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM; 1923 else 1924 miireg = 0; 1925 1926 return miireg; 1927 } 1928 1929 static u32 tg3_decode_flowctrl_1000X(u32 adv) 1930 { 1931 u32 flowctrl = 0; 1932 1933 if (adv & ADVERTISE_1000XPAUSE) { 1934 flowctrl |= FLOW_CTRL_RX; 1935 if (!(adv & ADVERTISE_1000XPSE_ASYM)) 1936 flowctrl |= FLOW_CTRL_TX; 1937 } else if (adv & ADVERTISE_1000XPSE_ASYM) 1938 flowctrl |= FLOW_CTRL_TX; 1939 1940 return flowctrl; 1941 } 1942 1943 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv) 1944 { 1945 u8 cap = 0; 1946 1947 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) { 1948 cap = FLOW_CTRL_TX | FLOW_CTRL_RX; 1949 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) { 1950 if (lcladv & ADVERTISE_1000XPAUSE) 1951 cap = FLOW_CTRL_RX; 1952 if (rmtadv & ADVERTISE_1000XPAUSE) 1953 cap = FLOW_CTRL_TX; 1954 } 1955 1956 return cap; 1957 } 1958 1959 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) 1960 { 1961 u8 autoneg; 1962 u8 flowctrl = 0; 1963 u32 old_rx_mode = tp->rx_mode; 1964 u32 old_tx_mode = tp->tx_mode; 1965 1966 if (tg3_flag(tp, USE_PHYLIB)) 1967 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg; 1968 else 1969 autoneg = tp->link_config.autoneg; 1970 1971 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) { 1972 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 1973 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv); 1974 else 1975 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv); 1976 } else 1977 flowctrl = tp->link_config.flowctrl; 1978 1979 tp->link_config.active_flowctrl = flowctrl; 1980 1981 if (flowctrl & FLOW_CTRL_RX) 1982 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE; 1983 else 1984 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE; 1985 1986 if (old_rx_mode != tp->rx_mode) 1987 tw32_f(MAC_RX_MODE, tp->rx_mode); 1988 1989 if (flowctrl & FLOW_CTRL_TX) 1990 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE; 1991 else 1992 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE; 1993 1994 if (old_tx_mode != tp->tx_mode) 1995 tw32_f(MAC_TX_MODE, tp->tx_mode); 1996 } 1997 1998 static void tg3_adjust_link(struct net_device *dev) 1999 { 2000 u8 oldflowctrl, linkmesg = 0; 2001 u32 mac_mode, lcl_adv, rmt_adv; 2002 struct tg3 *tp = netdev_priv(dev); 2003 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 2004 2005 spin_lock_bh(&tp->lock); 2006 2007 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | 2008 MAC_MODE_HALF_DUPLEX); 2009 2010 oldflowctrl = tp->link_config.active_flowctrl; 2011 2012 if (phydev->link) { 2013 lcl_adv = 0; 2014 rmt_adv = 0; 2015 2016 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) 2017 mac_mode |= MAC_MODE_PORT_MODE_MII; 2018 else if (phydev->speed == SPEED_1000 || 2019 tg3_asic_rev(tp) != ASIC_REV_5785) 2020 
mac_mode |= MAC_MODE_PORT_MODE_GMII; 2021 else 2022 mac_mode |= MAC_MODE_PORT_MODE_MII; 2023 2024 if (phydev->duplex == DUPLEX_HALF) 2025 mac_mode |= MAC_MODE_HALF_DUPLEX; 2026 else { 2027 lcl_adv = mii_advertise_flowctrl( 2028 tp->link_config.flowctrl); 2029 2030 if (phydev->pause) 2031 rmt_adv = LPA_PAUSE_CAP; 2032 if (phydev->asym_pause) 2033 rmt_adv |= LPA_PAUSE_ASYM; 2034 } 2035 2036 tg3_setup_flow_control(tp, lcl_adv, rmt_adv); 2037 } else 2038 mac_mode |= MAC_MODE_PORT_MODE_GMII; 2039 2040 if (mac_mode != tp->mac_mode) { 2041 tp->mac_mode = mac_mode; 2042 tw32_f(MAC_MODE, tp->mac_mode); 2043 udelay(40); 2044 } 2045 2046 if (tg3_asic_rev(tp) == ASIC_REV_5785) { 2047 if (phydev->speed == SPEED_10) 2048 tw32(MAC_MI_STAT, 2049 MAC_MI_STAT_10MBPS_MODE | 2050 MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 2051 else 2052 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 2053 } 2054 2055 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF) 2056 tw32(MAC_TX_LENGTHS, 2057 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | 2058 (6 << TX_LENGTHS_IPG_SHIFT) | 2059 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))); 2060 else 2061 tw32(MAC_TX_LENGTHS, 2062 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | 2063 (6 << TX_LENGTHS_IPG_SHIFT) | 2064 (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); 2065 2066 if (phydev->link != tp->old_link || 2067 phydev->speed != tp->link_config.active_speed || 2068 phydev->duplex != tp->link_config.active_duplex || 2069 oldflowctrl != tp->link_config.active_flowctrl) 2070 linkmesg = 1; 2071 2072 tp->old_link = phydev->link; 2073 tp->link_config.active_speed = phydev->speed; 2074 tp->link_config.active_duplex = phydev->duplex; 2075 2076 spin_unlock_bh(&tp->lock); 2077 2078 if (linkmesg) 2079 tg3_link_report(tp); 2080 } 2081 2082 static int tg3_phy_init(struct tg3 *tp) 2083 { 2084 struct phy_device *phydev; 2085 2086 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) 2087 return 0; 2088 2089 /* Bring the PHY back to a known state. */ 2090 tg3_bmcr_reset(tp); 2091 2092 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 2093 2094 /* Attach the MAC to the PHY. */ 2095 phydev = phy_connect(tp->dev, phydev_name(phydev), 2096 tg3_adjust_link, phydev->interface); 2097 if (IS_ERR(phydev)) { 2098 dev_err(&tp->pdev->dev, "Could not attach to PHY\n"); 2099 return PTR_ERR(phydev); 2100 } 2101 2102 /* Mask with MAC supported features. 
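 */

/* Worked example (hypothetical advertisement values) for the 1000X
 * pause resolution implemented by tg3_resolve_flowctrl_1000X()
 * further up: symmetric pause on both ends enables flow control in
 * both directions, while asym-only on our side plus sym+asym on the
 * link partner lets only our transmitter emit pause frames.
 */
#if 0
u8 cap;

cap = tg3_resolve_flowctrl_1000X(ADVERTISE_1000XPAUSE,
				 ADVERTISE_1000XPAUSE);
/* cap == (FLOW_CTRL_TX | FLOW_CTRL_RX) */

cap = tg3_resolve_flowctrl_1000X(ADVERTISE_1000XPSE_ASYM,
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM);
/* cap == FLOW_CTRL_TX */
#endif

/* Mask with MAC supported features.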
*/ 2103 switch (phydev->interface) { 2104 case PHY_INTERFACE_MODE_GMII: 2105 case PHY_INTERFACE_MODE_RGMII: 2106 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 2107 phy_set_max_speed(phydev, SPEED_1000); 2108 phy_support_asym_pause(phydev); 2109 break; 2110 } 2111 fallthrough; 2112 case PHY_INTERFACE_MODE_MII: 2113 phy_set_max_speed(phydev, SPEED_100); 2114 phy_support_asym_pause(phydev); 2115 break; 2116 default: 2117 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 2118 return -EINVAL; 2119 } 2120 2121 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED; 2122 2123 phy_attached_info(phydev); 2124 2125 return 0; 2126 } 2127 2128 static void tg3_phy_start(struct tg3 *tp) 2129 { 2130 struct phy_device *phydev; 2131 2132 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 2133 return; 2134 2135 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 2136 2137 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { 2138 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; 2139 phydev->speed = tp->link_config.speed; 2140 phydev->duplex = tp->link_config.duplex; 2141 phydev->autoneg = tp->link_config.autoneg; 2142 ethtool_convert_legacy_u32_to_link_mode( 2143 phydev->advertising, tp->link_config.advertising); 2144 } 2145 2146 phy_start(phydev); 2147 2148 phy_start_aneg(phydev); 2149 } 2150 2151 static void tg3_phy_stop(struct tg3 *tp) 2152 { 2153 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 2154 return; 2155 2156 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 2157 } 2158 2159 static void tg3_phy_fini(struct tg3 *tp) 2160 { 2161 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { 2162 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 2163 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED; 2164 } 2165 } 2166 2167 static int tg3_phy_set_extloopbk(struct tg3 *tp) 2168 { 2169 int err; 2170 u32 val; 2171 2172 if (tp->phy_flags & TG3_PHYFLG_IS_FET) 2173 return 0; 2174 2175 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 2176 /* Cannot do read-modify-write on 5401 */ 2177 err = tg3_phy_auxctl_write(tp, 2178 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 2179 MII_TG3_AUXCTL_ACTL_EXTLOOPBK | 2180 0x4c20); 2181 goto done; 2182 } 2183 2184 err = tg3_phy_auxctl_read(tp, 2185 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); 2186 if (err) 2187 return err; 2188 2189 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK; 2190 err = tg3_phy_auxctl_write(tp, 2191 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val); 2192 2193 done: 2194 return err; 2195 } 2196 2197 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable) 2198 { 2199 u32 phytest; 2200 2201 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { 2202 u32 phy; 2203 2204 tg3_writephy(tp, MII_TG3_FET_TEST, 2205 phytest | MII_TG3_FET_SHADOW_EN); 2206 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) { 2207 if (enable) 2208 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD; 2209 else 2210 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD; 2211 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy); 2212 } 2213 tg3_writephy(tp, MII_TG3_FET_TEST, phytest); 2214 } 2215 } 2216 2217 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) 2218 { 2219 u32 reg; 2220 2221 if (!tg3_flag(tp, 5705_PLUS) || 2222 (tg3_flag(tp, 5717_PLUS) && 2223 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) 2224 return; 2225 2226 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 2227 tg3_phy_fet_toggle_apd(tp, enable); 2228 return; 2229 } 2230 2231 reg = MII_TG3_MISC_SHDW_SCR5_LPED | 2232 MII_TG3_MISC_SHDW_SCR5_DLPTLM | 2233 MII_TG3_MISC_SHDW_SCR5_SDTL | 2234 MII_TG3_MISC_SHDW_SCR5_C125OE; 2235 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable) 2236 reg |= 
MII_TG3_MISC_SHDW_SCR5_DLLAPD; 2237 2238 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg); 2239 2240 2241 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS; 2242 if (enable) 2243 reg |= MII_TG3_MISC_SHDW_APD_ENABLE; 2244 2245 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg); 2246 } 2247 2248 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable) 2249 { 2250 u32 phy; 2251 2252 if (!tg3_flag(tp, 5705_PLUS) || 2253 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 2254 return; 2255 2256 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 2257 u32 ephy; 2258 2259 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) { 2260 u32 reg = MII_TG3_FET_SHDW_MISCCTRL; 2261 2262 tg3_writephy(tp, MII_TG3_FET_TEST, 2263 ephy | MII_TG3_FET_SHADOW_EN); 2264 if (!tg3_readphy(tp, reg, &phy)) { 2265 if (enable) 2266 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX; 2267 else 2268 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX; 2269 tg3_writephy(tp, reg, phy); 2270 } 2271 tg3_writephy(tp, MII_TG3_FET_TEST, ephy); 2272 } 2273 } else { 2274 int ret; 2275 2276 ret = tg3_phy_auxctl_read(tp, 2277 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy); 2278 if (!ret) { 2279 if (enable) 2280 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX; 2281 else 2282 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX; 2283 tg3_phy_auxctl_write(tp, 2284 MII_TG3_AUXCTL_SHDWSEL_MISC, phy); 2285 } 2286 } 2287 } 2288 2289 static void tg3_phy_set_wirespeed(struct tg3 *tp) 2290 { 2291 int ret; 2292 u32 val; 2293 2294 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) 2295 return; 2296 2297 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val); 2298 if (!ret) 2299 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, 2300 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN); 2301 } 2302 2303 static void tg3_phy_apply_otp(struct tg3 *tp) 2304 { 2305 u32 otp, phy; 2306 2307 if (!tp->phy_otp) 2308 return; 2309 2310 otp = tp->phy_otp; 2311 2312 if (tg3_phy_toggle_auxctl_smdsp(tp, true)) 2313 return; 2314 2315 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); 2316 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT; 2317 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy); 2318 2319 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) | 2320 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT); 2321 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy); 2322 2323 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT); 2324 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ; 2325 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy); 2326 2327 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT); 2328 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy); 2329 2330 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT); 2331 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy); 2332 2333 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) | 2334 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT); 2335 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); 2336 2337 tg3_phy_toggle_auxctl_smdsp(tp, false); 2338 } 2339 2340 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee) 2341 { 2342 u32 val; 2343 struct ethtool_eee *dest = &tp->eee; 2344 2345 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 2346 return; 2347 2348 if (eee) 2349 dest = eee; 2350 2351 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val)) 2352 return; 2353 2354 /* Pull eee_active */ 2355 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || 2356 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) { 2357 dest->eee_active = 1; 2358 } else 2359 dest->eee_active = 0; 2360 2361 /* Pull lp advertised settings */ 2362 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val)) 
2363 return; 2364 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val); 2365 2366 /* Pull advertised and eee_enabled settings */ 2367 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val)) 2368 return; 2369 dest->eee_enabled = !!val; 2370 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val); 2371 2372 /* Pull tx_lpi_enabled */ 2373 val = tr32(TG3_CPMU_EEE_MODE); 2374 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX); 2375 2376 /* Pull lpi timer value */ 2377 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff; 2378 } 2379 2380 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up) 2381 { 2382 u32 val; 2383 2384 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 2385 return; 2386 2387 tp->setlpicnt = 0; 2388 2389 if (tp->link_config.autoneg == AUTONEG_ENABLE && 2390 current_link_up && 2391 tp->link_config.active_duplex == DUPLEX_FULL && 2392 (tp->link_config.active_speed == SPEED_100 || 2393 tp->link_config.active_speed == SPEED_1000)) { 2394 u32 eeectl; 2395 2396 if (tp->link_config.active_speed == SPEED_1000) 2397 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US; 2398 else 2399 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US; 2400 2401 tw32(TG3_CPMU_EEE_CTRL, eeectl); 2402 2403 tg3_eee_pull_config(tp, NULL); 2404 if (tp->eee.eee_active) 2405 tp->setlpicnt = 2; 2406 } 2407 2408 if (!tp->setlpicnt) { 2409 if (current_link_up && 2410 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2411 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000); 2412 tg3_phy_toggle_auxctl_smdsp(tp, false); 2413 } 2414 2415 val = tr32(TG3_CPMU_EEE_MODE); 2416 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE); 2417 } 2418 } 2419 2420 static void tg3_phy_eee_enable(struct tg3 *tp) 2421 { 2422 u32 val; 2423 2424 if (tp->link_config.active_speed == SPEED_1000 && 2425 (tg3_asic_rev(tp) == ASIC_REV_5717 || 2426 tg3_asic_rev(tp) == ASIC_REV_5719 || 2427 tg3_flag(tp, 57765_CLASS)) && 2428 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2429 val = MII_TG3_DSP_TAP26_ALNOKO | 2430 MII_TG3_DSP_TAP26_RMRXSTO; 2431 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); 2432 tg3_phy_toggle_auxctl_smdsp(tp, false); 2433 } 2434 2435 val = tr32(TG3_CPMU_EEE_MODE); 2436 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE); 2437 } 2438 2439 static int tg3_wait_macro_done(struct tg3 *tp) 2440 { 2441 int limit = 100; 2442 2443 while (limit--) { 2444 u32 tmp32; 2445 2446 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) { 2447 if ((tmp32 & 0x1000) == 0) 2448 break; 2449 } 2450 } 2451 if (limit < 0) 2452 return -EBUSY; 2453 2454 return 0; 2455 } 2456 2457 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp) 2458 { 2459 static const u32 test_pat[4][6] = { 2460 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 }, 2461 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 }, 2462 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 }, 2463 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 } 2464 }; 2465 int chan; 2466 2467 for (chan = 0; chan < 4; chan++) { 2468 int i; 2469 2470 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 2471 (chan * 0x2000) | 0x0200); 2472 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); 2473 2474 for (i = 0; i < 6; i++) 2475 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 2476 test_pat[chan][i]); 2477 2478 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); 2479 if (tg3_wait_macro_done(tp)) { 2480 *resetp = 1; 2481 return -EBUSY; 2482 } 2483 2484 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 2485 (chan * 0x2000) | 0x0200); 2486 tg3_writephy(tp, 
MII_TG3_DSP_CONTROL, 0x0082); 2487 if (tg3_wait_macro_done(tp)) { 2488 *resetp = 1; 2489 return -EBUSY; 2490 } 2491 2492 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802); 2493 if (tg3_wait_macro_done(tp)) { 2494 *resetp = 1; 2495 return -EBUSY; 2496 } 2497 2498 for (i = 0; i < 6; i += 2) { 2499 u32 low, high; 2500 2501 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) || 2502 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) || 2503 tg3_wait_macro_done(tp)) { 2504 *resetp = 1; 2505 return -EBUSY; 2506 } 2507 low &= 0x7fff; 2508 high &= 0x000f; 2509 if (low != test_pat[chan][i] || 2510 high != test_pat[chan][i+1]) { 2511 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b); 2512 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001); 2513 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005); 2514 2515 return -EBUSY; 2516 } 2517 } 2518 } 2519 2520 return 0; 2521 } 2522 2523 static int tg3_phy_reset_chanpat(struct tg3 *tp) 2524 { 2525 int chan; 2526 2527 for (chan = 0; chan < 4; chan++) { 2528 int i; 2529 2530 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 2531 (chan * 0x2000) | 0x0200); 2532 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); 2533 for (i = 0; i < 6; i++) 2534 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000); 2535 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); 2536 if (tg3_wait_macro_done(tp)) 2537 return -EBUSY; 2538 } 2539 2540 return 0; 2541 } 2542 2543 static int tg3_phy_reset_5703_4_5(struct tg3 *tp) 2544 { 2545 u32 reg32, phy9_orig; 2546 int retries, do_phy_reset, err; 2547 2548 retries = 10; 2549 do_phy_reset = 1; 2550 do { 2551 if (do_phy_reset) { 2552 err = tg3_bmcr_reset(tp); 2553 if (err) 2554 return err; 2555 do_phy_reset = 0; 2556 } 2557 2558 /* Disable transmitter and interrupt. */ 2559 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) 2560 continue; 2561 2562 reg32 |= 0x3000; 2563 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); 2564 2565 /* Set full-duplex, 1000 mbps. */ 2566 tg3_writephy(tp, MII_BMCR, 2567 BMCR_FULLDPLX | BMCR_SPEED1000); 2568 2569 /* Set to master mode. */ 2570 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig)) 2571 continue; 2572 2573 tg3_writephy(tp, MII_CTRL1000, 2574 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); 2575 2576 err = tg3_phy_toggle_auxctl_smdsp(tp, true); 2577 if (err) 2578 return err; 2579 2580 /* Block the PHY control access. */ 2581 tg3_phydsp_write(tp, 0x8005, 0x0800); 2582 2583 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset); 2584 if (!err) 2585 break; 2586 } while (--retries); 2587 2588 err = tg3_phy_reset_chanpat(tp); 2589 if (err) 2590 return err; 2591 2592 tg3_phydsp_write(tp, 0x8005, 0x0000); 2593 2594 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200); 2595 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000); 2596 2597 tg3_phy_toggle_auxctl_smdsp(tp, false); 2598 2599 tg3_writephy(tp, MII_CTRL1000, phy9_orig); 2600 2601 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32); 2602 if (err) 2603 return err; 2604 2605 reg32 &= ~0x3000; 2606 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); 2607 2608 return 0; 2609 } 2610 2611 static void tg3_carrier_off(struct tg3 *tp) 2612 { 2613 netif_carrier_off(tp->dev); 2614 tp->link_up = false; 2615 } 2616 2617 static void tg3_warn_mgmt_link_flap(struct tg3 *tp) 2618 { 2619 if (tg3_flag(tp, ENABLE_ASF)) 2620 netdev_warn(tp->dev, 2621 "Management side-band traffic will be interrupted during phy settings change\n"); 2622 } 2623 2624 /* Reset the tigon3 PHY and apply the chip-specific workarounds 2625 * that must follow the reset.
2626 */ 2627 static int tg3_phy_reset(struct tg3 *tp) 2628 { 2629 u32 val, cpmuctrl; 2630 int err; 2631 2632 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 2633 val = tr32(GRC_MISC_CFG); 2634 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ); 2635 udelay(40); 2636 } 2637 err = tg3_readphy(tp, MII_BMSR, &val); 2638 err |= tg3_readphy(tp, MII_BMSR, &val); 2639 if (err != 0) 2640 return -EBUSY; 2641 2642 if (netif_running(tp->dev) && tp->link_up) { 2643 netif_carrier_off(tp->dev); 2644 tg3_link_report(tp); 2645 } 2646 2647 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 2648 tg3_asic_rev(tp) == ASIC_REV_5704 || 2649 tg3_asic_rev(tp) == ASIC_REV_5705) { 2650 err = tg3_phy_reset_5703_4_5(tp); 2651 if (err) 2652 return err; 2653 goto out; 2654 } 2655 2656 cpmuctrl = 0; 2657 if (tg3_asic_rev(tp) == ASIC_REV_5784 && 2658 tg3_chip_rev(tp) != CHIPREV_5784_AX) { 2659 cpmuctrl = tr32(TG3_CPMU_CTRL); 2660 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) 2661 tw32(TG3_CPMU_CTRL, 2662 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY); 2663 } 2664 2665 err = tg3_bmcr_reset(tp); 2666 if (err) 2667 return err; 2668 2669 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) { 2670 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz; 2671 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val); 2672 2673 tw32(TG3_CPMU_CTRL, cpmuctrl); 2674 } 2675 2676 if (tg3_chip_rev(tp) == CHIPREV_5784_AX || 2677 tg3_chip_rev(tp) == CHIPREV_5761_AX) { 2678 val = tr32(TG3_CPMU_LSPD_1000MB_CLK); 2679 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) == 2680 CPMU_LSPD_1000MB_MACCLK_12_5) { 2681 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; 2682 udelay(40); 2683 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); 2684 } 2685 } 2686 2687 if (tg3_flag(tp, 5717_PLUS) && 2688 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) 2689 return 0; 2690 2691 tg3_phy_apply_otp(tp); 2692 2693 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 2694 tg3_phy_toggle_apd(tp, true); 2695 else 2696 tg3_phy_toggle_apd(tp, false); 2697 2698 out: 2699 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) && 2700 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2701 tg3_phydsp_write(tp, 0x201f, 0x2aaa); 2702 tg3_phydsp_write(tp, 0x000a, 0x0323); 2703 tg3_phy_toggle_auxctl_smdsp(tp, false); 2704 } 2705 2706 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) { 2707 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); 2708 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); 2709 } 2710 2711 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) { 2712 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2713 tg3_phydsp_write(tp, 0x000a, 0x310b); 2714 tg3_phydsp_write(tp, 0x201f, 0x9506); 2715 tg3_phydsp_write(tp, 0x401f, 0x14e2); 2716 tg3_phy_toggle_auxctl_smdsp(tp, false); 2717 } 2718 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) { 2719 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2720 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); 2721 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) { 2722 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); 2723 tg3_writephy(tp, MII_TG3_TEST1, 2724 MII_TG3_TEST1_TRIM_EN | 0x4); 2725 } else 2726 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); 2727 2728 tg3_phy_toggle_auxctl_smdsp(tp, false); 2729 } 2730 } 2731 2732 /* Set Extended packet length bit (bit 14) on all chips that */ 2733 /* support jumbo frames */ 2734 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 2735 /* Cannot do read-modify-write on 5401 */ 2736 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); 2737 } else if (tg3_flag(tp, JUMBO_CAPABLE)) { 2738 /* Set bit 14 with read-modify-write to preserve other bits */ 2739 err = tg3_phy_auxctl_read(tp, 2740 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); 2741 
if (!err) 2742 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 2743 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN); 2744 } 2745 2746 /* Set phy register 0x10 bit 0 to high fifo elasticity to support 2747 * jumbo frames transmission. 2748 */ 2749 if (tg3_flag(tp, JUMBO_CAPABLE)) { 2750 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val)) 2751 tg3_writephy(tp, MII_TG3_EXT_CTRL, 2752 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC); 2753 } 2754 2755 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 2756 /* adjust output voltage */ 2757 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12); 2758 } 2759 2760 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0) 2761 tg3_phydsp_write(tp, 0xffb, 0x4000); 2762 2763 tg3_phy_toggle_automdix(tp, true); 2764 tg3_phy_set_wirespeed(tp); 2765 return 0; 2766 } 2767 2768 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001 2769 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002 2770 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \ 2771 TG3_GPIO_MSG_NEED_VAUX) 2772 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \ 2773 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \ 2774 (TG3_GPIO_MSG_DRVR_PRES << 4) | \ 2775 (TG3_GPIO_MSG_DRVR_PRES << 8) | \ 2776 (TG3_GPIO_MSG_DRVR_PRES << 12)) 2777 2778 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \ 2779 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \ 2780 (TG3_GPIO_MSG_NEED_VAUX << 4) | \ 2781 (TG3_GPIO_MSG_NEED_VAUX << 8) | \ 2782 (TG3_GPIO_MSG_NEED_VAUX << 12)) 2783 2784 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat) 2785 { 2786 u32 status, shift; 2787 2788 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2789 tg3_asic_rev(tp) == ASIC_REV_5719) 2790 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG); 2791 else 2792 status = tr32(TG3_CPMU_DRV_STATUS); 2793 2794 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn; 2795 status &= ~(TG3_GPIO_MSG_MASK << shift); 2796 status |= (newstat << shift); 2797 2798 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2799 tg3_asic_rev(tp) == ASIC_REV_5719) 2800 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status); 2801 else 2802 tw32(TG3_CPMU_DRV_STATUS, status); 2803 2804 return status >> TG3_APE_GPIO_MSG_SHIFT; 2805 } 2806 2807 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp) 2808 { 2809 if (!tg3_flag(tp, IS_NIC)) 2810 return 0; 2811 2812 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2813 tg3_asic_rev(tp) == ASIC_REV_5719 || 2814 tg3_asic_rev(tp) == ASIC_REV_5720) { 2815 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) 2816 return -EIO; 2817 2818 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES); 2819 2820 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 2821 TG3_GRC_LCLCTL_PWRSW_DELAY); 2822 2823 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); 2824 } else { 2825 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 2826 TG3_GRC_LCLCTL_PWRSW_DELAY); 2827 } 2828 2829 return 0; 2830 } 2831 2832 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp) 2833 { 2834 u32 grc_local_ctrl; 2835 2836 if (!tg3_flag(tp, IS_NIC) || 2837 tg3_asic_rev(tp) == ASIC_REV_5700 || 2838 tg3_asic_rev(tp) == ASIC_REV_5701) 2839 return; 2840 2841 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1; 2842 2843 tw32_wait_f(GRC_LOCAL_CTRL, 2844 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, 2845 TG3_GRC_LCLCTL_PWRSW_DELAY); 2846 2847 tw32_wait_f(GRC_LOCAL_CTRL, 2848 grc_local_ctrl, 2849 TG3_GRC_LCLCTL_PWRSW_DELAY); 2850 2851 tw32_wait_f(GRC_LOCAL_CTRL, 2852 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, 2853 TG3_GRC_LCLCTL_PWRSW_DELAY); 2854 } 2855 2856 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp) 2857 { 2858 if (!tg3_flag(tp, IS_NIC)) 2859 return; 2860 2861 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 2862 tg3_asic_rev(tp) == 
ASIC_REV_5701) { 2863 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2864 (GRC_LCLCTRL_GPIO_OE0 | 2865 GRC_LCLCTRL_GPIO_OE1 | 2866 GRC_LCLCTRL_GPIO_OE2 | 2867 GRC_LCLCTRL_GPIO_OUTPUT0 | 2868 GRC_LCLCTRL_GPIO_OUTPUT1), 2869 TG3_GRC_LCLCTL_PWRSW_DELAY); 2870 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 2871 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { 2872 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */ 2873 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | 2874 GRC_LCLCTRL_GPIO_OE1 | 2875 GRC_LCLCTRL_GPIO_OE2 | 2876 GRC_LCLCTRL_GPIO_OUTPUT0 | 2877 GRC_LCLCTRL_GPIO_OUTPUT1 | 2878 tp->grc_local_ctrl; 2879 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 2880 TG3_GRC_LCLCTL_PWRSW_DELAY); 2881 2882 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2; 2883 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 2884 TG3_GRC_LCLCTL_PWRSW_DELAY); 2885 2886 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0; 2887 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 2888 TG3_GRC_LCLCTL_PWRSW_DELAY); 2889 } else { 2890 u32 no_gpio2; 2891 u32 grc_local_ctrl = 0; 2892 2893 /* Workaround to prevent overdrawing Amps. */ 2894 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 2895 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; 2896 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2897 grc_local_ctrl, 2898 TG3_GRC_LCLCTL_PWRSW_DELAY); 2899 } 2900 2901 /* On 5753 and variants, GPIO2 cannot be used. */ 2902 no_gpio2 = tp->nic_sram_data_cfg & 2903 NIC_SRAM_DATA_CFG_NO_GPIO2; 2904 2905 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | 2906 GRC_LCLCTRL_GPIO_OE1 | 2907 GRC_LCLCTRL_GPIO_OE2 | 2908 GRC_LCLCTRL_GPIO_OUTPUT1 | 2909 GRC_LCLCTRL_GPIO_OUTPUT2; 2910 if (no_gpio2) { 2911 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 | 2912 GRC_LCLCTRL_GPIO_OUTPUT2); 2913 } 2914 tw32_wait_f(GRC_LOCAL_CTRL, 2915 tp->grc_local_ctrl | grc_local_ctrl, 2916 TG3_GRC_LCLCTL_PWRSW_DELAY); 2917 2918 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0; 2919 2920 tw32_wait_f(GRC_LOCAL_CTRL, 2921 tp->grc_local_ctrl | grc_local_ctrl, 2922 TG3_GRC_LCLCTL_PWRSW_DELAY); 2923 2924 if (!no_gpio2) { 2925 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2; 2926 tw32_wait_f(GRC_LOCAL_CTRL, 2927 tp->grc_local_ctrl | grc_local_ctrl, 2928 TG3_GRC_LCLCTL_PWRSW_DELAY); 2929 } 2930 } 2931 } 2932 2933 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable) 2934 { 2935 u32 msg = 0; 2936 2937 /* Serialize power state transitions */ 2938 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) 2939 return; 2940 2941 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable) 2942 msg = TG3_GPIO_MSG_NEED_VAUX; 2943 2944 msg = tg3_set_function_status(tp, msg); 2945 2946 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK) 2947 goto done; 2948 2949 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK) 2950 tg3_pwrsrc_switch_to_vaux(tp); 2951 else 2952 tg3_pwrsrc_die_with_vmain(tp); 2953 2954 done: 2955 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); 2956 } 2957 2958 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol) 2959 { 2960 bool need_vaux = false; 2961 2962 /* The GPIOs do something completely different on 57765. */ 2963 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS)) 2964 return; 2965 2966 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2967 tg3_asic_rev(tp) == ASIC_REV_5719 || 2968 tg3_asic_rev(tp) == ASIC_REV_5720) { 2969 tg3_frob_aux_power_5717(tp, include_wol ? 
2970 tg3_flag(tp, WOL_ENABLE) != 0 : 0); 2971 return; 2972 } 2973 2974 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) { 2975 struct net_device *dev_peer; 2976 2977 dev_peer = pci_get_drvdata(tp->pdev_peer); 2978 2979 /* remove_one() may have been run on the peer. */ 2980 if (dev_peer) { 2981 struct tg3 *tp_peer = netdev_priv(dev_peer); 2982 2983 if (tg3_flag(tp_peer, INIT_COMPLETE)) 2984 return; 2985 2986 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) || 2987 tg3_flag(tp_peer, ENABLE_ASF)) 2988 need_vaux = true; 2989 } 2990 } 2991 2992 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) || 2993 tg3_flag(tp, ENABLE_ASF)) 2994 need_vaux = true; 2995 2996 if (need_vaux) 2997 tg3_pwrsrc_switch_to_vaux(tp); 2998 else 2999 tg3_pwrsrc_die_with_vmain(tp); 3000 } 3001 3002 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) 3003 { 3004 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2) 3005 return 1; 3006 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) { 3007 if (speed != SPEED_10) 3008 return 1; 3009 } else if (speed == SPEED_10) 3010 return 1; 3011 3012 return 0; 3013 } 3014 3015 static bool tg3_phy_power_bug(struct tg3 *tp) 3016 { 3017 switch (tg3_asic_rev(tp)) { 3018 case ASIC_REV_5700: 3019 case ASIC_REV_5704: 3020 return true; 3021 case ASIC_REV_5780: 3022 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 3023 return true; 3024 return false; 3025 case ASIC_REV_5717: 3026 if (!tp->pci_fn) 3027 return true; 3028 return false; 3029 case ASIC_REV_5719: 3030 case ASIC_REV_5720: 3031 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 3032 !tp->pci_fn) 3033 return true; 3034 return false; 3035 } 3036 3037 return false; 3038 } 3039 3040 static bool tg3_phy_led_bug(struct tg3 *tp) 3041 { 3042 switch (tg3_asic_rev(tp)) { 3043 case ASIC_REV_5719: 3044 case ASIC_REV_5720: 3045 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 3046 !tp->pci_fn) 3047 return true; 3048 return false; 3049 } 3050 3051 return false; 3052 } 3053 3054 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) 3055 { 3056 u32 val; 3057 3058 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) 3059 return; 3060 3061 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 3062 if (tg3_asic_rev(tp) == ASIC_REV_5704) { 3063 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL); 3064 u32 serdes_cfg = tr32(MAC_SERDES_CFG); 3065 3066 sg_dig_ctrl |= 3067 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET; 3068 tw32(SG_DIG_CTRL, sg_dig_ctrl); 3069 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15)); 3070 } 3071 return; 3072 } 3073 3074 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 3075 tg3_bmcr_reset(tp); 3076 val = tr32(GRC_MISC_CFG); 3077 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); 3078 udelay(40); 3079 return; 3080 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 3081 u32 phytest; 3082 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { 3083 u32 phy; 3084 3085 tg3_writephy(tp, MII_ADVERTISE, 0); 3086 tg3_writephy(tp, MII_BMCR, 3087 BMCR_ANENABLE | BMCR_ANRESTART); 3088 3089 tg3_writephy(tp, MII_TG3_FET_TEST, 3090 phytest | MII_TG3_FET_SHADOW_EN); 3091 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) { 3092 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD; 3093 tg3_writephy(tp, 3094 MII_TG3_FET_SHDW_AUXMODE4, 3095 phy); 3096 } 3097 tg3_writephy(tp, MII_TG3_FET_TEST, phytest); 3098 } 3099 return; 3100 } else if (do_low_power) { 3101 if (!tg3_phy_led_bug(tp)) 3102 tg3_writephy(tp, MII_TG3_EXT_CTRL, 3103 MII_TG3_EXT_CTRL_FORCE_LED_OFF); 3104 3105 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR | 3106 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | 3107 MII_TG3_AUXCTL_PCTL_VREG_11V; 3108 
tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val); 3109 } 3110 3111 /* The PHY should not be powered down on some chips because 3112 * of bugs. 3113 */ 3114 if (tg3_phy_power_bug(tp)) 3115 return; 3116 3117 if (tg3_chip_rev(tp) == CHIPREV_5784_AX || 3118 tg3_chip_rev(tp) == CHIPREV_5761_AX) { 3119 val = tr32(TG3_CPMU_LSPD_1000MB_CLK); 3120 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; 3121 val |= CPMU_LSPD_1000MB_MACCLK_12_5; 3122 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); 3123 } 3124 3125 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN); 3126 } 3127 3128 /* tp->lock is held. */ 3129 static int tg3_nvram_lock(struct tg3 *tp) 3130 { 3131 if (tg3_flag(tp, NVRAM)) { 3132 int i; 3133 3134 if (tp->nvram_lock_cnt == 0) { 3135 tw32(NVRAM_SWARB, SWARB_REQ_SET1); 3136 for (i = 0; i < 8000; i++) { 3137 if (tr32(NVRAM_SWARB) & SWARB_GNT1) 3138 break; 3139 udelay(20); 3140 } 3141 if (i == 8000) { 3142 tw32(NVRAM_SWARB, SWARB_REQ_CLR1); 3143 return -ENODEV; 3144 } 3145 } 3146 tp->nvram_lock_cnt++; 3147 } 3148 return 0; 3149 } 3150 3151 /* tp->lock is held. */ 3152 static void tg3_nvram_unlock(struct tg3 *tp) 3153 { 3154 if (tg3_flag(tp, NVRAM)) { 3155 if (tp->nvram_lock_cnt > 0) 3156 tp->nvram_lock_cnt--; 3157 if (tp->nvram_lock_cnt == 0) 3158 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); 3159 } 3160 } 3161 3162 /* tp->lock is held. */ 3163 static void tg3_enable_nvram_access(struct tg3 *tp) 3164 { 3165 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { 3166 u32 nvaccess = tr32(NVRAM_ACCESS); 3167 3168 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); 3169 } 3170 } 3171 3172 /* tp->lock is held. */ 3173 static void tg3_disable_nvram_access(struct tg3 *tp) 3174 { 3175 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { 3176 u32 nvaccess = tr32(NVRAM_ACCESS); 3177 3178 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); 3179 } 3180 } 3181 3182 static int tg3_nvram_read_using_eeprom(struct tg3 *tp, 3183 u32 offset, u32 *val) 3184 { 3185 u32 tmp; 3186 int i; 3187 3188 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0) 3189 return -EINVAL; 3190 3191 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK | 3192 EEPROM_ADDR_DEVID_MASK | 3193 EEPROM_ADDR_READ); 3194 tw32(GRC_EEPROM_ADDR, 3195 tmp | 3196 (0 << EEPROM_ADDR_DEVID_SHIFT) | 3197 ((offset << EEPROM_ADDR_ADDR_SHIFT) & 3198 EEPROM_ADDR_ADDR_MASK) | 3199 EEPROM_ADDR_READ | EEPROM_ADDR_START); 3200 3201 for (i = 0; i < 1000; i++) { 3202 tmp = tr32(GRC_EEPROM_ADDR); 3203 3204 if (tmp & EEPROM_ADDR_COMPLETE) 3205 break; 3206 msleep(1); 3207 } 3208 if (!(tmp & EEPROM_ADDR_COMPLETE)) 3209 return -EBUSY; 3210 3211 tmp = tr32(GRC_EEPROM_DATA); 3212 3213 /* 3214 * The data will always be opposite the native endian 3215 * format. Perform a blind byteswap to compensate. 
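 * For example, swab32(0x11223344) == 0x44332211 on any host; the
 * swap below is deliberately unconditional rather than a
 * cpu_to_le32()/le32_to_cpu() style endian-aware conversion.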
3216 */ 3217 *val = swab32(tmp); 3218 3219 return 0; 3220 } 3221 3222 #define NVRAM_CMD_TIMEOUT 10000 3223 3224 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) 3225 { 3226 int i; 3227 3228 tw32(NVRAM_CMD, nvram_cmd); 3229 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) { 3230 usleep_range(10, 40); 3231 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) { 3232 udelay(10); 3233 break; 3234 } 3235 } 3236 3237 if (i == NVRAM_CMD_TIMEOUT) 3238 return -EBUSY; 3239 3240 return 0; 3241 } 3242 3243 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) 3244 { 3245 if (tg3_flag(tp, NVRAM) && 3246 tg3_flag(tp, NVRAM_BUFFERED) && 3247 tg3_flag(tp, FLASH) && 3248 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && 3249 (tp->nvram_jedecnum == JEDEC_ATMEL)) 3250 3251 addr = ((addr / tp->nvram_pagesize) << 3252 ATMEL_AT45DB0X1B_PAGE_POS) + 3253 (addr % tp->nvram_pagesize); 3254 3255 return addr; 3256 } 3257 3258 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) 3259 { 3260 if (tg3_flag(tp, NVRAM) && 3261 tg3_flag(tp, NVRAM_BUFFERED) && 3262 tg3_flag(tp, FLASH) && 3263 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && 3264 (tp->nvram_jedecnum == JEDEC_ATMEL)) 3265 3266 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * 3267 tp->nvram_pagesize) + 3268 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1)); 3269 3270 return addr; 3271 } 3272 3273 /* NOTE: Data read in from NVRAM is byteswapped according to 3274 * the byteswapping settings for all other register accesses. 3275 * tg3 devices are BE devices, so on a BE machine, the data 3276 * returned will be exactly as it is seen in NVRAM. On a LE 3277 * machine, the 32-bit value will be byteswapped. 3278 */ 3279 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) 3280 { 3281 int ret; 3282 3283 if (!tg3_flag(tp, NVRAM)) 3284 return tg3_nvram_read_using_eeprom(tp, offset, val); 3285 3286 offset = tg3_nvram_phys_addr(tp, offset); 3287 3288 if (offset > NVRAM_ADDR_MSK) 3289 return -EINVAL; 3290 3291 ret = tg3_nvram_lock(tp); 3292 if (ret) 3293 return ret; 3294 3295 tg3_enable_nvram_access(tp); 3296 3297 tw32(NVRAM_ADDR, offset); 3298 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO | 3299 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE); 3300 3301 if (ret == 0) 3302 *val = tr32(NVRAM_RDDATA); 3303 3304 tg3_disable_nvram_access(tp); 3305 3306 tg3_nvram_unlock(tp); 3307 3308 return ret; 3309 } 3310 3311 /* Ensures NVRAM data is in bytestream format. */ 3312 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val) 3313 { 3314 u32 v; 3315 int res = tg3_nvram_read(tp, offset, &v); 3316 if (!res) 3317 *val = cpu_to_be32(v); 3318 return res; 3319 } 3320 3321 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, 3322 u32 offset, u32 len, u8 *buf) 3323 { 3324 int i, j, rc = 0; 3325 u32 val; 3326 3327 for (i = 0; i < len; i += 4) { 3328 u32 addr; 3329 __be32 data; 3330 3331 addr = offset + i; 3332 3333 memcpy(&data, buf + i, 4); 3334 3335 /* 3336 * The SEEPROM interface expects the data to always be opposite 3337 * the native endian format. We accomplish this by reversing 3338 * all the operations that would have been performed on the 3339 * data from a call to tg3_nvram_read_be32(). 
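 */

/* A round-trip sketch (hypothetical value 0x11223344) of the write
 * below: tg3_nvram_read_be32() would have produced
 * data == cpu_to_be32(swab32(raw)), so feeding
 * swab32(be32_to_cpu(data)) back to GRC_EEPROM_DATA recovers the raw
 * register value exactly.
 */
#if 0
u32 raw = 0x11223344;
__be32 data = cpu_to_be32(swab32(raw));

WARN_ON(swab32(be32_to_cpu(data)) != raw);	/* never fires */
#endif

/* See the byte-order note above.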
3340 */ 3341 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data))); 3342 3343 val = tr32(GRC_EEPROM_ADDR); 3344 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE); 3345 3346 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK | 3347 EEPROM_ADDR_READ); 3348 tw32(GRC_EEPROM_ADDR, val | 3349 (0 << EEPROM_ADDR_DEVID_SHIFT) | 3350 (addr & EEPROM_ADDR_ADDR_MASK) | 3351 EEPROM_ADDR_START | 3352 EEPROM_ADDR_WRITE); 3353 3354 for (j = 0; j < 1000; j++) { 3355 val = tr32(GRC_EEPROM_ADDR); 3356 3357 if (val & EEPROM_ADDR_COMPLETE) 3358 break; 3359 msleep(1); 3360 } 3361 if (!(val & EEPROM_ADDR_COMPLETE)) { 3362 rc = -EBUSY; 3363 break; 3364 } 3365 } 3366 3367 return rc; 3368 } 3369 3370 /* offset and length are dword aligned */ 3371 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, 3372 u8 *buf) 3373 { 3374 int ret = 0; 3375 u32 pagesize = tp->nvram_pagesize; 3376 u32 pagemask = pagesize - 1; 3377 u32 nvram_cmd; 3378 u8 *tmp; 3379 3380 tmp = kmalloc(pagesize, GFP_KERNEL); 3381 if (tmp == NULL) 3382 return -ENOMEM; 3383 3384 while (len) { 3385 int j; 3386 u32 phy_addr, page_off, size; 3387 3388 phy_addr = offset & ~pagemask; 3389 3390 for (j = 0; j < pagesize; j += 4) { 3391 ret = tg3_nvram_read_be32(tp, phy_addr + j, 3392 (__be32 *) (tmp + j)); 3393 if (ret) 3394 break; 3395 } 3396 if (ret) 3397 break; 3398 3399 page_off = offset & pagemask; 3400 size = pagesize; 3401 if (len < size) 3402 size = len; 3403 3404 len -= size; 3405 3406 memcpy(tmp + page_off, buf, size); 3407 3408 offset = offset + (pagesize - page_off); 3409 3410 tg3_enable_nvram_access(tp); 3411 3412 /* 3413 * Before we can erase the flash page, we need 3414 * to issue a special "write enable" command. 3415 */ 3416 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3417 3418 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 3419 break; 3420 3421 /* Erase the target page */ 3422 tw32(NVRAM_ADDR, phy_addr); 3423 3424 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR | 3425 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE; 3426 3427 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 3428 break; 3429 3430 /* Issue another write enable to start the write. 
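 */

/* Page arithmetic used at the top of this loop, shown for a
 * hypothetical 256-byte flash page: for offset 0x10c the whole page
 * at 0x100 is read back, patched from byte 0x0c onward, erased, and
 * re-written.
 */
#if 0
u32 example_pagesize = 256;
u32 example_pagemask = example_pagesize - 1;
u32 example_off = 0x10c;
u32 example_phy_addr = example_off & ~example_pagemask;	/* 0x100 */
u32 example_page_off = example_off & example_pagemask;	/* 0x0c */
#endif

/* Issue another write enable to start the write.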
*/ 3431 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3432 3433 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 3434 break; 3435 3436 for (j = 0; j < pagesize; j += 4) { 3437 __be32 data; 3438 3439 data = *((__be32 *) (tmp + j)); 3440 3441 tw32(NVRAM_WRDATA, be32_to_cpu(data)); 3442 3443 tw32(NVRAM_ADDR, phy_addr + j); 3444 3445 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | 3446 NVRAM_CMD_WR; 3447 3448 if (j == 0) 3449 nvram_cmd |= NVRAM_CMD_FIRST; 3450 else if (j == (pagesize - 4)) 3451 nvram_cmd |= NVRAM_CMD_LAST; 3452 3453 ret = tg3_nvram_exec_cmd(tp, nvram_cmd); 3454 if (ret) 3455 break; 3456 } 3457 if (ret) 3458 break; 3459 } 3460 3461 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3462 tg3_nvram_exec_cmd(tp, nvram_cmd); 3463 3464 kfree(tmp); 3465 3466 return ret; 3467 } 3468 3469 /* offset and length are dword aligned */ 3470 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, 3471 u8 *buf) 3472 { 3473 int i, ret = 0; 3474 3475 for (i = 0; i < len; i += 4, offset += 4) { 3476 u32 page_off, phy_addr, nvram_cmd; 3477 __be32 data; 3478 3479 memcpy(&data, buf + i, 4); 3480 tw32(NVRAM_WRDATA, be32_to_cpu(data)); 3481 3482 page_off = offset % tp->nvram_pagesize; 3483 3484 phy_addr = tg3_nvram_phys_addr(tp, offset); 3485 3486 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR; 3487 3488 if (page_off == 0 || i == 0) 3489 nvram_cmd |= NVRAM_CMD_FIRST; 3490 if (page_off == (tp->nvram_pagesize - 4)) 3491 nvram_cmd |= NVRAM_CMD_LAST; 3492 3493 if (i == (len - 4)) 3494 nvram_cmd |= NVRAM_CMD_LAST; 3495 3496 if ((nvram_cmd & NVRAM_CMD_FIRST) || 3497 !tg3_flag(tp, FLASH) || 3498 !tg3_flag(tp, 57765_PLUS)) 3499 tw32(NVRAM_ADDR, phy_addr); 3500 3501 if (tg3_asic_rev(tp) != ASIC_REV_5752 && 3502 !tg3_flag(tp, 5755_PLUS) && 3503 (tp->nvram_jedecnum == JEDEC_ST) && 3504 (nvram_cmd & NVRAM_CMD_FIRST)) { 3505 u32 cmd; 3506 3507 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3508 ret = tg3_nvram_exec_cmd(tp, cmd); 3509 if (ret) 3510 break; 3511 } 3512 if (!tg3_flag(tp, FLASH)) { 3513 /* We always do complete word writes to eeprom. 
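 */

/* NVRAM_CMD_FIRST/NVRAM_CMD_LAST sketch for the flash path above,
 * assuming a hypothetical 264-byte (Atmel-style) page: the dword at
 * offset 528 has page_off == 528 % 264 == 0 and so carries
 * NVRAM_CMD_FIRST, while the dword at offset 788 has
 * page_off == 260 == pagesize - 4 and so carries NVRAM_CMD_LAST.
 */

/* We always do complete word writes to eeprom.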
*/ 3514 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST); 3515 } 3516 3517 ret = tg3_nvram_exec_cmd(tp, nvram_cmd); 3518 if (ret) 3519 break; 3520 } 3521 return ret; 3522 } 3523 3524 /* offset and length are dword aligned */ 3525 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) 3526 { 3527 int ret; 3528 3529 if (tg3_flag(tp, EEPROM_WRITE_PROT)) { 3530 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & 3531 ~GRC_LCLCTRL_GPIO_OUTPUT1); 3532 udelay(40); 3533 } 3534 3535 if (!tg3_flag(tp, NVRAM)) { 3536 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); 3537 } else { 3538 u32 grc_mode; 3539 3540 ret = tg3_nvram_lock(tp); 3541 if (ret) 3542 return ret; 3543 3544 tg3_enable_nvram_access(tp); 3545 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) 3546 tw32(NVRAM_WRITE1, 0x406); 3547 3548 grc_mode = tr32(GRC_MODE); 3549 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); 3550 3551 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) { 3552 ret = tg3_nvram_write_block_buffered(tp, offset, len, 3553 buf); 3554 } else { 3555 ret = tg3_nvram_write_block_unbuffered(tp, offset, len, 3556 buf); 3557 } 3558 3559 grc_mode = tr32(GRC_MODE); 3560 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE); 3561 3562 tg3_disable_nvram_access(tp); 3563 tg3_nvram_unlock(tp); 3564 } 3565 3566 if (tg3_flag(tp, EEPROM_WRITE_PROT)) { 3567 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 3568 udelay(40); 3569 } 3570 3571 return ret; 3572 } 3573 3574 #define RX_CPU_SCRATCH_BASE 0x30000 3575 #define RX_CPU_SCRATCH_SIZE 0x04000 3576 #define TX_CPU_SCRATCH_BASE 0x34000 3577 #define TX_CPU_SCRATCH_SIZE 0x04000 3578 3579 /* tp->lock is held. */ 3580 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base) 3581 { 3582 int i; 3583 const int iters = 10000; 3584 3585 for (i = 0; i < iters; i++) { 3586 tw32(cpu_base + CPU_STATE, 0xffffffff); 3587 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); 3588 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT) 3589 break; 3590 if (pci_channel_offline(tp->pdev)) 3591 return -EBUSY; 3592 } 3593 3594 return (i == iters) ? -EBUSY : 0; 3595 } 3596 3597 /* tp->lock is held. */ 3598 static int tg3_rxcpu_pause(struct tg3 *tp) 3599 { 3600 int rc = tg3_pause_cpu(tp, RX_CPU_BASE); 3601 3602 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); 3603 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); 3604 udelay(10); 3605 3606 return rc; 3607 } 3608 3609 /* tp->lock is held. */ 3610 static int tg3_txcpu_pause(struct tg3 *tp) 3611 { 3612 return tg3_pause_cpu(tp, TX_CPU_BASE); 3613 } 3614 3615 /* tp->lock is held. */ 3616 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base) 3617 { 3618 tw32(cpu_base + CPU_STATE, 0xffffffff); 3619 tw32_f(cpu_base + CPU_MODE, 0x00000000); 3620 } 3621 3622 /* tp->lock is held. */ 3623 static void tg3_rxcpu_resume(struct tg3 *tp) 3624 { 3625 tg3_resume_cpu(tp, RX_CPU_BASE); 3626 } 3627 3628 /* tp->lock is held. */ 3629 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base) 3630 { 3631 int rc; 3632 3633 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); 3634 3635 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 3636 u32 val = tr32(GRC_VCPU_EXT_CTRL); 3637 3638 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); 3639 return 0; 3640 } 3641 if (cpu_base == RX_CPU_BASE) { 3642 rc = tg3_rxcpu_pause(tp); 3643 } else { 3644 /* 3645 * There is only an Rx CPU for the 5750 derivative in the 3646 * BCM4785. 
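 */

/* A pause/resume bracket (an illustrative sketch; this mirrors how
 * tg3_load_57766_firmware() later uses these helpers), with tp->lock
 * held by the caller:
 */
#if 0
if (!tg3_rxcpu_pause(tp)) {
	/* ... load firmware or poke RX CPU state here ... */
	tg3_rxcpu_resume(tp);
}
#endif

/* There is only an Rx CPU on those parts, so treat a TX CPU halt
 * request as a no-op.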
3647 */ 3648 if (tg3_flag(tp, IS_SSB_CORE)) 3649 return 0; 3650 3651 rc = tg3_txcpu_pause(tp); 3652 } 3653 3654 if (rc) { 3655 netdev_err(tp->dev, "%s timed out, %s CPU\n", 3656 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX"); 3657 return -ENODEV; 3658 } 3659 3660 /* Clear firmware's nvram arbitration. */ 3661 if (tg3_flag(tp, NVRAM)) 3662 tw32(NVRAM_SWARB, SWARB_REQ_CLR0); 3663 return 0; 3664 } 3665 3666 static int tg3_fw_data_len(struct tg3 *tp, 3667 const struct tg3_firmware_hdr *fw_hdr) 3668 { 3669 int fw_len; 3670 3671 /* Non fragmented firmware have one firmware header followed by a 3672 * contiguous chunk of data to be written. The length field in that 3673 * header is not the length of data to be written but the complete 3674 * length of the bss. The data length is determined based on 3675 * tp->fw->size minus headers. 3676 * 3677 * Fragmented firmware have a main header followed by multiple 3678 * fragments. Each fragment is identical to non fragmented firmware 3679 * with a firmware header followed by a contiguous chunk of data. In 3680 * the main header, the length field is unused and set to 0xffffffff. 3681 * In each fragment header the length is the entire size of that 3682 * fragment i.e. fragment data + header length. Data length is 3683 * therefore length field in the header minus TG3_FW_HDR_LEN. 3684 */ 3685 if (tp->fw_len == 0xffffffff) 3686 fw_len = be32_to_cpu(fw_hdr->len); 3687 else 3688 fw_len = tp->fw->size; 3689 3690 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32); 3691 } 3692 3693 /* tp->lock is held. */ 3694 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, 3695 u32 cpu_scratch_base, int cpu_scratch_size, 3696 const struct tg3_firmware_hdr *fw_hdr) 3697 { 3698 int err, i; 3699 void (*write_op)(struct tg3 *, u32, u32); 3700 int total_len = tp->fw->size; 3701 3702 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { 3703 netdev_err(tp->dev, 3704 "%s: Trying to load TX cpu firmware which is 5705\n", 3705 __func__); 3706 return -EINVAL; 3707 } 3708 3709 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766) 3710 write_op = tg3_write_mem; 3711 else 3712 write_op = tg3_write_indirect_reg32; 3713 3714 if (tg3_asic_rev(tp) != ASIC_REV_57766) { 3715 /* It is possible that bootcode is still loading at this point. 3716 * Get the nvram lock first before halting the cpu. 3717 */ 3718 int lock_err = tg3_nvram_lock(tp); 3719 err = tg3_halt_cpu(tp, cpu_base); 3720 if (!lock_err) 3721 tg3_nvram_unlock(tp); 3722 if (err) 3723 goto out; 3724 3725 for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) 3726 write_op(tp, cpu_scratch_base + i, 0); 3727 tw32(cpu_base + CPU_STATE, 0xffffffff); 3728 tw32(cpu_base + CPU_MODE, 3729 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT); 3730 } else { 3731 /* Subtract additional main header for fragmented firmware and 3732 * advance to the first fragment 3733 */ 3734 total_len -= TG3_FW_HDR_LEN; 3735 fw_hdr++; 3736 } 3737 3738 do { 3739 u32 *fw_data = (u32 *)(fw_hdr + 1); 3740 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++) 3741 write_op(tp, cpu_scratch_base + 3742 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) + 3743 (i * sizeof(u32)), 3744 be32_to_cpu(fw_data[i])); 3745 3746 total_len -= be32_to_cpu(fw_hdr->len); 3747 3748 /* Advance to next fragment */ 3749 fw_hdr = (struct tg3_firmware_hdr *) 3750 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len)); 3751 } while (total_len > 0); 3752 3753 err = 0; 3754 3755 out: 3756 return err; 3757 } 3758 3759 /* tp->lock is held. 
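 */

/* Hypothetical layout of a fragmented blob, per the description in
 * tg3_fw_data_len() above (fragment sizes are illustrative only):
 *
 *   main header       len == 0xffffffff (unused)
 *   fragment 1 header len == TG3_FW_HDR_LEN + 0x400
 *   fragment 1 data   0x400 bytes
 *   fragment 2 header len == TG3_FW_HDR_LEN + 0x80
 *   fragment 2 data   0x80 bytes
 */

/* tp->lock is held.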
*/ 3760 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc) 3761 { 3762 int i; 3763 const int iters = 5; 3764 3765 tw32(cpu_base + CPU_STATE, 0xffffffff); 3766 tw32_f(cpu_base + CPU_PC, pc); 3767 3768 for (i = 0; i < iters; i++) { 3769 if (tr32(cpu_base + CPU_PC) == pc) 3770 break; 3771 tw32(cpu_base + CPU_STATE, 0xffffffff); 3772 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); 3773 tw32_f(cpu_base + CPU_PC, pc); 3774 udelay(1000); 3775 } 3776 3777 return (i == iters) ? -EBUSY : 0; 3778 } 3779 3780 /* tp->lock is held. */ 3781 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) 3782 { 3783 const struct tg3_firmware_hdr *fw_hdr; 3784 int err; 3785 3786 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3787 3788 /* Firmware blob starts with version numbers, followed by 3789 start address and length. We are setting complete length. 3790 length = end_address_of_bss - start_address_of_text. 3791 Remainder is the blob to be loaded contiguously 3792 from start address. */ 3793 3794 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, 3795 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, 3796 fw_hdr); 3797 if (err) 3798 return err; 3799 3800 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, 3801 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, 3802 fw_hdr); 3803 if (err) 3804 return err; 3805 3806 /* Now startup only the RX cpu. */ 3807 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE, 3808 be32_to_cpu(fw_hdr->base_addr)); 3809 if (err) { 3810 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " 3811 "should be %08x\n", __func__, 3812 tr32(RX_CPU_BASE + CPU_PC), 3813 be32_to_cpu(fw_hdr->base_addr)); 3814 return -ENODEV; 3815 } 3816 3817 tg3_rxcpu_resume(tp); 3818 3819 return 0; 3820 } 3821 3822 static int tg3_validate_rxcpu_state(struct tg3 *tp) 3823 { 3824 const int iters = 1000; 3825 int i; 3826 u32 val; 3827 3828 /* Wait for boot code to complete initialization and enter service 3829 * loop. It is then safe to download service patches 3830 */ 3831 for (i = 0; i < iters; i++) { 3832 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP) 3833 break; 3834 3835 udelay(10); 3836 } 3837 3838 if (i == iters) { 3839 netdev_err(tp->dev, "Boot code not ready for service patches\n"); 3840 return -EBUSY; 3841 } 3842 3843 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE); 3844 if (val & 0xff) { 3845 netdev_warn(tp->dev, 3846 "Other patches exist. Not downloading EEE patch\n"); 3847 return -EEXIST; 3848 } 3849 3850 return 0; 3851 } 3852 3853 /* tp->lock is held. */ 3854 static void tg3_load_57766_firmware(struct tg3 *tp) 3855 { 3856 struct tg3_firmware_hdr *fw_hdr; 3857 3858 if (!tg3_flag(tp, NO_NVRAM)) 3859 return; 3860 3861 if (tg3_validate_rxcpu_state(tp)) 3862 return; 3863 3864 if (!tp->fw) 3865 return; 3866 3867 /* This firmware blob has a different format than older firmware 3868 * releases as given below. The main difference is we have fragmented 3869 * data to be written to non-contiguous locations. 3870 * 3871 * In the beginning we have a firmware header identical to other 3872 * firmware which consists of version, base addr and length. The length 3873 * here is unused and set to 0xffffffff. 3874 * 3875 * This is followed by a series of firmware fragments which are 3876 * individually identical to previous firmware. i.e. they have the 3877 * firmware header and followed by data for that fragment. The version 3878 * field of the individual fragment header is unused. 
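 */

/* Start-up bracket once a blob is in place (a sketch; this is exactly
 * what tg3_load_5701_a0_firmware_fix() above does for the RX CPU):
 * point the halted CPU's program counter at the firmware entry
 * address, then let it run.
 */
#if 0
err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
			       be32_to_cpu(fw_hdr->base_addr));
if (!err)
	tg3_rxcpu_resume(tp);
#endif

/* The fragment headers drive the copy loop in
 * tg3_load_firmware_cpu().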
3879 */ 3880 3881 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3882 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR) 3883 return; 3884 3885 if (tg3_rxcpu_pause(tp)) 3886 return; 3887 3888 /* tg3_load_firmware_cpu() will always succeed for the 57766 */ 3889 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr); 3890 3891 tg3_rxcpu_resume(tp); 3892 } 3893 3894 /* tp->lock is held. */ 3895 static int tg3_load_tso_firmware(struct tg3 *tp) 3896 { 3897 const struct tg3_firmware_hdr *fw_hdr; 3898 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; 3899 int err; 3900 3901 if (!tg3_flag(tp, FW_TSO)) 3902 return 0; 3903 3904 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3905 3906 /* Firmware blob starts with version numbers, followed by 3907 start address and length. We are setting complete length. 3908 length = end_address_of_bss - start_address_of_text. 3909 Remainder is the blob to be loaded contiguously 3910 from start address. */ 3911 3912 cpu_scratch_size = tp->fw_len; 3913 3914 if (tg3_asic_rev(tp) == ASIC_REV_5705) { 3915 cpu_base = RX_CPU_BASE; 3916 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; 3917 } else { 3918 cpu_base = TX_CPU_BASE; 3919 cpu_scratch_base = TX_CPU_SCRATCH_BASE; 3920 cpu_scratch_size = TX_CPU_SCRATCH_SIZE; 3921 } 3922 3923 err = tg3_load_firmware_cpu(tp, cpu_base, 3924 cpu_scratch_base, cpu_scratch_size, 3925 fw_hdr); 3926 if (err) 3927 return err; 3928 3929 /* Now startup the cpu. */ 3930 err = tg3_pause_cpu_and_set_pc(tp, cpu_base, 3931 be32_to_cpu(fw_hdr->base_addr)); 3932 if (err) { 3933 netdev_err(tp->dev, 3934 "%s fails to set CPU PC, is %08x should be %08x\n", 3935 __func__, tr32(cpu_base + CPU_PC), 3936 be32_to_cpu(fw_hdr->base_addr)); 3937 return -ENODEV; 3938 } 3939 3940 tg3_resume_cpu(tp, cpu_base); 3941 return 0; 3942 } 3943 3944 /* tp->lock is held. */ 3945 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr, 3946 int index) 3947 { 3948 u32 addr_high, addr_low; 3949 3950 addr_high = ((mac_addr[0] << 8) | mac_addr[1]); 3951 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) | 3952 (mac_addr[4] << 8) | mac_addr[5]); 3953 3954 if (index < 4) { 3955 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high); 3956 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low); 3957 } else { 3958 index -= 4; 3959 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high); 3960 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low); 3961 } 3962 } 3963 3964 /* tp->lock is held. */ 3965 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1) 3966 { 3967 u32 addr_high; 3968 int i; 3969 3970 for (i = 0; i < 4; i++) { 3971 if (i == 1 && skip_mac_1) 3972 continue; 3973 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i); 3974 } 3975 3976 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 3977 tg3_asic_rev(tp) == ASIC_REV_5704) { 3978 for (i = 4; i < 16; i++) 3979 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i); 3980 } 3981 3982 addr_high = (tp->dev->dev_addr[0] + 3983 tp->dev->dev_addr[1] + 3984 tp->dev->dev_addr[2] + 3985 tp->dev->dev_addr[3] + 3986 tp->dev->dev_addr[4] + 3987 tp->dev->dev_addr[5]) & 3988 TX_BACKOFF_SEED_MASK; 3989 tw32(MAC_TX_BACKOFF_SEED, addr_high); 3990 } 3991 3992 static void tg3_enable_register_access(struct tg3 *tp) 3993 { 3994 /* 3995 * Make sure register accesses (indirect or otherwise) will function 3996 * correctly. 
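 */

/* Address-register packing performed by __tg3_set_one_mac_addr()
 * above, shown for a hypothetical MAC address 00:10:18:aa:bb:cc:
 *
 *   addr_high == 0x00000010   (bytes 0 and 1)
 *   addr_low  == 0x18aabbcc   (bytes 2 through 5)
 */

/* The config-space write below restores TG3PCI_MISC_HOST_CTRL.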
3997 */ 3998 pci_write_config_dword(tp->pdev, 3999 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); 4000 } 4001 4002 static int tg3_power_up(struct tg3 *tp) 4003 { 4004 int err; 4005 4006 tg3_enable_register_access(tp); 4007 4008 err = pci_set_power_state(tp->pdev, PCI_D0); 4009 if (!err) { 4010 /* Switch out of Vaux if it is a NIC */ 4011 tg3_pwrsrc_switch_to_vmain(tp); 4012 } else { 4013 netdev_err(tp->dev, "Transition to D0 failed\n"); 4014 } 4015 4016 return err; 4017 } 4018 4019 static int tg3_setup_phy(struct tg3 *, bool); 4020 4021 static int tg3_power_down_prepare(struct tg3 *tp) 4022 { 4023 u32 misc_host_ctrl; 4024 bool device_should_wake, do_low_power; 4025 4026 tg3_enable_register_access(tp); 4027 4028 /* Restore the CLKREQ setting. */ 4029 if (tg3_flag(tp, CLKREQ_BUG)) 4030 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, 4031 PCI_EXP_LNKCTL_CLKREQ_EN); 4032 4033 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 4034 tw32(TG3PCI_MISC_HOST_CTRL, 4035 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); 4036 4037 device_should_wake = device_may_wakeup(&tp->pdev->dev) && 4038 tg3_flag(tp, WOL_ENABLE); 4039 4040 if (tg3_flag(tp, USE_PHYLIB)) { 4041 do_low_power = false; 4042 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && 4043 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4044 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, }; 4045 struct phy_device *phydev; 4046 u32 phyid; 4047 4048 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 4049 4050 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; 4051 4052 tp->link_config.speed = phydev->speed; 4053 tp->link_config.duplex = phydev->duplex; 4054 tp->link_config.autoneg = phydev->autoneg; 4055 ethtool_convert_link_mode_to_legacy_u32( 4056 &tp->link_config.advertising, 4057 phydev->advertising); 4058 4059 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising); 4060 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, 4061 advertising); 4062 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, 4063 advertising); 4064 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, 4065 advertising); 4066 4067 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) { 4068 if (tg3_flag(tp, WOL_SPEED_100MB)) { 4069 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, 4070 advertising); 4071 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, 4072 advertising); 4073 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, 4074 advertising); 4075 } else { 4076 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, 4077 advertising); 4078 } 4079 } 4080 4081 linkmode_copy(phydev->advertising, advertising); 4082 phy_start_aneg(phydev); 4083 4084 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; 4085 if (phyid != PHY_ID_BCMAC131) { 4086 phyid &= PHY_BCM_OUI_MASK; 4087 if (phyid == PHY_BCM_OUI_1 || 4088 phyid == PHY_BCM_OUI_2 || 4089 phyid == PHY_BCM_OUI_3) 4090 do_low_power = true; 4091 } 4092 } 4093 } else { 4094 do_low_power = true; 4095 4096 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) 4097 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; 4098 4099 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 4100 tg3_setup_phy(tp, false); 4101 } 4102 4103 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 4104 u32 val; 4105 4106 val = tr32(GRC_VCPU_EXT_CTRL); 4107 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); 4108 } else if (!tg3_flag(tp, ENABLE_ASF)) { 4109 int i; 4110 u32 val; 4111 4112 for (i = 0; i < 200; i++) { 4113 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val); 4114 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) 4115 break; 4116 msleep(1); 4117 } 4118 } 4119 if (tg3_flag(tp, WOL_CAP)) 
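/* Post the WOL status word (driver shutting down, WOL armed,
 * magic-packet match enabled) to the firmware mailbox in NIC SRAM.
 */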
4120 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | 4121 WOL_DRV_STATE_SHUTDOWN | 4122 WOL_DRV_WOL | 4123 WOL_SET_MAGIC_PKT); 4124 4125 if (device_should_wake) { 4126 u32 mac_mode; 4127 4128 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 4129 if (do_low_power && 4130 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 4131 tg3_phy_auxctl_write(tp, 4132 MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 4133 MII_TG3_AUXCTL_PCTL_WOL_EN | 4134 MII_TG3_AUXCTL_PCTL_100TX_LPWR | 4135 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC); 4136 udelay(40); 4137 } 4138 4139 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 4140 mac_mode = MAC_MODE_PORT_MODE_GMII; 4141 else if (tp->phy_flags & 4142 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) { 4143 if (tp->link_config.active_speed == SPEED_1000) 4144 mac_mode = MAC_MODE_PORT_MODE_GMII; 4145 else 4146 mac_mode = MAC_MODE_PORT_MODE_MII; 4147 } else 4148 mac_mode = MAC_MODE_PORT_MODE_MII; 4149 4150 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; 4151 if (tg3_asic_rev(tp) == ASIC_REV_5700) { 4152 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ? 4153 SPEED_100 : SPEED_10; 4154 if (tg3_5700_link_polarity(tp, speed)) 4155 mac_mode |= MAC_MODE_LINK_POLARITY; 4156 else 4157 mac_mode &= ~MAC_MODE_LINK_POLARITY; 4158 } 4159 } else { 4160 mac_mode = MAC_MODE_PORT_MODE_TBI; 4161 } 4162 4163 if (!tg3_flag(tp, 5750_PLUS)) 4164 tw32(MAC_LED_CTRL, tp->led_ctrl); 4165 4166 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; 4167 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) && 4168 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE))) 4169 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; 4170 4171 if (tg3_flag(tp, ENABLE_APE)) 4172 mac_mode |= MAC_MODE_APE_TX_EN | 4173 MAC_MODE_APE_RX_EN | 4174 MAC_MODE_TDE_ENABLE; 4175 4176 tw32_f(MAC_MODE, mac_mode); 4177 udelay(100); 4178 4179 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE); 4180 udelay(10); 4181 } 4182 4183 if (!tg3_flag(tp, WOL_SPEED_100MB) && 4184 (tg3_asic_rev(tp) == ASIC_REV_5700 || 4185 tg3_asic_rev(tp) == ASIC_REV_5701)) { 4186 u32 base_val; 4187 4188 base_val = tp->pci_clock_ctrl; 4189 base_val |= (CLOCK_CTRL_RXCLK_DISABLE | 4190 CLOCK_CTRL_TXCLK_DISABLE); 4191 4192 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | 4193 CLOCK_CTRL_PWRDOWN_PLL133, 40); 4194 } else if (tg3_flag(tp, 5780_CLASS) || 4195 tg3_flag(tp, CPMU_PRESENT) || 4196 tg3_asic_rev(tp) == ASIC_REV_5906) { 4197 /* do nothing */ 4198 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) { 4199 u32 newbits1, newbits2; 4200 4201 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 4202 tg3_asic_rev(tp) == ASIC_REV_5701) { 4203 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE | 4204 CLOCK_CTRL_TXCLK_DISABLE | 4205 CLOCK_CTRL_ALTCLK); 4206 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; 4207 } else if (tg3_flag(tp, 5705_PLUS)) { 4208 newbits1 = CLOCK_CTRL_625_CORE; 4209 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; 4210 } else { 4211 newbits1 = CLOCK_CTRL_ALTCLK; 4212 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; 4213 } 4214 4215 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1, 4216 40); 4217 4218 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, 4219 40); 4220 4221 if (!tg3_flag(tp, 5705_PLUS)) { 4222 u32 newbits3; 4223 4224 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 4225 tg3_asic_rev(tp) == ASIC_REV_5701) { 4226 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE | 4227 CLOCK_CTRL_TXCLK_DISABLE | 4228 CLOCK_CTRL_44MHZ_CORE); 4229 } else { 4230 newbits3 = CLOCK_CTRL_44MHZ_CORE; 4231 } 4232 4233 tw32_wait_f(TG3PCI_CLOCK_CTRL, 4234 tp->pci_clock_ctrl | newbits3, 40); 4235 } 4236 } 4237 4238 if (!(device_should_wake) && !tg3_flag(tp, 
ENABLE_ASF)) 4239 tg3_power_down_phy(tp, do_low_power); 4240 4241 tg3_frob_aux_power(tp, true); 4242 4243 /* Workaround for unstable PLL clock */ 4244 if ((!tg3_flag(tp, IS_SSB_CORE)) && 4245 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) || 4246 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) { 4247 u32 val = tr32(0x7d00); 4248 4249 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); 4250 tw32(0x7d00, val); 4251 if (!tg3_flag(tp, ENABLE_ASF)) { 4252 int err; 4253 4254 err = tg3_nvram_lock(tp); 4255 tg3_halt_cpu(tp, RX_CPU_BASE); 4256 if (!err) 4257 tg3_nvram_unlock(tp); 4258 } 4259 } 4260 4261 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); 4262 4263 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN); 4264 4265 return 0; 4266 } 4267 4268 static void tg3_power_down(struct tg3 *tp) 4269 { 4270 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE)); 4271 pci_set_power_state(tp->pdev, PCI_D3hot); 4272 } 4273 4274 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex) 4275 { 4276 switch (val & MII_TG3_AUX_STAT_SPDMASK) { 4277 case MII_TG3_AUX_STAT_10HALF: 4278 *speed = SPEED_10; 4279 *duplex = DUPLEX_HALF; 4280 break; 4281 4282 case MII_TG3_AUX_STAT_10FULL: 4283 *speed = SPEED_10; 4284 *duplex = DUPLEX_FULL; 4285 break; 4286 4287 case MII_TG3_AUX_STAT_100HALF: 4288 *speed = SPEED_100; 4289 *duplex = DUPLEX_HALF; 4290 break; 4291 4292 case MII_TG3_AUX_STAT_100FULL: 4293 *speed = SPEED_100; 4294 *duplex = DUPLEX_FULL; 4295 break; 4296 4297 case MII_TG3_AUX_STAT_1000HALF: 4298 *speed = SPEED_1000; 4299 *duplex = DUPLEX_HALF; 4300 break; 4301 4302 case MII_TG3_AUX_STAT_1000FULL: 4303 *speed = SPEED_1000; 4304 *duplex = DUPLEX_FULL; 4305 break; 4306 4307 default: 4308 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 4309 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : 4310 SPEED_10; 4311 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? 
DUPLEX_FULL : 4312 DUPLEX_HALF; 4313 break; 4314 } 4315 *speed = SPEED_UNKNOWN; 4316 *duplex = DUPLEX_UNKNOWN; 4317 break; 4318 } 4319 } 4320 4321 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) 4322 { 4323 int err = 0; 4324 u32 val, new_adv; 4325 4326 new_adv = ADVERTISE_CSMA; 4327 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL; 4328 new_adv |= mii_advertise_flowctrl(flowctrl); 4329 4330 err = tg3_writephy(tp, MII_ADVERTISE, new_adv); 4331 if (err) 4332 goto done; 4333 4334 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4335 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise); 4336 4337 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 4338 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) 4339 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; 4340 4341 err = tg3_writephy(tp, MII_CTRL1000, new_adv); 4342 if (err) 4343 goto done; 4344 } 4345 4346 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 4347 goto done; 4348 4349 tw32(TG3_CPMU_EEE_MODE, 4350 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); 4351 4352 err = tg3_phy_toggle_auxctl_smdsp(tp, true); 4353 if (!err) { 4354 u32 err2; 4355 4356 val = 0; 4357 /* Advertise 100-BaseTX EEE ability */ 4358 if (advertise & ADVERTISED_100baseT_Full) 4359 val |= MDIO_AN_EEE_ADV_100TX; 4360 /* Advertise 1000-BaseT EEE ability */ 4361 if (advertise & ADVERTISED_1000baseT_Full) 4362 val |= MDIO_AN_EEE_ADV_1000T; 4363 4364 if (!tp->eee.eee_enabled) { 4365 val = 0; 4366 tp->eee.advertised = 0; 4367 } else { 4368 tp->eee.advertised = advertise & 4369 (ADVERTISED_100baseT_Full | 4370 ADVERTISED_1000baseT_Full); 4371 } 4372 4373 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); 4374 if (err) 4375 val = 0; 4376 4377 switch (tg3_asic_rev(tp)) { 4378 case ASIC_REV_5717: 4379 case ASIC_REV_57765: 4380 case ASIC_REV_57766: 4381 case ASIC_REV_5719: 4382 /* If we advertised any eee advertisements above... 
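then also set the ALNOKO/RMRXSTO/OPCSINPT DSP workaround bits; otherwise TAP26 is simply cleared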
*/ 4383 if (val) 4384 val = MII_TG3_DSP_TAP26_ALNOKO | 4385 MII_TG3_DSP_TAP26_RMRXSTO | 4386 MII_TG3_DSP_TAP26_OPCSINPT; 4387 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); 4388 fallthrough; 4389 case ASIC_REV_5720: 4390 case ASIC_REV_5762: 4391 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) 4392 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val | 4393 MII_TG3_DSP_CH34TP2_HIBW01); 4394 } 4395 4396 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false); 4397 if (!err) 4398 err = err2; 4399 } 4400 4401 done: 4402 return err; 4403 } 4404 4405 static void tg3_phy_copper_begin(struct tg3 *tp) 4406 { 4407 if (tp->link_config.autoneg == AUTONEG_ENABLE || 4408 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4409 u32 adv, fc; 4410 4411 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && 4412 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) { 4413 adv = ADVERTISED_10baseT_Half | 4414 ADVERTISED_10baseT_Full; 4415 if (tg3_flag(tp, WOL_SPEED_100MB)) 4416 adv |= ADVERTISED_100baseT_Half | 4417 ADVERTISED_100baseT_Full; 4418 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) { 4419 if (!(tp->phy_flags & 4420 TG3_PHYFLG_DISABLE_1G_HD_ADV)) 4421 adv |= ADVERTISED_1000baseT_Half; 4422 adv |= ADVERTISED_1000baseT_Full; 4423 } 4424 4425 fc = FLOW_CTRL_TX | FLOW_CTRL_RX; 4426 } else { 4427 adv = tp->link_config.advertising; 4428 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) 4429 adv &= ~(ADVERTISED_1000baseT_Half | 4430 ADVERTISED_1000baseT_Full); 4431 4432 fc = tp->link_config.flowctrl; 4433 } 4434 4435 tg3_phy_autoneg_cfg(tp, adv, fc); 4436 4437 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && 4438 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) { 4439 /* Normally during power down we want to autonegotiate 4440 * the lowest possible speed for WOL. However, to avoid 4441 * link flap, we leave it untouched. 4442 */ 4443 return; 4444 } 4445 4446 tg3_writephy(tp, MII_BMCR, 4447 BMCR_ANENABLE | BMCR_ANRESTART); 4448 } else { 4449 int i; 4450 u32 bmcr, orig_bmcr; 4451 4452 tp->link_config.active_speed = tp->link_config.speed; 4453 tp->link_config.active_duplex = tp->link_config.duplex; 4454 4455 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 4456 /* With autoneg disabled, 5715 only links up when the 4457 * advertisement register has the configured speed 4458 * enabled. 
4459 */ 4460 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL); 4461 } 4462 4463 bmcr = 0; 4464 switch (tp->link_config.speed) { 4465 default: 4466 case SPEED_10: 4467 break; 4468 4469 case SPEED_100: 4470 bmcr |= BMCR_SPEED100; 4471 break; 4472 4473 case SPEED_1000: 4474 bmcr |= BMCR_SPEED1000; 4475 break; 4476 } 4477 4478 if (tp->link_config.duplex == DUPLEX_FULL) 4479 bmcr |= BMCR_FULLDPLX; 4480 4481 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) && 4482 (bmcr != orig_bmcr)) { 4483 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK); 4484 for (i = 0; i < 1500; i++) { 4485 u32 tmp; 4486 4487 udelay(10); 4488 if (tg3_readphy(tp, MII_BMSR, &tmp) || 4489 tg3_readphy(tp, MII_BMSR, &tmp)) 4490 continue; 4491 if (!(tmp & BMSR_LSTATUS)) { 4492 udelay(40); 4493 break; 4494 } 4495 } 4496 tg3_writephy(tp, MII_BMCR, bmcr); 4497 udelay(40); 4498 } 4499 } 4500 } 4501 4502 static int tg3_phy_pull_config(struct tg3 *tp) 4503 { 4504 int err; 4505 u32 val; 4506 4507 err = tg3_readphy(tp, MII_BMCR, &val); 4508 if (err) 4509 goto done; 4510 4511 if (!(val & BMCR_ANENABLE)) { 4512 tp->link_config.autoneg = AUTONEG_DISABLE; 4513 tp->link_config.advertising = 0; 4514 tg3_flag_clear(tp, PAUSE_AUTONEG); 4515 4516 err = -EIO; 4517 4518 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) { 4519 case 0: 4520 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 4521 goto done; 4522 4523 tp->link_config.speed = SPEED_10; 4524 break; 4525 case BMCR_SPEED100: 4526 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 4527 goto done; 4528 4529 tp->link_config.speed = SPEED_100; 4530 break; 4531 case BMCR_SPEED1000: 4532 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4533 tp->link_config.speed = SPEED_1000; 4534 break; 4535 } 4536 fallthrough; 4537 default: 4538 goto done; 4539 } 4540 4541 if (val & BMCR_FULLDPLX) 4542 tp->link_config.duplex = DUPLEX_FULL; 4543 else 4544 tp->link_config.duplex = DUPLEX_HALF; 4545 4546 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; 4547 4548 err = 0; 4549 goto done; 4550 } 4551 4552 tp->link_config.autoneg = AUTONEG_ENABLE; 4553 tp->link_config.advertising = ADVERTISED_Autoneg; 4554 tg3_flag_set(tp, PAUSE_AUTONEG); 4555 4556 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 4557 u32 adv; 4558 4559 err = tg3_readphy(tp, MII_ADVERTISE, &val); 4560 if (err) 4561 goto done; 4562 4563 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL); 4564 tp->link_config.advertising |= adv | ADVERTISED_TP; 4565 4566 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val); 4567 } else { 4568 tp->link_config.advertising |= ADVERTISED_FIBRE; 4569 } 4570 4571 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4572 u32 adv; 4573 4574 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 4575 err = tg3_readphy(tp, MII_CTRL1000, &val); 4576 if (err) 4577 goto done; 4578 4579 adv = mii_ctrl1000_to_ethtool_adv_t(val); 4580 } else { 4581 err = tg3_readphy(tp, MII_ADVERTISE, &val); 4582 if (err) 4583 goto done; 4584 4585 adv = tg3_decode_flowctrl_1000X(val); 4586 tp->link_config.flowctrl = adv; 4587 4588 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL); 4589 adv = mii_adv_to_ethtool_adv_x(val); 4590 } 4591 4592 tp->link_config.advertising |= adv; 4593 } 4594 4595 done: 4596 return err; 4597 } 4598 4599 static int tg3_init_5401phy_dsp(struct tg3 *tp) 4600 { 4601 int err; 4602 4603 /* Turn off tap power management. 
*/ 4604 /* Set Extended packet length bit */ 4605 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); 4606 4607 err |= tg3_phydsp_write(tp, 0x0012, 0x1804); 4608 err |= tg3_phydsp_write(tp, 0x0013, 0x1204); 4609 err |= tg3_phydsp_write(tp, 0x8006, 0x0132); 4610 err |= tg3_phydsp_write(tp, 0x8006, 0x0232); 4611 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20); 4612 4613 udelay(40); 4614 4615 return err; 4616 } 4617 4618 static bool tg3_phy_eee_config_ok(struct tg3 *tp) 4619 { 4620 struct ethtool_eee eee; 4621 4622 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 4623 return true; 4624 4625 tg3_eee_pull_config(tp, &eee); 4626 4627 if (tp->eee.eee_enabled) { 4628 if (tp->eee.advertised != eee.advertised || 4629 tp->eee.tx_lpi_timer != eee.tx_lpi_timer || 4630 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled) 4631 return false; 4632 } else { 4633 /* EEE is disabled but we're advertising */ 4634 if (eee.advertised) 4635 return false; 4636 } 4637 4638 return true; 4639 } 4640 4641 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv) 4642 { 4643 u32 advmsk, tgtadv, advertising; 4644 4645 advertising = tp->link_config.advertising; 4646 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL; 4647 4648 advmsk = ADVERTISE_ALL; 4649 if (tp->link_config.active_duplex == DUPLEX_FULL) { 4650 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl); 4651 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4652 } 4653 4654 if (tg3_readphy(tp, MII_ADVERTISE, lcladv)) 4655 return false; 4656 4657 if ((*lcladv & advmsk) != tgtadv) 4658 return false; 4659 4660 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4661 u32 tg3_ctrl; 4662 4663 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising); 4664 4665 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl)) 4666 return false; 4667 4668 if (tgtadv && 4669 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 4670 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) { 4671 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; 4672 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL | 4673 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); 4674 } else { 4675 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL); 4676 } 4677 4678 if (tg3_ctrl != tgtadv) 4679 return false; 4680 } 4681 4682 return true; 4683 } 4684 4685 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv) 4686 { 4687 u32 lpeth = 0; 4688 4689 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4690 u32 val; 4691 4692 if (tg3_readphy(tp, MII_STAT1000, &val)) 4693 return false; 4694 4695 lpeth = mii_stat1000_to_ethtool_lpa_t(val); 4696 } 4697 4698 if (tg3_readphy(tp, MII_LPA, rmtadv)) 4699 return false; 4700 4701 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv); 4702 tp->link_config.rmt_adv = lpeth; 4703 4704 return true; 4705 } 4706 4707 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up) 4708 { 4709 if (curr_link_up != tp->link_up) { 4710 if (curr_link_up) { 4711 netif_carrier_on(tp->dev); 4712 } else { 4713 netif_carrier_off(tp->dev); 4714 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 4715 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 4716 } 4717 4718 tg3_link_report(tp); 4719 return true; 4720 } 4721 4722 return false; 4723 } 4724 4725 static void tg3_clear_mac_status(struct tg3 *tp) 4726 { 4727 tw32(MAC_EVENT, 0); 4728 4729 tw32_f(MAC_STATUS, 4730 MAC_STATUS_SYNC_CHANGED | 4731 MAC_STATUS_CFG_CHANGED | 4732 MAC_STATUS_MI_COMPLETION | 4733 MAC_STATUS_LNKSTATE_CHANGED); 4734 udelay(40); 4735 } 4736 4737 static void tg3_setup_eee(struct tg3 *tp) 4738 { 4739 u32 val; 4740 4741 
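/* Program the EEE link-idle detection sources, the LPI exit timer
 * and the debounce timers below. Note that the EEE_MODE word itself
 * is only applied when tp->eee.eee_enabled is set; 0 is written
 * otherwise.
 */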
val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | 4742 TG3_CPMU_EEE_LNKIDL_UART_IDL; 4743 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) 4744 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT; 4745 4746 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val); 4747 4748 tw32_f(TG3_CPMU_EEE_CTRL, 4749 TG3_CPMU_EEE_CTRL_EXIT_20_1_US); 4750 4751 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET | 4752 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) | 4753 TG3_CPMU_EEEMD_LPI_IN_RX | 4754 TG3_CPMU_EEEMD_EEE_ENABLE; 4755 4756 if (tg3_asic_rev(tp) != ASIC_REV_5717) 4757 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; 4758 4759 if (tg3_flag(tp, ENABLE_APE)) 4760 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; 4761 4762 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0); 4763 4764 tw32_f(TG3_CPMU_EEE_DBTMR1, 4765 TG3_CPMU_DBTMR1_PCIEXIT_2047US | 4766 (tp->eee.tx_lpi_timer & 0xffff)); 4767 4768 tw32_f(TG3_CPMU_EEE_DBTMR2, 4769 TG3_CPMU_DBTMR2_APE_TX_2047US | 4770 TG3_CPMU_DBTMR2_TXIDXEQ_2047US); 4771 } 4772 4773 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset) 4774 { 4775 bool current_link_up; 4776 u32 bmsr, val; 4777 u32 lcl_adv, rmt_adv; 4778 u32 current_speed; 4779 u8 current_duplex; 4780 int i, err; 4781 4782 tg3_clear_mac_status(tp); 4783 4784 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { 4785 tw32_f(MAC_MI_MODE, 4786 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); 4787 udelay(80); 4788 } 4789 4790 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0); 4791 4792 /* Some third-party PHYs need to be reset on link going 4793 * down. 4794 */ 4795 if ((tg3_asic_rev(tp) == ASIC_REV_5703 || 4796 tg3_asic_rev(tp) == ASIC_REV_5704 || 4797 tg3_asic_rev(tp) == ASIC_REV_5705) && 4798 tp->link_up) { 4799 tg3_readphy(tp, MII_BMSR, &bmsr); 4800 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4801 !(bmsr & BMSR_LSTATUS)) 4802 force_reset = true; 4803 } 4804 if (force_reset) 4805 tg3_phy_reset(tp); 4806 4807 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 4808 tg3_readphy(tp, MII_BMSR, &bmsr); 4809 if (tg3_readphy(tp, MII_BMSR, &bmsr) || 4810 !tg3_flag(tp, INIT_COMPLETE)) 4811 bmsr = 0; 4812 4813 if (!(bmsr & BMSR_LSTATUS)) { 4814 err = tg3_init_5401phy_dsp(tp); 4815 if (err) 4816 return err; 4817 4818 tg3_readphy(tp, MII_BMSR, &bmsr); 4819 for (i = 0; i < 1000; i++) { 4820 udelay(10); 4821 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4822 (bmsr & BMSR_LSTATUS)) { 4823 udelay(40); 4824 break; 4825 } 4826 } 4827 4828 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) == 4829 TG3_PHY_REV_BCM5401_B0 && 4830 !(bmsr & BMSR_LSTATUS) && 4831 tp->link_config.active_speed == SPEED_1000) { 4832 err = tg3_phy_reset(tp); 4833 if (!err) 4834 err = tg3_init_5401phy_dsp(tp); 4835 if (err) 4836 return err; 4837 } 4838 } 4839 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 4840 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) { 4841 /* 5701 {A0,B0} CRC bug workaround */ 4842 tg3_writephy(tp, 0x15, 0x0a75); 4843 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); 4844 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); 4845 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); 4846 } 4847 4848 /* Clear pending interrupts... 
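(the PHY interrupt status register is latched, which is presumably why it is read twice below to make sure any pending event is really cleared)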
*/ 4849 tg3_readphy(tp, MII_TG3_ISTAT, &val); 4850 tg3_readphy(tp, MII_TG3_ISTAT, &val); 4851 4852 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) 4853 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); 4854 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) 4855 tg3_writephy(tp, MII_TG3_IMASK, ~0); 4856 4857 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 4858 tg3_asic_rev(tp) == ASIC_REV_5701) { 4859 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1) 4860 tg3_writephy(tp, MII_TG3_EXT_CTRL, 4861 MII_TG3_EXT_CTRL_LNK3_LED_MODE); 4862 else 4863 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0); 4864 } 4865 4866 current_link_up = false; 4867 current_speed = SPEED_UNKNOWN; 4868 current_duplex = DUPLEX_UNKNOWN; 4869 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE; 4870 tp->link_config.rmt_adv = 0; 4871 4872 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) { 4873 err = tg3_phy_auxctl_read(tp, 4874 MII_TG3_AUXCTL_SHDWSEL_MISCTEST, 4875 &val); 4876 if (!err && !(val & (1 << 10))) { 4877 tg3_phy_auxctl_write(tp, 4878 MII_TG3_AUXCTL_SHDWSEL_MISCTEST, 4879 val | (1 << 10)); 4880 goto relink; 4881 } 4882 } 4883 4884 bmsr = 0; 4885 for (i = 0; i < 100; i++) { 4886 tg3_readphy(tp, MII_BMSR, &bmsr); 4887 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4888 (bmsr & BMSR_LSTATUS)) 4889 break; 4890 udelay(40); 4891 } 4892 4893 if (bmsr & BMSR_LSTATUS) { 4894 u32 aux_stat, bmcr; 4895 4896 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat); 4897 for (i = 0; i < 2000; i++) { 4898 udelay(10); 4899 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) && 4900 aux_stat) 4901 break; 4902 } 4903 4904 tg3_aux_stat_to_speed_duplex(tp, aux_stat, 4905 &current_speed, 4906 &current_duplex); 4907 4908 bmcr = 0; 4909 for (i = 0; i < 200; i++) { 4910 tg3_readphy(tp, MII_BMCR, &bmcr); 4911 if (tg3_readphy(tp, MII_BMCR, &bmcr)) 4912 continue; 4913 if (bmcr && bmcr != 0x7fff) 4914 break; 4915 udelay(10); 4916 } 4917 4918 lcl_adv = 0; 4919 rmt_adv = 0; 4920 4921 tp->link_config.active_speed = current_speed; 4922 tp->link_config.active_duplex = current_duplex; 4923 4924 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 4925 bool eee_config_ok = tg3_phy_eee_config_ok(tp); 4926 4927 if ((bmcr & BMCR_ANENABLE) && 4928 eee_config_ok && 4929 tg3_phy_copper_an_config_ok(tp, &lcl_adv) && 4930 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv)) 4931 current_link_up = true; 4932 4933 /* EEE settings changes take effect only after a phy 4934 * reset. If we have skipped a reset due to Link Flap 4935 * Avoidance being enabled, do it now.
4936 */ 4937 if (!eee_config_ok && 4938 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 4939 !force_reset) { 4940 tg3_setup_eee(tp); 4941 tg3_phy_reset(tp); 4942 } 4943 } else { 4944 if (!(bmcr & BMCR_ANENABLE) && 4945 tp->link_config.speed == current_speed && 4946 tp->link_config.duplex == current_duplex) { 4947 current_link_up = true; 4948 } 4949 } 4950 4951 if (current_link_up && 4952 tp->link_config.active_duplex == DUPLEX_FULL) { 4953 u32 reg, bit; 4954 4955 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 4956 reg = MII_TG3_FET_GEN_STAT; 4957 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT; 4958 } else { 4959 reg = MII_TG3_EXT_STAT; 4960 bit = MII_TG3_EXT_STAT_MDIX; 4961 } 4962 4963 if (!tg3_readphy(tp, reg, &val) && (val & bit)) 4964 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE; 4965 4966 tg3_setup_flow_control(tp, lcl_adv, rmt_adv); 4967 } 4968 } 4969 4970 relink: 4971 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4972 tg3_phy_copper_begin(tp); 4973 4974 if (tg3_flag(tp, ROBOSWITCH)) { 4975 current_link_up = true; 4976 /* FIXME: when BCM5325 switch is used use 100 MBit/s */ 4977 current_speed = SPEED_1000; 4978 current_duplex = DUPLEX_FULL; 4979 tp->link_config.active_speed = current_speed; 4980 tp->link_config.active_duplex = current_duplex; 4981 } 4982 4983 tg3_readphy(tp, MII_BMSR, &bmsr); 4984 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) || 4985 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) 4986 current_link_up = true; 4987 } 4988 4989 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; 4990 if (current_link_up) { 4991 if (tp->link_config.active_speed == SPEED_100 || 4992 tp->link_config.active_speed == SPEED_10) 4993 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 4994 else 4995 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 4996 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) 4997 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 4998 else 4999 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5000 5001 /* In order for the 5750 core in BCM4785 chip to work properly 5002 * in RGMII mode, the Led Control Register must be set up. 5003 */ 5004 if (tg3_flag(tp, RGMII_MODE)) { 5005 u32 led_ctrl = tr32(MAC_LED_CTRL); 5006 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON); 5007 5008 if (tp->link_config.active_speed == SPEED_10) 5009 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE; 5010 else if (tp->link_config.active_speed == SPEED_100) 5011 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE | 5012 LED_CTRL_100MBPS_ON); 5013 else if (tp->link_config.active_speed == SPEED_1000) 5014 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE | 5015 LED_CTRL_1000MBPS_ON); 5016 5017 tw32(MAC_LED_CTRL, led_ctrl); 5018 udelay(40); 5019 } 5020 5021 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; 5022 if (tp->link_config.active_duplex == DUPLEX_HALF) 5023 tp->mac_mode |= MAC_MODE_HALF_DUPLEX; 5024 5025 if (tg3_asic_rev(tp) == ASIC_REV_5700) { 5026 if (current_link_up && 5027 tg3_5700_link_polarity(tp, tp->link_config.active_speed)) 5028 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 5029 else 5030 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; 5031 } 5032 5033 /* ??? Without this setting Netgear GA302T PHY does not 5034 * ??? send/receive packets... 5035 */ 5036 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 && 5037 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) { 5038 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; 5039 tw32_f(MAC_MI_MODE, tp->mi_mode); 5040 udelay(80); 5041 } 5042 5043 tw32_f(MAC_MODE, tp->mac_mode); 5044 udelay(40); 5045 5046 tg3_phy_eee_adjust(tp, current_link_up); 5047 5048 if (tg3_flag(tp, USE_LINKCHG_REG)) { 5049 /* Polled via timer. 
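Link changes are picked up by the periodic timer reading the link-change register, so the MAC_EVENT interrupt stays masked here.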
*/ 5050 tw32_f(MAC_EVENT, 0); 5051 } else { 5052 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5053 } 5054 udelay(40); 5055 5056 if (tg3_asic_rev(tp) == ASIC_REV_5700 && 5057 current_link_up && 5058 tp->link_config.active_speed == SPEED_1000 && 5059 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) { 5060 udelay(120); 5061 tw32_f(MAC_STATUS, 5062 (MAC_STATUS_SYNC_CHANGED | 5063 MAC_STATUS_CFG_CHANGED)); 5064 udelay(40); 5065 tg3_write_mem(tp, 5066 NIC_SRAM_FIRMWARE_MBOX, 5067 NIC_SRAM_FIRMWARE_MBOX_MAGIC2); 5068 } 5069 5070 /* Prevent send BD corruption. */ 5071 if (tg3_flag(tp, CLKREQ_BUG)) { 5072 if (tp->link_config.active_speed == SPEED_100 || 5073 tp->link_config.active_speed == SPEED_10) 5074 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL, 5075 PCI_EXP_LNKCTL_CLKREQ_EN); 5076 else 5077 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, 5078 PCI_EXP_LNKCTL_CLKREQ_EN); 5079 } 5080 5081 tg3_test_and_report_link_chg(tp, current_link_up); 5082 5083 return 0; 5084 } 5085 5086 struct tg3_fiber_aneginfo { 5087 int state; 5088 #define ANEG_STATE_UNKNOWN 0 5089 #define ANEG_STATE_AN_ENABLE 1 5090 #define ANEG_STATE_RESTART_INIT 2 5091 #define ANEG_STATE_RESTART 3 5092 #define ANEG_STATE_DISABLE_LINK_OK 4 5093 #define ANEG_STATE_ABILITY_DETECT_INIT 5 5094 #define ANEG_STATE_ABILITY_DETECT 6 5095 #define ANEG_STATE_ACK_DETECT_INIT 7 5096 #define ANEG_STATE_ACK_DETECT 8 5097 #define ANEG_STATE_COMPLETE_ACK_INIT 9 5098 #define ANEG_STATE_COMPLETE_ACK 10 5099 #define ANEG_STATE_IDLE_DETECT_INIT 11 5100 #define ANEG_STATE_IDLE_DETECT 12 5101 #define ANEG_STATE_LINK_OK 13 5102 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14 5103 #define ANEG_STATE_NEXT_PAGE_WAIT 15 5104 5105 u32 flags; 5106 #define MR_AN_ENABLE 0x00000001 5107 #define MR_RESTART_AN 0x00000002 5108 #define MR_AN_COMPLETE 0x00000004 5109 #define MR_PAGE_RX 0x00000008 5110 #define MR_NP_LOADED 0x00000010 5111 #define MR_TOGGLE_TX 0x00000020 5112 #define MR_LP_ADV_FULL_DUPLEX 0x00000040 5113 #define MR_LP_ADV_HALF_DUPLEX 0x00000080 5114 #define MR_LP_ADV_SYM_PAUSE 0x00000100 5115 #define MR_LP_ADV_ASYM_PAUSE 0x00000200 5116 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400 5117 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800 5118 #define MR_LP_ADV_NEXT_PAGE 0x00001000 5119 #define MR_TOGGLE_RX 0x00002000 5120 #define MR_NP_RX 0x00004000 5121 5122 #define MR_LINK_OK 0x80000000 5123 5124 unsigned long link_time, cur_time; 5125 5126 u32 ability_match_cfg; 5127 int ability_match_count; 5128 5129 char ability_match, idle_match, ack_match; 5130 5131 u32 txconfig, rxconfig; 5132 #define ANEG_CFG_NP 0x00000080 5133 #define ANEG_CFG_ACK 0x00000040 5134 #define ANEG_CFG_RF2 0x00000020 5135 #define ANEG_CFG_RF1 0x00000010 5136 #define ANEG_CFG_PS2 0x00000001 5137 #define ANEG_CFG_PS1 0x00008000 5138 #define ANEG_CFG_HD 0x00004000 5139 #define ANEG_CFG_FD 0x00002000 5140 #define ANEG_CFG_INVAL 0x00001f06 5141 5142 }; 5143 #define ANEG_OK 0 5144 #define ANEG_DONE 1 5145 #define ANEG_TIMER_ENAB 2 5146 #define ANEG_FAILED -1 5147 5148 #define ANEG_STATE_SETTLE_TIME 10000 5149 5150 static int tg3_fiber_aneg_smachine(struct tg3 *tp, 5151 struct tg3_fiber_aneginfo *ap) 5152 { 5153 u16 flowctrl; 5154 unsigned long delta; 5155 u32 rx_cfg_reg; 5156 int ret; 5157 5158 if (ap->state == ANEG_STATE_UNKNOWN) { 5159 ap->rxconfig = 0; 5160 ap->link_time = 0; 5161 ap->cur_time = 0; 5162 ap->ability_match_cfg = 0; 5163 ap->ability_match_count = 0; 5164 ap->ability_match = 0; 5165 ap->idle_match = 0; 5166 ap->ack_match = 0; 5167 } 5168 ap->cur_time++; 5169 5170 if 
(tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) { 5171 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG); 5172 5173 if (rx_cfg_reg != ap->ability_match_cfg) { 5174 ap->ability_match_cfg = rx_cfg_reg; 5175 ap->ability_match = 0; 5176 ap->ability_match_count = 0; 5177 } else { 5178 if (++ap->ability_match_count > 1) { 5179 ap->ability_match = 1; 5180 ap->ability_match_cfg = rx_cfg_reg; 5181 } 5182 } 5183 if (rx_cfg_reg & ANEG_CFG_ACK) 5184 ap->ack_match = 1; 5185 else 5186 ap->ack_match = 0; 5187 5188 ap->idle_match = 0; 5189 } else { 5190 ap->idle_match = 1; 5191 ap->ability_match_cfg = 0; 5192 ap->ability_match_count = 0; 5193 ap->ability_match = 0; 5194 ap->ack_match = 0; 5195 5196 rx_cfg_reg = 0; 5197 } 5198 5199 ap->rxconfig = rx_cfg_reg; 5200 ret = ANEG_OK; 5201 5202 switch (ap->state) { 5203 case ANEG_STATE_UNKNOWN: 5204 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) 5205 ap->state = ANEG_STATE_AN_ENABLE; 5206 5207 fallthrough; 5208 case ANEG_STATE_AN_ENABLE: 5209 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); 5210 if (ap->flags & MR_AN_ENABLE) { 5211 ap->link_time = 0; 5212 ap->cur_time = 0; 5213 ap->ability_match_cfg = 0; 5214 ap->ability_match_count = 0; 5215 ap->ability_match = 0; 5216 ap->idle_match = 0; 5217 ap->ack_match = 0; 5218 5219 ap->state = ANEG_STATE_RESTART_INIT; 5220 } else { 5221 ap->state = ANEG_STATE_DISABLE_LINK_OK; 5222 } 5223 break; 5224 5225 case ANEG_STATE_RESTART_INIT: 5226 ap->link_time = ap->cur_time; 5227 ap->flags &= ~(MR_NP_LOADED); 5228 ap->txconfig = 0; 5229 tw32(MAC_TX_AUTO_NEG, 0); 5230 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 5231 tw32_f(MAC_MODE, tp->mac_mode); 5232 udelay(40); 5233 5234 ret = ANEG_TIMER_ENAB; 5235 ap->state = ANEG_STATE_RESTART; 5236 5237 fallthrough; 5238 case ANEG_STATE_RESTART: 5239 delta = ap->cur_time - ap->link_time; 5240 if (delta > ANEG_STATE_SETTLE_TIME) 5241 ap->state = ANEG_STATE_ABILITY_DETECT_INIT; 5242 else 5243 ret = ANEG_TIMER_ENAB; 5244 break; 5245 5246 case ANEG_STATE_DISABLE_LINK_OK: 5247 ret = ANEG_DONE; 5248 break; 5249 5250 case ANEG_STATE_ABILITY_DETECT_INIT: 5251 ap->flags &= ~(MR_TOGGLE_TX); 5252 ap->txconfig = ANEG_CFG_FD; 5253 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 5254 if (flowctrl & ADVERTISE_1000XPAUSE) 5255 ap->txconfig |= ANEG_CFG_PS1; 5256 if (flowctrl & ADVERTISE_1000XPSE_ASYM) 5257 ap->txconfig |= ANEG_CFG_PS2; 5258 tw32(MAC_TX_AUTO_NEG, ap->txconfig); 5259 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 5260 tw32_f(MAC_MODE, tp->mac_mode); 5261 udelay(40); 5262 5263 ap->state = ANEG_STATE_ABILITY_DETECT; 5264 break; 5265 5266 case ANEG_STATE_ABILITY_DETECT: 5267 if (ap->ability_match != 0 && ap->rxconfig != 0) 5268 ap->state = ANEG_STATE_ACK_DETECT_INIT; 5269 break; 5270 5271 case ANEG_STATE_ACK_DETECT_INIT: 5272 ap->txconfig |= ANEG_CFG_ACK; 5273 tw32(MAC_TX_AUTO_NEG, ap->txconfig); 5274 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 5275 tw32_f(MAC_MODE, tp->mac_mode); 5276 udelay(40); 5277 5278 ap->state = ANEG_STATE_ACK_DETECT; 5279 5280 fallthrough; 5281 case ANEG_STATE_ACK_DETECT: 5282 if (ap->ack_match != 0) { 5283 if ((ap->rxconfig & ~ANEG_CFG_ACK) == 5284 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) { 5285 ap->state = ANEG_STATE_COMPLETE_ACK_INIT; 5286 } else { 5287 ap->state = ANEG_STATE_AN_ENABLE; 5288 } 5289 } else if (ap->ability_match != 0 && 5290 ap->rxconfig == 0) { 5291 ap->state = ANEG_STATE_AN_ENABLE; 5292 } 5293 break; 5294 5295 case ANEG_STATE_COMPLETE_ACK_INIT: 5296 if (ap->rxconfig & ANEG_CFG_INVAL) { 5297 ret = ANEG_FAILED; 5298 break; 5299 } 5300 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX | 5301 
MR_LP_ADV_HALF_DUPLEX | 5302 MR_LP_ADV_SYM_PAUSE | 5303 MR_LP_ADV_ASYM_PAUSE | 5304 MR_LP_ADV_REMOTE_FAULT1 | 5305 MR_LP_ADV_REMOTE_FAULT2 | 5306 MR_LP_ADV_NEXT_PAGE | 5307 MR_TOGGLE_RX | 5308 MR_NP_RX); 5309 if (ap->rxconfig & ANEG_CFG_FD) 5310 ap->flags |= MR_LP_ADV_FULL_DUPLEX; 5311 if (ap->rxconfig & ANEG_CFG_HD) 5312 ap->flags |= MR_LP_ADV_HALF_DUPLEX; 5313 if (ap->rxconfig & ANEG_CFG_PS1) 5314 ap->flags |= MR_LP_ADV_SYM_PAUSE; 5315 if (ap->rxconfig & ANEG_CFG_PS2) 5316 ap->flags |= MR_LP_ADV_ASYM_PAUSE; 5317 if (ap->rxconfig & ANEG_CFG_RF1) 5318 ap->flags |= MR_LP_ADV_REMOTE_FAULT1; 5319 if (ap->rxconfig & ANEG_CFG_RF2) 5320 ap->flags |= MR_LP_ADV_REMOTE_FAULT2; 5321 if (ap->rxconfig & ANEG_CFG_NP) 5322 ap->flags |= MR_LP_ADV_NEXT_PAGE; 5323 5324 ap->link_time = ap->cur_time; 5325 5326 ap->flags ^= (MR_TOGGLE_TX); 5327 if (ap->rxconfig & 0x0008) 5328 ap->flags |= MR_TOGGLE_RX; 5329 if (ap->rxconfig & ANEG_CFG_NP) 5330 ap->flags |= MR_NP_RX; 5331 ap->flags |= MR_PAGE_RX; 5332 5333 ap->state = ANEG_STATE_COMPLETE_ACK; 5334 ret = ANEG_TIMER_ENAB; 5335 break; 5336 5337 case ANEG_STATE_COMPLETE_ACK: 5338 if (ap->ability_match != 0 && 5339 ap->rxconfig == 0) { 5340 ap->state = ANEG_STATE_AN_ENABLE; 5341 break; 5342 } 5343 delta = ap->cur_time - ap->link_time; 5344 if (delta > ANEG_STATE_SETTLE_TIME) { 5345 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) { 5346 ap->state = ANEG_STATE_IDLE_DETECT_INIT; 5347 } else { 5348 if ((ap->txconfig & ANEG_CFG_NP) == 0 && 5349 !(ap->flags & MR_NP_RX)) { 5350 ap->state = ANEG_STATE_IDLE_DETECT_INIT; 5351 } else { 5352 ret = ANEG_FAILED; 5353 } 5354 } 5355 } 5356 break; 5357 5358 case ANEG_STATE_IDLE_DETECT_INIT: 5359 ap->link_time = ap->cur_time; 5360 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; 5361 tw32_f(MAC_MODE, tp->mac_mode); 5362 udelay(40); 5363 5364 ap->state = ANEG_STATE_IDLE_DETECT; 5365 ret = ANEG_TIMER_ENAB; 5366 break; 5367 5368 case ANEG_STATE_IDLE_DETECT: 5369 if (ap->ability_match != 0 && 5370 ap->rxconfig == 0) { 5371 ap->state = ANEG_STATE_AN_ENABLE; 5372 break; 5373 } 5374 delta = ap->cur_time - ap->link_time; 5375 if (delta > ANEG_STATE_SETTLE_TIME) { 5376 /* XXX another gem from the Broadcom driver :( */ 5377 ap->state = ANEG_STATE_LINK_OK; 5378 } 5379 break; 5380 5381 case ANEG_STATE_LINK_OK: 5382 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK); 5383 ret = ANEG_DONE; 5384 break; 5385 5386 case ANEG_STATE_NEXT_PAGE_WAIT_INIT: 5387 /* ??? unimplemented */ 5388 break; 5389 5390 case ANEG_STATE_NEXT_PAGE_WAIT: 5391 /* ??? 
unimplemented */ 5392 break; 5393 5394 default: 5395 ret = ANEG_FAILED; 5396 break; 5397 } 5398 5399 return ret; 5400 } 5401 5402 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags) 5403 { 5404 int res = 0; 5405 struct tg3_fiber_aneginfo aninfo; 5406 int status = ANEG_FAILED; 5407 unsigned int tick; 5408 u32 tmp; 5409 5410 tw32_f(MAC_TX_AUTO_NEG, 0); 5411 5412 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; 5413 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII); 5414 udelay(40); 5415 5416 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS); 5417 udelay(40); 5418 5419 memset(&aninfo, 0, sizeof(aninfo)); 5420 aninfo.flags |= MR_AN_ENABLE; 5421 aninfo.state = ANEG_STATE_UNKNOWN; 5422 aninfo.cur_time = 0; 5423 tick = 0; 5424 while (++tick < 195000) { 5425 status = tg3_fiber_aneg_smachine(tp, &aninfo); 5426 if (status == ANEG_DONE || status == ANEG_FAILED) 5427 break; 5428 5429 udelay(1); 5430 } 5431 5432 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; 5433 tw32_f(MAC_MODE, tp->mac_mode); 5434 udelay(40); 5435 5436 *txflags = aninfo.txconfig; 5437 *rxflags = aninfo.flags; 5438 5439 if (status == ANEG_DONE && 5440 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK | 5441 MR_LP_ADV_FULL_DUPLEX))) 5442 res = 1; 5443 5444 return res; 5445 } 5446 5447 static void tg3_init_bcm8002(struct tg3 *tp) 5448 { 5449 u32 mac_status = tr32(MAC_STATUS); 5450 int i; 5451 5452 /* Reset when initting first time or we have a link. */ 5453 if (tg3_flag(tp, INIT_COMPLETE) && 5454 !(mac_status & MAC_STATUS_PCS_SYNCED)) 5455 return; 5456 5457 /* Set PLL lock range. */ 5458 tg3_writephy(tp, 0x16, 0x8007); 5459 5460 /* SW reset */ 5461 tg3_writephy(tp, MII_BMCR, BMCR_RESET); 5462 5463 /* Wait for reset to complete. */ 5464 /* XXX schedule_timeout() ... */ 5465 for (i = 0; i < 500; i++) 5466 udelay(10); 5467 5468 /* Config mode; select PMA/Ch 1 regs. */ 5469 tg3_writephy(tp, 0x10, 0x8411); 5470 5471 /* Enable auto-lock and comdet, select txclk for tx. */ 5472 tg3_writephy(tp, 0x11, 0x0a10); 5473 5474 tg3_writephy(tp, 0x18, 0x00a0); 5475 tg3_writephy(tp, 0x16, 0x41ff); 5476 5477 /* Assert and deassert POR. */ 5478 tg3_writephy(tp, 0x13, 0x0400); 5479 udelay(40); 5480 tg3_writephy(tp, 0x13, 0x0000); 5481 5482 tg3_writephy(tp, 0x11, 0x0a50); 5483 udelay(40); 5484 tg3_writephy(tp, 0x11, 0x0a10); 5485 5486 /* Wait for signal to stabilize */ 5487 /* XXX schedule_timeout() ... */ 5488 for (i = 0; i < 15000; i++) 5489 udelay(10); 5490 5491 /* Deselect the channel register so we can read the PHYID 5492 * later. 
5493 */ 5494 tg3_writephy(tp, 0x10, 0x8011); 5495 } 5496 5497 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) 5498 { 5499 u16 flowctrl; 5500 bool current_link_up; 5501 u32 sg_dig_ctrl, sg_dig_status; 5502 u32 serdes_cfg, expected_sg_dig_ctrl; 5503 int workaround, port_a; 5504 5505 serdes_cfg = 0; 5506 expected_sg_dig_ctrl = 0; 5507 workaround = 0; 5508 port_a = 1; 5509 current_link_up = false; 5510 5511 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 && 5512 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) { 5513 workaround = 1; 5514 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) 5515 port_a = 0; 5516 5517 /* preserve bits 0-11,13,14 for signal pre-emphasis */ 5518 /* preserve bits 20-23 for voltage regulator */ 5519 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff; 5520 } 5521 5522 sg_dig_ctrl = tr32(SG_DIG_CTRL); 5523 5524 if (tp->link_config.autoneg != AUTONEG_ENABLE) { 5525 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) { 5526 if (workaround) { 5527 u32 val = serdes_cfg; 5528 5529 if (port_a) 5530 val |= 0xc010000; 5531 else 5532 val |= 0x4010000; 5533 tw32_f(MAC_SERDES_CFG, val); 5534 } 5535 5536 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); 5537 } 5538 if (mac_status & MAC_STATUS_PCS_SYNCED) { 5539 tg3_setup_flow_control(tp, 0, 0); 5540 current_link_up = true; 5541 } 5542 goto out; 5543 } 5544 5545 /* Want auto-negotiation. */ 5546 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP; 5547 5548 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 5549 if (flowctrl & ADVERTISE_1000XPAUSE) 5550 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP; 5551 if (flowctrl & ADVERTISE_1000XPSE_ASYM) 5552 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE; 5553 5554 if (sg_dig_ctrl != expected_sg_dig_ctrl) { 5555 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) && 5556 tp->serdes_counter && 5557 ((mac_status & (MAC_STATUS_PCS_SYNCED | 5558 MAC_STATUS_RCVD_CFG)) == 5559 MAC_STATUS_PCS_SYNCED)) { 5560 tp->serdes_counter--; 5561 current_link_up = true; 5562 goto out; 5563 } 5564 restart_autoneg: 5565 if (workaround) 5566 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000); 5567 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET); 5568 udelay(5); 5569 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl); 5570 5571 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; 5572 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5573 } else if (mac_status & (MAC_STATUS_PCS_SYNCED | 5574 MAC_STATUS_SIGNAL_DET)) { 5575 sg_dig_status = tr32(SG_DIG_STATUS); 5576 mac_status = tr32(MAC_STATUS); 5577 5578 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) && 5579 (mac_status & MAC_STATUS_PCS_SYNCED)) { 5580 u32 local_adv = 0, remote_adv = 0; 5581 5582 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP) 5583 local_adv |= ADVERTISE_1000XPAUSE; 5584 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE) 5585 local_adv |= ADVERTISE_1000XPSE_ASYM; 5586 5587 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE) 5588 remote_adv |= LPA_1000XPAUSE; 5589 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE) 5590 remote_adv |= LPA_1000XPAUSE_ASYM; 5591 5592 tp->link_config.rmt_adv = 5593 mii_adv_to_ethtool_adv_x(remote_adv); 5594 5595 tg3_setup_flow_control(tp, local_adv, remote_adv); 5596 current_link_up = true; 5597 tp->serdes_counter = 0; 5598 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5599 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) { 5600 if (tp->serdes_counter) 5601 tp->serdes_counter--; 5602 else { 5603 if (workaround) { 5604 u32 val = serdes_cfg; 5605 5606 if (port_a) 5607 val |= 0xc010000; 5608 else 5609 val |= 0x4010000; 5610 5611 
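/* Rewrite the SerDes config and drop back to common setup
 * before re-testing for parallel detection below.
 */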
tw32_f(MAC_SERDES_CFG, val); 5612 } 5613 5614 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); 5615 udelay(40); 5616 5617 /* Link parallel detection - link is up */ 5618 /* only if we have PCS_SYNC and not */ 5619 /* receiving config code words */ 5620 mac_status = tr32(MAC_STATUS); 5621 if ((mac_status & MAC_STATUS_PCS_SYNCED) && 5622 !(mac_status & MAC_STATUS_RCVD_CFG)) { 5623 tg3_setup_flow_control(tp, 0, 0); 5624 current_link_up = true; 5625 tp->phy_flags |= 5626 TG3_PHYFLG_PARALLEL_DETECT; 5627 tp->serdes_counter = 5628 SERDES_PARALLEL_DET_TIMEOUT; 5629 } else 5630 goto restart_autoneg; 5631 } 5632 } 5633 } else { 5634 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; 5635 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5636 } 5637 5638 out: 5639 return current_link_up; 5640 } 5641 5642 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) 5643 { 5644 bool current_link_up = false; 5645 5646 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) 5647 goto out; 5648 5649 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 5650 u32 txflags, rxflags; 5651 int i; 5652 5653 if (fiber_autoneg(tp, &txflags, &rxflags)) { 5654 u32 local_adv = 0, remote_adv = 0; 5655 5656 if (txflags & ANEG_CFG_PS1) 5657 local_adv |= ADVERTISE_1000XPAUSE; 5658 if (txflags & ANEG_CFG_PS2) 5659 local_adv |= ADVERTISE_1000XPSE_ASYM; 5660 5661 if (rxflags & MR_LP_ADV_SYM_PAUSE) 5662 remote_adv |= LPA_1000XPAUSE; 5663 if (rxflags & MR_LP_ADV_ASYM_PAUSE) 5664 remote_adv |= LPA_1000XPAUSE_ASYM; 5665 5666 tp->link_config.rmt_adv = 5667 mii_adv_to_ethtool_adv_x(remote_adv); 5668 5669 tg3_setup_flow_control(tp, local_adv, remote_adv); 5670 5671 current_link_up = true; 5672 } 5673 for (i = 0; i < 30; i++) { 5674 udelay(20); 5675 tw32_f(MAC_STATUS, 5676 (MAC_STATUS_SYNC_CHANGED | 5677 MAC_STATUS_CFG_CHANGED)); 5678 udelay(40); 5679 if ((tr32(MAC_STATUS) & 5680 (MAC_STATUS_SYNC_CHANGED | 5681 MAC_STATUS_CFG_CHANGED)) == 0) 5682 break; 5683 } 5684 5685 mac_status = tr32(MAC_STATUS); 5686 if (!current_link_up && 5687 (mac_status & MAC_STATUS_PCS_SYNCED) && 5688 !(mac_status & MAC_STATUS_RCVD_CFG)) 5689 current_link_up = true; 5690 } else { 5691 tg3_setup_flow_control(tp, 0, 0); 5692 5693 /* Forcing 1000FD link up. 
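With autoneg disabled there is nothing to negotiate; the TBI fiber interface runs only at 1000 Mb/s full duplex.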
*/ 5694 current_link_up = true; 5695 5696 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); 5697 udelay(40); 5698 5699 tw32_f(MAC_MODE, tp->mac_mode); 5700 udelay(40); 5701 } 5702 5703 out: 5704 return current_link_up; 5705 } 5706 5707 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset) 5708 { 5709 u32 orig_pause_cfg; 5710 u32 orig_active_speed; 5711 u8 orig_active_duplex; 5712 u32 mac_status; 5713 bool current_link_up; 5714 int i; 5715 5716 orig_pause_cfg = tp->link_config.active_flowctrl; 5717 orig_active_speed = tp->link_config.active_speed; 5718 orig_active_duplex = tp->link_config.active_duplex; 5719 5720 if (!tg3_flag(tp, HW_AUTONEG) && 5721 tp->link_up && 5722 tg3_flag(tp, INIT_COMPLETE)) { 5723 mac_status = tr32(MAC_STATUS); 5724 mac_status &= (MAC_STATUS_PCS_SYNCED | 5725 MAC_STATUS_SIGNAL_DET | 5726 MAC_STATUS_CFG_CHANGED | 5727 MAC_STATUS_RCVD_CFG); 5728 if (mac_status == (MAC_STATUS_PCS_SYNCED | 5729 MAC_STATUS_SIGNAL_DET)) { 5730 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | 5731 MAC_STATUS_CFG_CHANGED)); 5732 return 0; 5733 } 5734 } 5735 5736 tw32_f(MAC_TX_AUTO_NEG, 0); 5737 5738 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); 5739 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI; 5740 tw32_f(MAC_MODE, tp->mac_mode); 5741 udelay(40); 5742 5743 if (tp->phy_id == TG3_PHY_ID_BCM8002) 5744 tg3_init_bcm8002(tp); 5745 5746 /* Enable link change event even when serdes polling. */ 5747 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5748 udelay(40); 5749 5750 tp->link_config.rmt_adv = 0; 5751 mac_status = tr32(MAC_STATUS); 5752 5753 if (tg3_flag(tp, HW_AUTONEG)) 5754 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); 5755 else 5756 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); 5757 5758 tp->napi[0].hw_status->status = 5759 (SD_STATUS_UPDATED | 5760 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG)); 5761 5762 for (i = 0; i < 100; i++) { 5763 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | 5764 MAC_STATUS_CFG_CHANGED)); 5765 udelay(5); 5766 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED | 5767 MAC_STATUS_CFG_CHANGED | 5768 MAC_STATUS_LNKSTATE_CHANGED)) == 0) 5769 break; 5770 } 5771 5772 mac_status = tr32(MAC_STATUS); 5773 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) { 5774 current_link_up = false; 5775 if (tp->link_config.autoneg == AUTONEG_ENABLE && 5776 tp->serdes_counter == 0) { 5777 tw32_f(MAC_MODE, (tp->mac_mode | 5778 MAC_MODE_SEND_CONFIGS)); 5779 udelay(1); 5780 tw32_f(MAC_MODE, tp->mac_mode); 5781 } 5782 } 5783 5784 if (current_link_up) { 5785 tp->link_config.active_speed = SPEED_1000; 5786 tp->link_config.active_duplex = DUPLEX_FULL; 5787 tw32(MAC_LED_CTRL, (tp->led_ctrl | 5788 LED_CTRL_LNKLED_OVERRIDE | 5789 LED_CTRL_1000MBPS_ON)); 5790 } else { 5791 tp->link_config.active_speed = SPEED_UNKNOWN; 5792 tp->link_config.active_duplex = DUPLEX_UNKNOWN; 5793 tw32(MAC_LED_CTRL, (tp->led_ctrl | 5794 LED_CTRL_LNKLED_OVERRIDE | 5795 LED_CTRL_TRAFFIC_OVERRIDE)); 5796 } 5797 5798 if (!tg3_test_and_report_link_chg(tp, current_link_up)) { 5799 u32 now_pause_cfg = tp->link_config.active_flowctrl; 5800 if (orig_pause_cfg != now_pause_cfg || 5801 orig_active_speed != tp->link_config.active_speed || 5802 orig_active_duplex != tp->link_config.active_duplex) 5803 tg3_link_report(tp); 5804 } 5805 5806 return 0; 5807 } 5808 5809 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset) 5810 { 5811 int err = 0; 5812 u32 bmsr, bmcr; 5813 u32 current_speed = SPEED_UNKNOWN; 5814 u8 current_duplex = DUPLEX_UNKNOWN; 5815 bool 
current_link_up = false; 5816 u32 local_adv, remote_adv, sgsr; 5817 5818 if ((tg3_asic_rev(tp) == ASIC_REV_5719 || 5819 tg3_asic_rev(tp) == ASIC_REV_5720) && 5820 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) && 5821 (sgsr & SERDES_TG3_SGMII_MODE)) { 5822 5823 if (force_reset) 5824 tg3_phy_reset(tp); 5825 5826 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; 5827 5828 if (!(sgsr & SERDES_TG3_LINK_UP)) { 5829 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5830 } else { 5831 current_link_up = true; 5832 if (sgsr & SERDES_TG3_SPEED_1000) { 5833 current_speed = SPEED_1000; 5834 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5835 } else if (sgsr & SERDES_TG3_SPEED_100) { 5836 current_speed = SPEED_100; 5837 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 5838 } else { 5839 current_speed = SPEED_10; 5840 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 5841 } 5842 5843 if (sgsr & SERDES_TG3_FULL_DUPLEX) 5844 current_duplex = DUPLEX_FULL; 5845 else 5846 current_duplex = DUPLEX_HALF; 5847 } 5848 5849 tw32_f(MAC_MODE, tp->mac_mode); 5850 udelay(40); 5851 5852 tg3_clear_mac_status(tp); 5853 5854 goto fiber_setup_done; 5855 } 5856 5857 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5858 tw32_f(MAC_MODE, tp->mac_mode); 5859 udelay(40); 5860 5861 tg3_clear_mac_status(tp); 5862 5863 if (force_reset) 5864 tg3_phy_reset(tp); 5865 5866 tp->link_config.rmt_adv = 0; 5867 5868 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5869 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5870 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 5871 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 5872 bmsr |= BMSR_LSTATUS; 5873 else 5874 bmsr &= ~BMSR_LSTATUS; 5875 } 5876 5877 err |= tg3_readphy(tp, MII_BMCR, &bmcr); 5878 5879 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && 5880 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { 5881 /* do nothing, just check for link up at the end */ 5882 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { 5883 u32 adv, newadv; 5884 5885 err |= tg3_readphy(tp, MII_ADVERTISE, &adv); 5886 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | 5887 ADVERTISE_1000XPAUSE | 5888 ADVERTISE_1000XPSE_ASYM | 5889 ADVERTISE_SLCT); 5890 5891 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 5892 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising); 5893 5894 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) { 5895 tg3_writephy(tp, MII_ADVERTISE, newadv); 5896 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART; 5897 tg3_writephy(tp, MII_BMCR, bmcr); 5898 5899 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5900 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; 5901 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5902 5903 return err; 5904 } 5905 } else { 5906 u32 new_bmcr; 5907 5908 bmcr &= ~BMCR_SPEED1000; 5909 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX); 5910 5911 if (tp->link_config.duplex == DUPLEX_FULL) 5912 new_bmcr |= BMCR_FULLDPLX; 5913 5914 if (new_bmcr != bmcr) { 5915 /* BMCR_SPEED1000 is a reserved bit that needs 5916 * to be set on write. 
5917 */ 5918 new_bmcr |= BMCR_SPEED1000; 5919 5920 /* Force a linkdown */ 5921 if (tp->link_up) { 5922 u32 adv; 5923 5924 err |= tg3_readphy(tp, MII_ADVERTISE, &adv); 5925 adv &= ~(ADVERTISE_1000XFULL | 5926 ADVERTISE_1000XHALF | 5927 ADVERTISE_SLCT); 5928 tg3_writephy(tp, MII_ADVERTISE, adv); 5929 tg3_writephy(tp, MII_BMCR, bmcr | 5930 BMCR_ANRESTART | 5931 BMCR_ANENABLE); 5932 udelay(10); 5933 tg3_carrier_off(tp); 5934 } 5935 tg3_writephy(tp, MII_BMCR, new_bmcr); 5936 bmcr = new_bmcr; 5937 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5938 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5939 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 5940 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 5941 bmsr |= BMSR_LSTATUS; 5942 else 5943 bmsr &= ~BMSR_LSTATUS; 5944 } 5945 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5946 } 5947 } 5948 5949 if (bmsr & BMSR_LSTATUS) { 5950 current_speed = SPEED_1000; 5951 current_link_up = true; 5952 if (bmcr & BMCR_FULLDPLX) 5953 current_duplex = DUPLEX_FULL; 5954 else 5955 current_duplex = DUPLEX_HALF; 5956 5957 local_adv = 0; 5958 remote_adv = 0; 5959 5960 if (bmcr & BMCR_ANENABLE) { 5961 u32 common; 5962 5963 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv); 5964 err |= tg3_readphy(tp, MII_LPA, &remote_adv); 5965 common = local_adv & remote_adv; 5966 if (common & (ADVERTISE_1000XHALF | 5967 ADVERTISE_1000XFULL)) { 5968 if (common & ADVERTISE_1000XFULL) 5969 current_duplex = DUPLEX_FULL; 5970 else 5971 current_duplex = DUPLEX_HALF; 5972 5973 tp->link_config.rmt_adv = 5974 mii_adv_to_ethtool_adv_x(remote_adv); 5975 } else if (!tg3_flag(tp, 5780_CLASS)) { 5976 /* Link is up via parallel detect */ 5977 } else { 5978 current_link_up = false; 5979 } 5980 } 5981 } 5982 5983 fiber_setup_done: 5984 if (current_link_up && current_duplex == DUPLEX_FULL) 5985 tg3_setup_flow_control(tp, local_adv, remote_adv); 5986 5987 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; 5988 if (tp->link_config.active_duplex == DUPLEX_HALF) 5989 tp->mac_mode |= MAC_MODE_HALF_DUPLEX; 5990 5991 tw32_f(MAC_MODE, tp->mac_mode); 5992 udelay(40); 5993 5994 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5995 5996 tp->link_config.active_speed = current_speed; 5997 tp->link_config.active_duplex = current_duplex; 5998 5999 tg3_test_and_report_link_chg(tp, current_link_up); 6000 return err; 6001 } 6002 6003 static void tg3_serdes_parallel_detect(struct tg3 *tp) 6004 { 6005 if (tp->serdes_counter) { 6006 /* Give autoneg time to complete. */ 6007 tp->serdes_counter--; 6008 return; 6009 } 6010 6011 if (!tp->link_up && 6012 (tp->link_config.autoneg == AUTONEG_ENABLE)) { 6013 u32 bmcr; 6014 6015 tg3_readphy(tp, MII_BMCR, &bmcr); 6016 if (bmcr & BMCR_ANENABLE) { 6017 u32 phy1, phy2; 6018 6019 /* Select shadow register 0x1f */ 6020 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00); 6021 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1); 6022 6023 /* Select expansion interrupt status register */ 6024 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 6025 MII_TG3_DSP_EXP1_INT_STAT); 6026 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); 6027 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); 6028 6029 if ((phy1 & 0x10) && !(phy2 & 0x20)) { 6030 /* We have signal detect and not receiving 6031 * config code words, link is up by parallel 6032 * detection. 
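 * Force 1000FD in the BMCR here; autoneg is
 * re-enabled further below if config code words
 * show up again.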
6033 */ 6034 6035 bmcr &= ~BMCR_ANENABLE; 6036 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; 6037 tg3_writephy(tp, MII_BMCR, bmcr); 6038 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT; 6039 } 6040 } 6041 } else if (tp->link_up && 6042 (tp->link_config.autoneg == AUTONEG_ENABLE) && 6043 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { 6044 u32 phy2; 6045 6046 /* Select expansion interrupt status register */ 6047 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 6048 MII_TG3_DSP_EXP1_INT_STAT); 6049 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); 6050 if (phy2 & 0x20) { 6051 u32 bmcr; 6052 6053 /* Config code words received, turn on autoneg. */ 6054 tg3_readphy(tp, MII_BMCR, &bmcr); 6055 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE); 6056 6057 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 6058 6059 } 6060 } 6061 } 6062 6063 static int tg3_setup_phy(struct tg3 *tp, bool force_reset) 6064 { 6065 u32 val; 6066 int err; 6067 6068 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 6069 err = tg3_setup_fiber_phy(tp, force_reset); 6070 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 6071 err = tg3_setup_fiber_mii_phy(tp, force_reset); 6072 else 6073 err = tg3_setup_copper_phy(tp, force_reset); 6074 6075 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) { 6076 u32 scale; 6077 6078 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; 6079 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5) 6080 scale = 65; 6081 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25) 6082 scale = 6; 6083 else 6084 scale = 12; 6085 6086 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK; 6087 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT); 6088 tw32(GRC_MISC_CFG, val); 6089 } 6090 6091 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | 6092 (6 << TX_LENGTHS_IPG_SHIFT); 6093 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 6094 tg3_asic_rev(tp) == ASIC_REV_5762) 6095 val |= tr32(MAC_TX_LENGTHS) & 6096 (TX_LENGTHS_JMB_FRM_LEN_MSK | 6097 TX_LENGTHS_CNT_DWN_VAL_MSK); 6098 6099 if (tp->link_config.active_speed == SPEED_1000 && 6100 tp->link_config.active_duplex == DUPLEX_HALF) 6101 tw32(MAC_TX_LENGTHS, val | 6102 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)); 6103 else 6104 tw32(MAC_TX_LENGTHS, val | 6105 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); 6106 6107 if (!tg3_flag(tp, 5705_PLUS)) { 6108 if (tp->link_up) { 6109 tw32(HOSTCC_STAT_COAL_TICKS, 6110 tp->coal.stats_block_coalesce_usecs); 6111 } else { 6112 tw32(HOSTCC_STAT_COAL_TICKS, 0); 6113 } 6114 } 6115 6116 if (tg3_flag(tp, ASPM_WORKAROUND)) { 6117 val = tr32(PCIE_PWR_MGMT_THRESH); 6118 if (!tp->link_up) 6119 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | 6120 tp->pwrmgmt_thresh; 6121 else 6122 val |= PCIE_PWR_MGMT_L1_THRESH_MSK; 6123 tw32(PCIE_PWR_MGMT_THRESH, val); 6124 } 6125 6126 return err; 6127 } 6128 6129 /* tp->lock must be held */ 6130 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts) 6131 { 6132 u64 stamp; 6133 6134 ptp_read_system_prets(sts); 6135 stamp = tr32(TG3_EAV_REF_CLCK_LSB); 6136 ptp_read_system_postts(sts); 6137 stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32; 6138 6139 return stamp; 6140 } 6141 6142 /* tp->lock must be held */ 6143 static void tg3_refclk_write(struct tg3 *tp, u64 newval) 6144 { 6145 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL); 6146 6147 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP); 6148 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff); 6149 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32); 6150 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME); 6151 } 6152 6153 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync); 6154 static inline void 
tg3_full_unlock(struct tg3 *tp); 6155 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) 6156 { 6157 struct tg3 *tp = netdev_priv(dev); 6158 6159 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 6160 SOF_TIMESTAMPING_RX_SOFTWARE | 6161 SOF_TIMESTAMPING_SOFTWARE; 6162 6163 if (tg3_flag(tp, PTP_CAPABLE)) { 6164 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE | 6165 SOF_TIMESTAMPING_RX_HARDWARE | 6166 SOF_TIMESTAMPING_RAW_HARDWARE; 6167 } 6168 6169 if (tp->ptp_clock) 6170 info->phc_index = ptp_clock_index(tp->ptp_clock); 6171 else 6172 info->phc_index = -1; 6173 6174 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); 6175 6176 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 6177 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | 6178 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | 6179 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); 6180 return 0; 6181 } 6182 6183 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) 6184 { 6185 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6186 bool neg_adj = false; 6187 u32 correction = 0; 6188 6189 if (ppb < 0) { 6190 neg_adj = true; 6191 ppb = -ppb; 6192 } 6193 6194 /* Frequency adjustment is performed using hardware with a 24 bit 6195 * accumulator and a programmable correction value. On each clk, the 6196 * correction value gets added to the accumulator and when it 6197 * overflows, the time counter is incremented/decremented. 6198 * 6199 * So conversion from ppb to correction value is 6200 * ppb * (1 << 24) / 1000000000 6201 */ 6202 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) & 6203 TG3_EAV_REF_CLK_CORRECT_MASK; 6204 6205 tg3_full_lock(tp, 0); 6206 6207 if (correction) 6208 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 6209 TG3_EAV_REF_CLK_CORRECT_EN | 6210 (neg_adj ? 
TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction); 6211 else 6212 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0); 6213 6214 tg3_full_unlock(tp); 6215 6216 return 0; 6217 } 6218 6219 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) 6220 { 6221 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6222 6223 tg3_full_lock(tp, 0); 6224 tp->ptp_adjust += delta; 6225 tg3_full_unlock(tp); 6226 6227 return 0; 6228 } 6229 6230 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, 6231 struct ptp_system_timestamp *sts) 6232 { 6233 u64 ns; 6234 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6235 6236 tg3_full_lock(tp, 0); 6237 ns = tg3_refclk_read(tp, sts); 6238 ns += tp->ptp_adjust; 6239 tg3_full_unlock(tp); 6240 6241 *ts = ns_to_timespec64(ns); 6242 6243 return 0; 6244 } 6245 6246 static int tg3_ptp_settime(struct ptp_clock_info *ptp, 6247 const struct timespec64 *ts) 6248 { 6249 u64 ns; 6250 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6251 6252 ns = timespec64_to_ns(ts); 6253 6254 tg3_full_lock(tp, 0); 6255 tg3_refclk_write(tp, ns); 6256 tp->ptp_adjust = 0; 6257 tg3_full_unlock(tp); 6258 6259 return 0; 6260 } 6261 6262 static int tg3_ptp_enable(struct ptp_clock_info *ptp, 6263 struct ptp_clock_request *rq, int on) 6264 { 6265 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6266 u32 clock_ctl; 6267 int rval = 0; 6268 6269 switch (rq->type) { 6270 case PTP_CLK_REQ_PEROUT: 6271 /* Reject requests with unsupported flags */ 6272 if (rq->perout.flags) 6273 return -EOPNOTSUPP; 6274 6275 if (rq->perout.index != 0) 6276 return -EINVAL; 6277 6278 tg3_full_lock(tp, 0); 6279 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL); 6280 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK; 6281 6282 if (on) { 6283 u64 nsec; 6284 6285 nsec = rq->perout.start.sec * 1000000000ULL + 6286 rq->perout.start.nsec; 6287 6288 if (rq->perout.period.sec || rq->perout.period.nsec) { 6289 netdev_warn(tp->dev, 6290 "Device supports only a one-shot timesync output, period must be 0\n"); 6291 rval = -EINVAL; 6292 goto err_out; 6293 } 6294 6295 if (nsec & (1ULL << 63)) { 6296 netdev_warn(tp->dev, 6297 "Start value (nsec) is over limit. 
Maximum size of start is only 63 bits\n"); 6298 rval = -EINVAL; 6299 goto err_out; 6300 } 6301 6302 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff)); 6303 tw32(TG3_EAV_WATCHDOG0_MSB, 6304 TG3_EAV_WATCHDOG0_EN | 6305 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK)); 6306 6307 tw32(TG3_EAV_REF_CLCK_CTL, 6308 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0); 6309 } else { 6310 tw32(TG3_EAV_WATCHDOG0_MSB, 0); 6311 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl); 6312 } 6313 6314 err_out: 6315 tg3_full_unlock(tp); 6316 return rval; 6317 6318 default: 6319 break; 6320 } 6321 6322 return -EOPNOTSUPP; 6323 } 6324 6325 static const struct ptp_clock_info tg3_ptp_caps = { 6326 .owner = THIS_MODULE, 6327 .name = "tg3 clock", 6328 .max_adj = 250000000, 6329 .n_alarm = 0, 6330 .n_ext_ts = 0, 6331 .n_per_out = 1, 6332 .n_pins = 0, 6333 .pps = 0, 6334 .adjfreq = tg3_ptp_adjfreq, 6335 .adjtime = tg3_ptp_adjtime, 6336 .gettimex64 = tg3_ptp_gettimex, 6337 .settime64 = tg3_ptp_settime, 6338 .enable = tg3_ptp_enable, 6339 }; 6340 6341 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock, 6342 struct skb_shared_hwtstamps *timestamp) 6343 { 6344 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps)); 6345 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) + 6346 tp->ptp_adjust); 6347 } 6348 6349 /* tp->lock must be held */ 6350 static void tg3_ptp_init(struct tg3 *tp) 6351 { 6352 if (!tg3_flag(tp, PTP_CAPABLE)) 6353 return; 6354 6355 /* Initialize the hardware clock to the system time. */ 6356 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real())); 6357 tp->ptp_adjust = 0; 6358 tp->ptp_info = tg3_ptp_caps; 6359 } 6360 6361 /* tp->lock must be held */ 6362 static void tg3_ptp_resume(struct tg3 *tp) 6363 { 6364 if (!tg3_flag(tp, PTP_CAPABLE)) 6365 return; 6366 6367 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust); 6368 tp->ptp_adjust = 0; 6369 } 6370 6371 static void tg3_ptp_fini(struct tg3 *tp) 6372 { 6373 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock) 6374 return; 6375 6376 ptp_clock_unregister(tp->ptp_clock); 6377 tp->ptp_clock = NULL; 6378 tp->ptp_adjust = 0; 6379 } 6380 6381 static inline int tg3_irq_sync(struct tg3 *tp) 6382 { 6383 return tp->irq_sync; 6384 } 6385 6386 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len) 6387 { 6388 int i; 6389 6390 dst = (u32 *)((u8 *)dst + off); 6391 for (i = 0; i < len; i += sizeof(u32)) 6392 *dst++ = tr32(off + i); 6393 } 6394 6395 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs) 6396 { 6397 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0); 6398 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200); 6399 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0); 6400 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0); 6401 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04); 6402 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80); 6403 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48); 6404 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04); 6405 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20); 6406 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c); 6407 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c); 6408 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c); 6409 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44); 6410 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04); 6411 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20); 6412 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14); 6413 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08); 6414 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08); 6415 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100); 6416 6417 if (tg3_flag(tp, SUPPORT_MSIX)) 6418 tg3_rd32_loop(tp, regs, 
HOSTCC_RXCOL_TICKS_VEC1, 0x180); 6419 6420 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10); 6421 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58); 6422 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08); 6423 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08); 6424 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04); 6425 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04); 6426 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04); 6427 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04); 6428 6429 if (!tg3_flag(tp, 5705_PLUS)) { 6430 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04); 6431 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04); 6432 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04); 6433 } 6434 6435 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110); 6436 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120); 6437 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c); 6438 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04); 6439 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c); 6440 6441 if (tg3_flag(tp, NVRAM)) 6442 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24); 6443 } 6444 6445 static void tg3_dump_state(struct tg3 *tp) 6446 { 6447 int i; 6448 u32 *regs; 6449 6450 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC); 6451 if (!regs) 6452 return; 6453 6454 if (tg3_flag(tp, PCI_EXPRESS)) { 6455 /* Read up to but not including private PCI registers */ 6456 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32)) 6457 regs[i / sizeof(u32)] = tr32(i); 6458 } else 6459 tg3_dump_legacy_regs(tp, regs); 6460 6461 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) { 6462 if (!regs[i + 0] && !regs[i + 1] && 6463 !regs[i + 2] && !regs[i + 3]) 6464 continue; 6465 6466 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", 6467 i * 4, 6468 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]); 6469 } 6470 6471 kfree(regs); 6472 6473 for (i = 0; i < tp->irq_cnt; i++) { 6474 struct tg3_napi *tnapi = &tp->napi[i]; 6475 6476 /* SW status block */ 6477 netdev_err(tp->dev, 6478 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", 6479 i, 6480 tnapi->hw_status->status, 6481 tnapi->hw_status->status_tag, 6482 tnapi->hw_status->rx_jumbo_consumer, 6483 tnapi->hw_status->rx_consumer, 6484 tnapi->hw_status->rx_mini_consumer, 6485 tnapi->hw_status->idx[0].rx_producer, 6486 tnapi->hw_status->idx[0].tx_consumer); 6487 6488 netdev_err(tp->dev, 6489 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n", 6490 i, 6491 tnapi->last_tag, tnapi->last_irq_tag, 6492 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending, 6493 tnapi->rx_rcb_ptr, 6494 tnapi->prodring.rx_std_prod_idx, 6495 tnapi->prodring.rx_std_cons_idx, 6496 tnapi->prodring.rx_jmb_prod_idx, 6497 tnapi->prodring.rx_jmb_cons_idx); 6498 } 6499 } 6500 6501 /* This is called whenever we suspect that the system chipset is re- 6502 * ordering the sequence of MMIO to the tx send mailbox. The symptom 6503 * is bogus tx completions. We try to recover by setting the 6504 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later 6505 * in the workqueue. 6506 */ 6507 static void tg3_tx_recover(struct tg3 *tp) 6508 { 6509 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) || 6510 tp->write32_tx_mbox == tg3_write_indirect_mbox); 6511 6512 netdev_warn(tp->dev, 6513 "The system may be re-ordering memory-mapped I/O " 6514 "cycles to the network device, attempting to recover. 
" 6515 "Please report the problem to the driver maintainer " 6516 "and include system chipset information.\n"); 6517 6518 tg3_flag_set(tp, TX_RECOVERY_PENDING); 6519 } 6520 6521 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) 6522 { 6523 /* Tell compiler to fetch tx indices from memory. */ 6524 barrier(); 6525 return tnapi->tx_pending - 6526 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1)); 6527 } 6528 6529 /* Tigon3 never reports partial packet sends. So we do not 6530 * need special logic to handle SKBs that have not had all 6531 * of their frags sent yet, like SunGEM does. 6532 */ 6533 static void tg3_tx(struct tg3_napi *tnapi) 6534 { 6535 struct tg3 *tp = tnapi->tp; 6536 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer; 6537 u32 sw_idx = tnapi->tx_cons; 6538 struct netdev_queue *txq; 6539 int index = tnapi - tp->napi; 6540 unsigned int pkts_compl = 0, bytes_compl = 0; 6541 6542 if (tg3_flag(tp, ENABLE_TSS)) 6543 index--; 6544 6545 txq = netdev_get_tx_queue(tp->dev, index); 6546 6547 while (sw_idx != hw_idx) { 6548 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx]; 6549 struct sk_buff *skb = ri->skb; 6550 int i, tx_bug = 0; 6551 6552 if (unlikely(skb == NULL)) { 6553 tg3_tx_recover(tp); 6554 return; 6555 } 6556 6557 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) { 6558 struct skb_shared_hwtstamps timestamp; 6559 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB); 6560 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32; 6561 6562 tg3_hwclock_to_timestamp(tp, hwclock, ×tamp); 6563 6564 skb_tstamp_tx(skb, ×tamp); 6565 } 6566 6567 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), 6568 skb_headlen(skb), DMA_TO_DEVICE); 6569 6570 ri->skb = NULL; 6571 6572 while (ri->fragmented) { 6573 ri->fragmented = false; 6574 sw_idx = NEXT_TX(sw_idx); 6575 ri = &tnapi->tx_buffers[sw_idx]; 6576 } 6577 6578 sw_idx = NEXT_TX(sw_idx); 6579 6580 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 6581 ri = &tnapi->tx_buffers[sw_idx]; 6582 if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) 6583 tx_bug = 1; 6584 6585 dma_unmap_page(&tp->pdev->dev, 6586 dma_unmap_addr(ri, mapping), 6587 skb_frag_size(&skb_shinfo(skb)->frags[i]), 6588 DMA_TO_DEVICE); 6589 6590 while (ri->fragmented) { 6591 ri->fragmented = false; 6592 sw_idx = NEXT_TX(sw_idx); 6593 ri = &tnapi->tx_buffers[sw_idx]; 6594 } 6595 6596 sw_idx = NEXT_TX(sw_idx); 6597 } 6598 6599 pkts_compl++; 6600 bytes_compl += skb->len; 6601 6602 dev_consume_skb_any(skb); 6603 6604 if (unlikely(tx_bug)) { 6605 tg3_tx_recover(tp); 6606 return; 6607 } 6608 } 6609 6610 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl); 6611 6612 tnapi->tx_cons = sw_idx; 6613 6614 /* Need to make the tx_cons update visible to tg3_start_xmit() 6615 * before checking for netif_queue_stopped(). Without the 6616 * memory barrier, there is a small possibility that tg3_start_xmit() 6617 * will miss it and cause the queue to be stopped forever. 
6618 */ 6619 smp_mb(); 6620 6621 if (unlikely(netif_tx_queue_stopped(txq) && 6622 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) { 6623 __netif_tx_lock(txq, smp_processor_id()); 6624 if (netif_tx_queue_stopped(txq) && 6625 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))) 6626 netif_tx_wake_queue(txq); 6627 __netif_tx_unlock(txq); 6628 } 6629 } 6630 6631 static void tg3_frag_free(bool is_frag, void *data) 6632 { 6633 if (is_frag) 6634 skb_free_frag(data); 6635 else 6636 kfree(data); 6637 } 6638 6639 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) 6640 { 6641 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) + 6642 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 6643 6644 if (!ri->data) 6645 return; 6646 6647 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz, 6648 DMA_FROM_DEVICE); 6649 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data); 6650 ri->data = NULL; 6651 } 6652 6653 6654 /* Returns size of skb allocated or < 0 on error. 6655 * 6656 * We only need to fill in the address because the other members 6657 * of the RX descriptor are invariant, see tg3_init_rings. 6658 * 6659 * Note the purposeful asymmetry of cpu vs. chip accesses. For 6660 * posting buffers we only dirty the first cache line of the RX 6661 * descriptor (containing the address). Whereas for the RX status 6662 * buffers the cpu only reads the last cacheline of the RX descriptor 6663 * (to fetch the error flags, vlan tag, checksum, and opaque cookie). 6664 */ 6665 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, 6666 u32 opaque_key, u32 dest_idx_unmasked, 6667 unsigned int *frag_size) 6668 { 6669 struct tg3_rx_buffer_desc *desc; 6670 struct ring_info *map; 6671 u8 *data; 6672 dma_addr_t mapping; 6673 int skb_size, data_size, dest_idx; 6674 6675 switch (opaque_key) { 6676 case RXD_OPAQUE_RING_STD: 6677 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; 6678 desc = &tpr->rx_std[dest_idx]; 6679 map = &tpr->rx_std_buffers[dest_idx]; 6680 data_size = tp->rx_pkt_map_sz; 6681 break; 6682 6683 case RXD_OPAQUE_RING_JUMBO: 6684 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; 6685 desc = &tpr->rx_jmb[dest_idx].std; 6686 map = &tpr->rx_jmb_buffers[dest_idx]; 6687 data_size = TG3_RX_JMB_MAP_SZ; 6688 break; 6689 6690 default: 6691 return -EINVAL; 6692 } 6693 6694 /* Do not overwrite any of the map or rp information 6695 * until we are sure we can commit to a new buffer. 6696 * 6697 * Callers depend upon this behavior and assume that 6698 * we leave everything unchanged if we fail. 6699 */ 6700 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) + 6701 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 6702 if (skb_size <= PAGE_SIZE) { 6703 data = napi_alloc_frag(skb_size); 6704 *frag_size = skb_size; 6705 } else { 6706 data = kmalloc(skb_size, GFP_ATOMIC); 6707 *frag_size = 0; 6708 } 6709 if (!data) 6710 return -ENOMEM; 6711 6712 mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp), 6713 data_size, DMA_FROM_DEVICE); 6714 if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) { 6715 tg3_frag_free(skb_size <= PAGE_SIZE, data); 6716 return -EIO; 6717 } 6718 6719 map->data = data; 6720 dma_unmap_addr_set(map, mapping, mapping); 6721 6722 desc->addr_hi = ((u64)mapping >> 32); 6723 desc->addr_lo = ((u64)mapping & 0xffffffff); 6724 6725 return data_size; 6726 } 6727 6728 /* We only need to move over the address because the other 6729 * members of the RX descriptor are invariant. 
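 * (Editorial note: "invariant" covers every descriptor field except
 * addr_hi/addr_lo; those invariant fields are set up once by
 * tg3_init_rings() and are not touched on the hot path, which is
 * why tg3_recycle_rx() below copies only the DMA address and the
 * ring_info bookkeeping.)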
See notes above 6730 * tg3_alloc_rx_data for full details. 6731 */ 6732 static void tg3_recycle_rx(struct tg3_napi *tnapi, 6733 struct tg3_rx_prodring_set *dpr, 6734 u32 opaque_key, int src_idx, 6735 u32 dest_idx_unmasked) 6736 { 6737 struct tg3 *tp = tnapi->tp; 6738 struct tg3_rx_buffer_desc *src_desc, *dest_desc; 6739 struct ring_info *src_map, *dest_map; 6740 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring; 6741 int dest_idx; 6742 6743 switch (opaque_key) { 6744 case RXD_OPAQUE_RING_STD: 6745 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; 6746 dest_desc = &dpr->rx_std[dest_idx]; 6747 dest_map = &dpr->rx_std_buffers[dest_idx]; 6748 src_desc = &spr->rx_std[src_idx]; 6749 src_map = &spr->rx_std_buffers[src_idx]; 6750 break; 6751 6752 case RXD_OPAQUE_RING_JUMBO: 6753 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; 6754 dest_desc = &dpr->rx_jmb[dest_idx].std; 6755 dest_map = &dpr->rx_jmb_buffers[dest_idx]; 6756 src_desc = &spr->rx_jmb[src_idx].std; 6757 src_map = &spr->rx_jmb_buffers[src_idx]; 6758 break; 6759 6760 default: 6761 return; 6762 } 6763 6764 dest_map->data = src_map->data; 6765 dma_unmap_addr_set(dest_map, mapping, 6766 dma_unmap_addr(src_map, mapping)); 6767 dest_desc->addr_hi = src_desc->addr_hi; 6768 dest_desc->addr_lo = src_desc->addr_lo; 6769 6770 /* Ensure that the update to the skb happens after the physical 6771 * addresses have been transferred to the new BD location. 6772 */ 6773 smp_wmb(); 6774 6775 src_map->data = NULL; 6776 } 6777 6778 /* The RX ring scheme is composed of multiple rings which post fresh 6779 * buffers to the chip, and one special ring the chip uses to report 6780 * status back to the host. 6781 * 6782 * The special ring reports the status of received packets to the 6783 * host. The chip does not write into the original descriptor the 6784 * RX buffer was obtained from. The chip simply takes the original 6785 * descriptor as provided by the host, updates the status and length 6786 * field, then writes this into the next status ring entry. 6787 * 6788 * Each ring the host uses to post buffers to the chip is described 6789 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives, 6790 * it is first placed into the on-chip RAM. When the packet's length 6791 * is known, it walks down the TG3_BDINFO entries to select the ring. 6792 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO 6793 * which is within the range of the new packet's length is chosen. 6794 * 6795 * The "separate ring for rx status" scheme may sound queer, but it makes 6796 * sense from a cache coherency perspective. If only the host writes 6797 * to the buffer post rings, and only the chip writes to the rx status 6798 * rings, then cache lines never move beyond shared-modified state. 6799 * If both the host and chip were to write into the same ring, cache line 6800 * eviction could occur since both entities want it in an exclusive state. 6801 */ 6802 static int tg3_rx(struct tg3_napi *tnapi, int budget) 6803 { 6804 struct tg3 *tp = tnapi->tp; 6805 u32 work_mask, rx_std_posted = 0; 6806 u32 std_prod_idx, jmb_prod_idx; 6807 u32 sw_idx = tnapi->rx_rcb_ptr; 6808 u16 hw_idx; 6809 int received; 6810 struct tg3_rx_prodring_set *tpr = &tnapi->prodring; 6811 6812 hw_idx = *(tnapi->rx_rcb_prod_idx); 6813 /* 6814 * We need to order the read of hw_idx and the read of 6815 * the opaque cookie. 
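 *
 * (A minimal sketch of the ordering relied upon here, paraphrasing
 * the code around this comment:
 *
 *	hw_idx = *(tnapi->rx_rcb_prod_idx);	// producer index first
 *	rmb();					// then the ring entries
 *	desc = &tnapi->rx_rcb[sw_idx];		// now safe to read
 *
 * Without the rmb() the CPU could read a return-ring entry before
 * reading the producer index that makes that entry valid.)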
6816 */ 6817 rmb(); 6818 work_mask = 0; 6819 received = 0; 6820 std_prod_idx = tpr->rx_std_prod_idx; 6821 jmb_prod_idx = tpr->rx_jmb_prod_idx; 6822 while (sw_idx != hw_idx && budget > 0) { 6823 struct ring_info *ri; 6824 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; 6825 unsigned int len; 6826 struct sk_buff *skb; 6827 dma_addr_t dma_addr; 6828 u32 opaque_key, desc_idx, *post_ptr; 6829 u8 *data; 6830 u64 tstamp = 0; 6831 6832 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 6833 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 6834 if (opaque_key == RXD_OPAQUE_RING_STD) { 6835 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx]; 6836 dma_addr = dma_unmap_addr(ri, mapping); 6837 data = ri->data; 6838 post_ptr = &std_prod_idx; 6839 rx_std_posted++; 6840 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 6841 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx]; 6842 dma_addr = dma_unmap_addr(ri, mapping); 6843 data = ri->data; 6844 post_ptr = &jmb_prod_idx; 6845 } else 6846 goto next_pkt_nopost; 6847 6848 work_mask |= opaque_key; 6849 6850 if (desc->err_vlan & RXD_ERR_MASK) { 6851 drop_it: 6852 tg3_recycle_rx(tnapi, tpr, opaque_key, 6853 desc_idx, *post_ptr); 6854 drop_it_no_recycle: 6855 /* Other statistics kept track of by card. */ 6856 tp->rx_dropped++; 6857 goto next_pkt; 6858 } 6859 6860 prefetch(data + TG3_RX_OFFSET(tp)); 6861 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 6862 ETH_FCS_LEN; 6863 6864 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) == 6865 RXD_FLAG_PTPSTAT_PTPV1 || 6866 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) == 6867 RXD_FLAG_PTPSTAT_PTPV2) { 6868 tstamp = tr32(TG3_RX_TSTAMP_LSB); 6869 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32; 6870 } 6871 6872 if (len > TG3_RX_COPY_THRESH(tp)) { 6873 int skb_size; 6874 unsigned int frag_size; 6875 6876 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key, 6877 *post_ptr, &frag_size); 6878 if (skb_size < 0) 6879 goto drop_it; 6880 6881 dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size, 6882 DMA_FROM_DEVICE); 6883 6884 /* Ensure that the update to the data happens 6885 * after the usage of the old DMA mapping. 
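 *
 * (For orientation, tg3_rx() delivers a frame one of two ways,
 * selected by the TG3_RX_COPY_THRESH(tp) test above:
 *
 *	len >  threshold: post a fresh buffer via tg3_alloc_rx_data(),
 *		unmap the old one and hand it to build_skb(); zero
 *		copy, this branch;
 *	len <= threshold: recycle the still-mapped buffer via
 *		tg3_recycle_rx() and memcpy() the short frame into a
 *		newly allocated skb, the else branch below.
 *
 * The smp_wmb() that follows publishes ri->data = NULL only after
 * the old mapping is no longer used.)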
6886 */ 6887 smp_wmb(); 6888 6889 ri->data = NULL; 6890 6891 skb = build_skb(data, frag_size); 6892 if (!skb) { 6893 tg3_frag_free(frag_size != 0, data); 6894 goto drop_it_no_recycle; 6895 } 6896 skb_reserve(skb, TG3_RX_OFFSET(tp)); 6897 } else { 6898 tg3_recycle_rx(tnapi, tpr, opaque_key, 6899 desc_idx, *post_ptr); 6900 6901 skb = netdev_alloc_skb(tp->dev, 6902 len + TG3_RAW_IP_ALIGN); 6903 if (skb == NULL) 6904 goto drop_it_no_recycle; 6905 6906 skb_reserve(skb, TG3_RAW_IP_ALIGN); 6907 dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len, 6908 DMA_FROM_DEVICE); 6909 memcpy(skb->data, 6910 data + TG3_RX_OFFSET(tp), 6911 len); 6912 dma_sync_single_for_device(&tp->pdev->dev, dma_addr, 6913 len, DMA_FROM_DEVICE); 6914 } 6915 6916 skb_put(skb, len); 6917 if (tstamp) 6918 tg3_hwclock_to_timestamp(tp, tstamp, 6919 skb_hwtstamps(skb)); 6920 6921 if ((tp->dev->features & NETIF_F_RXCSUM) && 6922 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && 6923 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) 6924 >> RXD_TCPCSUM_SHIFT) == 0xffff)) 6925 skb->ip_summed = CHECKSUM_UNNECESSARY; 6926 else 6927 skb_checksum_none_assert(skb); 6928 6929 skb->protocol = eth_type_trans(skb, tp->dev); 6930 6931 if (len > (tp->dev->mtu + ETH_HLEN) && 6932 skb->protocol != htons(ETH_P_8021Q) && 6933 skb->protocol != htons(ETH_P_8021AD)) { 6934 dev_kfree_skb_any(skb); 6935 goto drop_it_no_recycle; 6936 } 6937 6938 if (desc->type_flags & RXD_FLAG_VLAN && 6939 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) 6940 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 6941 desc->err_vlan & RXD_VLAN_MASK); 6942 6943 napi_gro_receive(&tnapi->napi, skb); 6944 6945 received++; 6946 budget--; 6947 6948 next_pkt: 6949 (*post_ptr)++; 6950 6951 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { 6952 tpr->rx_std_prod_idx = std_prod_idx & 6953 tp->rx_std_ring_mask; 6954 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 6955 tpr->rx_std_prod_idx); 6956 work_mask &= ~RXD_OPAQUE_RING_STD; 6957 rx_std_posted = 0; 6958 } 6959 next_pkt_nopost: 6960 sw_idx++; 6961 sw_idx &= tp->rx_ret_ring_mask; 6962 6963 /* Refresh hw_idx to see if there is new work */ 6964 if (sw_idx == hw_idx) { 6965 hw_idx = *(tnapi->rx_rcb_prod_idx); 6966 rmb(); 6967 } 6968 } 6969 6970 /* ACK the status ring. */ 6971 tnapi->rx_rcb_ptr = sw_idx; 6972 tw32_rx_mbox(tnapi->consmbox, sw_idx); 6973 6974 /* Refill RX ring(s). */ 6975 if (!tg3_flag(tp, ENABLE_RSS)) { 6976 /* Sync BD data before updating mailbox */ 6977 wmb(); 6978 6979 if (work_mask & RXD_OPAQUE_RING_STD) { 6980 tpr->rx_std_prod_idx = std_prod_idx & 6981 tp->rx_std_ring_mask; 6982 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 6983 tpr->rx_std_prod_idx); 6984 } 6985 if (work_mask & RXD_OPAQUE_RING_JUMBO) { 6986 tpr->rx_jmb_prod_idx = jmb_prod_idx & 6987 tp->rx_jmb_ring_mask; 6988 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 6989 tpr->rx_jmb_prod_idx); 6990 } 6991 } else if (work_mask) { 6992 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be 6993 * updated before the producer indices can be updated. 
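 *
 * (This smp_wmb() pairs with the smp_rmb() in tg3_rx_prodring_xfer();
 * a condensed sketch of the two sides:
 *
 *	per-vector tg3_rx()		tg3_rx_prodring_xfer()
 *	-------------------		----------------------
 *	write rx_std_buffers[i]		read rx_std_prod_idx
 *	smp_wmb();			smp_rmb();
 *	write rx_std_prod_idx		read rx_std_buffers[i]
 *
 * so the transfer side never consumes buffer entries older than the
 * producer index it observed.)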
6994 */ 6995 smp_wmb(); 6996 6997 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask; 6998 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask; 6999 7000 if (tnapi != &tp->napi[1]) { 7001 tp->rx_refill = true; 7002 napi_schedule(&tp->napi[1].napi); 7003 } 7004 } 7005 7006 return received; 7007 } 7008 7009 static void tg3_poll_link(struct tg3 *tp) 7010 { 7011 /* handle link change and other phy events */ 7012 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { 7013 struct tg3_hw_status *sblk = tp->napi[0].hw_status; 7014 7015 if (sblk->status & SD_STATUS_LINK_CHG) { 7016 sblk->status = SD_STATUS_UPDATED | 7017 (sblk->status & ~SD_STATUS_LINK_CHG); 7018 spin_lock(&tp->lock); 7019 if (tg3_flag(tp, USE_PHYLIB)) { 7020 tw32_f(MAC_STATUS, 7021 (MAC_STATUS_SYNC_CHANGED | 7022 MAC_STATUS_CFG_CHANGED | 7023 MAC_STATUS_MI_COMPLETION | 7024 MAC_STATUS_LNKSTATE_CHANGED)); 7025 udelay(40); 7026 } else 7027 tg3_setup_phy(tp, false); 7028 spin_unlock(&tp->lock); 7029 } 7030 } 7031 } 7032 7033 static int tg3_rx_prodring_xfer(struct tg3 *tp, 7034 struct tg3_rx_prodring_set *dpr, 7035 struct tg3_rx_prodring_set *spr) 7036 { 7037 u32 si, di, cpycnt, src_prod_idx; 7038 int i, err = 0; 7039 7040 while (1) { 7041 src_prod_idx = spr->rx_std_prod_idx; 7042 7043 /* Make sure updates to the rx_std_buffers[] entries and the 7044 * standard producer index are seen in the correct order. 7045 */ 7046 smp_rmb(); 7047 7048 if (spr->rx_std_cons_idx == src_prod_idx) 7049 break; 7050 7051 if (spr->rx_std_cons_idx < src_prod_idx) 7052 cpycnt = src_prod_idx - spr->rx_std_cons_idx; 7053 else 7054 cpycnt = tp->rx_std_ring_mask + 1 - 7055 spr->rx_std_cons_idx; 7056 7057 cpycnt = min(cpycnt, 7058 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx); 7059 7060 si = spr->rx_std_cons_idx; 7061 di = dpr->rx_std_prod_idx; 7062 7063 for (i = di; i < di + cpycnt; i++) { 7064 if (dpr->rx_std_buffers[i].data) { 7065 cpycnt = i - di; 7066 err = -ENOSPC; 7067 break; 7068 } 7069 } 7070 7071 if (!cpycnt) 7072 break; 7073 7074 /* Ensure that updates to the rx_std_buffers ring and the 7075 * shadowed hardware producer ring from tg3_recycle_skb() are 7076 * ordered correctly WRT the skb check above. 7077 */ 7078 smp_rmb(); 7079 7080 memcpy(&dpr->rx_std_buffers[di], 7081 &spr->rx_std_buffers[si], 7082 cpycnt * sizeof(struct ring_info)); 7083 7084 for (i = 0; i < cpycnt; i++, di++, si++) { 7085 struct tg3_rx_buffer_desc *sbd, *dbd; 7086 sbd = &spr->rx_std[si]; 7087 dbd = &dpr->rx_std[di]; 7088 dbd->addr_hi = sbd->addr_hi; 7089 dbd->addr_lo = sbd->addr_lo; 7090 } 7091 7092 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) & 7093 tp->rx_std_ring_mask; 7094 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) & 7095 tp->rx_std_ring_mask; 7096 } 7097 7098 while (1) { 7099 src_prod_idx = spr->rx_jmb_prod_idx; 7100 7101 /* Make sure updates to the rx_jmb_buffers[] entries and 7102 * the jumbo producer index are seen in the correct order. 
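 *
 * (Worked example of the cpycnt wrap arithmetic used by these copy
 * loops, assuming a hypothetical 512-entry ring, ring mask 511, and
 * ignoring the destination-space clamp: with cons_idx == 500 and
 * prod_idx == 10 the producer has wrapped, so the else branch picks
 * cpycnt = 512 - 500 = 12 entries up to the end of the ring, the
 * consumer index then wraps to 0, and the next loop iteration
 * copies the remaining 10.)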
7103 */ 7104 smp_rmb(); 7105 7106 if (spr->rx_jmb_cons_idx == src_prod_idx) 7107 break; 7108 7109 if (spr->rx_jmb_cons_idx < src_prod_idx) 7110 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; 7111 else 7112 cpycnt = tp->rx_jmb_ring_mask + 1 - 7113 spr->rx_jmb_cons_idx; 7114 7115 cpycnt = min(cpycnt, 7116 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx); 7117 7118 si = spr->rx_jmb_cons_idx; 7119 di = dpr->rx_jmb_prod_idx; 7120 7121 for (i = di; i < di + cpycnt; i++) { 7122 if (dpr->rx_jmb_buffers[i].data) { 7123 cpycnt = i - di; 7124 err = -ENOSPC; 7125 break; 7126 } 7127 } 7128 7129 if (!cpycnt) 7130 break; 7131 7132 /* Ensure that updates to the rx_jmb_buffers ring and the 7133 * shadowed hardware producer ring from tg3_recycle_skb() are 7134 * ordered correctly WRT the skb check above. 7135 */ 7136 smp_rmb(); 7137 7138 memcpy(&dpr->rx_jmb_buffers[di], 7139 &spr->rx_jmb_buffers[si], 7140 cpycnt * sizeof(struct ring_info)); 7141 7142 for (i = 0; i < cpycnt; i++, di++, si++) { 7143 struct tg3_rx_buffer_desc *sbd, *dbd; 7144 sbd = &spr->rx_jmb[si].std; 7145 dbd = &dpr->rx_jmb[di].std; 7146 dbd->addr_hi = sbd->addr_hi; 7147 dbd->addr_lo = sbd->addr_lo; 7148 } 7149 7150 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) & 7151 tp->rx_jmb_ring_mask; 7152 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) & 7153 tp->rx_jmb_ring_mask; 7154 } 7155 7156 return err; 7157 } 7158 7159 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) 7160 { 7161 struct tg3 *tp = tnapi->tp; 7162 7163 /* run TX completion thread */ 7164 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { 7165 tg3_tx(tnapi); 7166 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) 7167 return work_done; 7168 } 7169 7170 if (!tnapi->rx_rcb_prod_idx) 7171 return work_done; 7172 7173 /* run RX thread, within the bounds set by NAPI. 
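 * (Budget accounting sketch: the rx pass below is bounded by
 * budget - work_done, so work_done can never exceed budget; e.g.
 * with budget == 64 and no rx work counted yet, at most 64 packets
 * are consumed here. Note that the tx completions handled above are
 * not charged against the NAPI budget.)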
7174 * All RX "locking" is done by ensuring outside 7175 * code synchronizes with tg3->napi.poll() 7176 */ 7177 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) 7178 work_done += tg3_rx(tnapi, budget - work_done); 7179 7180 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) { 7181 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring; 7182 int i, err = 0; 7183 u32 std_prod_idx = dpr->rx_std_prod_idx; 7184 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; 7185 7186 tp->rx_refill = false; 7187 for (i = 1; i <= tp->rxq_cnt; i++) 7188 err |= tg3_rx_prodring_xfer(tp, dpr, 7189 &tp->napi[i].prodring); 7190 7191 wmb(); 7192 7193 if (std_prod_idx != dpr->rx_std_prod_idx) 7194 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 7195 dpr->rx_std_prod_idx); 7196 7197 if (jmb_prod_idx != dpr->rx_jmb_prod_idx) 7198 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 7199 dpr->rx_jmb_prod_idx); 7200 7201 if (err) 7202 tw32_f(HOSTCC_MODE, tp->coal_now); 7203 } 7204 7205 return work_done; 7206 } 7207 7208 static inline void tg3_reset_task_schedule(struct tg3 *tp) 7209 { 7210 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) 7211 schedule_work(&tp->reset_task); 7212 } 7213 7214 static inline void tg3_reset_task_cancel(struct tg3 *tp) 7215 { 7216 if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) 7217 cancel_work_sync(&tp->reset_task); 7218 tg3_flag_clear(tp, TX_RECOVERY_PENDING); 7219 } 7220 7221 static int tg3_poll_msix(struct napi_struct *napi, int budget) 7222 { 7223 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); 7224 struct tg3 *tp = tnapi->tp; 7225 int work_done = 0; 7226 struct tg3_hw_status *sblk = tnapi->hw_status; 7227 7228 while (1) { 7229 work_done = tg3_poll_work(tnapi, work_done, budget); 7230 7231 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) 7232 goto tx_recovery; 7233 7234 if (unlikely(work_done >= budget)) 7235 break; 7236 7237 /* tp->last_tag is used in tg3_int_reenable() below 7238 * to tell the hw how much work has been processed, 7239 * so we must read it before checking for more work. 7240 */ 7241 tnapi->last_tag = sblk->status_tag; 7242 tnapi->last_irq_tag = tnapi->last_tag; 7243 rmb(); 7244 7245 /* check for RX/TX work to do */ 7246 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons && 7247 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) { 7248 7249 /* This test here is not race free, but will reduce 7250 * the number of interrupts by looping again. 7251 */ 7252 if (tnapi == &tp->napi[1] && tp->rx_refill) 7253 continue; 7254 7255 napi_complete_done(napi, work_done); 7256 /* Reenable interrupts. */ 7257 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); 7258 7259 /* This test here is synchronized by napi_schedule() 7260 * and napi_complete() to close the race condition. 7261 */ 7262 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) { 7263 tw32(HOSTCC_MODE, tp->coalesce_mode | 7264 HOSTCC_MODE_ENABLE | 7265 tnapi->coal_now); 7266 } 7267 break; 7268 } 7269 } 7270 7271 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1); 7272 return work_done; 7273 7274 tx_recovery: 7275 /* work_done is guaranteed to be less than budget. */ 7276 napi_complete(napi); 7277 tg3_reset_task_schedule(tp); 7278 return work_done; 7279 } 7280 7281 static void tg3_process_error(struct tg3 *tp) 7282 { 7283 u32 val; 7284 bool real_error = false; 7285 7286 if (tg3_flag(tp, ERROR_PROCESSED)) 7287 return; 7288 7289 /* Check Flow Attention register */ 7290 val = tr32(HOSTCC_FLOW_ATTN); 7291 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) { 7292 netdev_err(tp->dev, "FLOW Attention error. 
Resetting chip.\n"); 7293 real_error = true; 7294 } 7295 7296 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) { 7297 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n"); 7298 real_error = true; 7299 } 7300 7301 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) { 7302 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n"); 7303 real_error = true; 7304 } 7305 7306 if (!real_error) 7307 return; 7308 7309 tg3_dump_state(tp); 7310 7311 tg3_flag_set(tp, ERROR_PROCESSED); 7312 tg3_reset_task_schedule(tp); 7313 } 7314 7315 static int tg3_poll(struct napi_struct *napi, int budget) 7316 { 7317 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); 7318 struct tg3 *tp = tnapi->tp; 7319 int work_done = 0; 7320 struct tg3_hw_status *sblk = tnapi->hw_status; 7321 7322 while (1) { 7323 if (sblk->status & SD_STATUS_ERROR) 7324 tg3_process_error(tp); 7325 7326 tg3_poll_link(tp); 7327 7328 work_done = tg3_poll_work(tnapi, work_done, budget); 7329 7330 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) 7331 goto tx_recovery; 7332 7333 if (unlikely(work_done >= budget)) 7334 break; 7335 7336 if (tg3_flag(tp, TAGGED_STATUS)) { 7337 /* tp->last_tag is used in tg3_int_reenable() below 7338 * to tell the hw how much work has been processed, 7339 * so we must read it before checking for more work. 7340 */ 7341 tnapi->last_tag = sblk->status_tag; 7342 tnapi->last_irq_tag = tnapi->last_tag; 7343 rmb(); 7344 } else 7345 sblk->status &= ~SD_STATUS_UPDATED; 7346 7347 if (likely(!tg3_has_work(tnapi))) { 7348 napi_complete_done(napi, work_done); 7349 tg3_int_reenable(tnapi); 7350 break; 7351 } 7352 } 7353 7354 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1); 7355 return work_done; 7356 7357 tx_recovery: 7358 /* work_done is guaranteed to be less than budget. 
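 * (This matters because of the NAPI contract: napi_complete() may
 * only be called when fewer than budget packets were processed.
 * The guarantee holds because tg3_poll_work() returns before doing
 * any rx work once TX_RECOVERY_PENDING is set, so work_done still
 * carries a value the loop above had already checked against
 * budget.)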
*/ 7359 napi_complete(napi); 7360 tg3_reset_task_schedule(tp); 7361 return work_done; 7362 } 7363 7364 static void tg3_napi_disable(struct tg3 *tp) 7365 { 7366 int i; 7367 7368 for (i = tp->irq_cnt - 1; i >= 0; i--) 7369 napi_disable(&tp->napi[i].napi); 7370 } 7371 7372 static void tg3_napi_enable(struct tg3 *tp) 7373 { 7374 int i; 7375 7376 for (i = 0; i < tp->irq_cnt; i++) 7377 napi_enable(&tp->napi[i].napi); 7378 } 7379 7380 static void tg3_napi_init(struct tg3 *tp) 7381 { 7382 int i; 7383 7384 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64); 7385 for (i = 1; i < tp->irq_cnt; i++) 7386 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64); 7387 } 7388 7389 static void tg3_napi_fini(struct tg3 *tp) 7390 { 7391 int i; 7392 7393 for (i = 0; i < tp->irq_cnt; i++) 7394 netif_napi_del(&tp->napi[i].napi); 7395 } 7396 7397 static inline void tg3_netif_stop(struct tg3 *tp) 7398 { 7399 netif_trans_update(tp->dev); /* prevent tx timeout */ 7400 tg3_napi_disable(tp); 7401 netif_carrier_off(tp->dev); 7402 netif_tx_disable(tp->dev); 7403 } 7404 7405 /* tp->lock must be held */ 7406 static inline void tg3_netif_start(struct tg3 *tp) 7407 { 7408 tg3_ptp_resume(tp); 7409 7410 /* NOTE: unconditional netif_tx_wake_all_queues is only 7411 * appropriate so long as all callers are assured to 7412 * have free tx slots (such as after tg3_init_hw) 7413 */ 7414 netif_tx_wake_all_queues(tp->dev); 7415 7416 if (tp->link_up) 7417 netif_carrier_on(tp->dev); 7418 7419 tg3_napi_enable(tp); 7420 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED; 7421 tg3_enable_ints(tp); 7422 } 7423 7424 static void tg3_irq_quiesce(struct tg3 *tp) 7425 __releases(tp->lock) 7426 __acquires(tp->lock) 7427 { 7428 int i; 7429 7430 BUG_ON(tp->irq_sync); 7431 7432 tp->irq_sync = 1; 7433 smp_mb(); 7434 7435 spin_unlock_bh(&tp->lock); 7436 7437 for (i = 0; i < tp->irq_cnt; i++) 7438 synchronize_irq(tp->napi[i].irq_vec); 7439 7440 spin_lock_bh(&tp->lock); 7441 } 7442 7443 /* Fully shutdown all tg3 driver activity elsewhere in the system. 7444 * If irq_sync is non-zero, then the IRQ handler must be synchronized 7445 * with as well. Most of the time, this is not necessary except when 7446 * shutting down the device. 7447 */ 7448 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync) 7449 { 7450 spin_lock_bh(&tp->lock); 7451 if (irq_sync) 7452 tg3_irq_quiesce(tp); 7453 } 7454 7455 static inline void tg3_full_unlock(struct tg3 *tp) 7456 { 7457 spin_unlock_bh(&tp->lock); 7458 } 7459 7460 /* One-shot MSI handler - Chip automatically disables interrupt 7461 * after sending MSI so driver doesn't have to do it. 7462 */ 7463 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) 7464 { 7465 struct tg3_napi *tnapi = dev_id; 7466 struct tg3 *tp = tnapi->tp; 7467 7468 prefetch(tnapi->hw_status); 7469 if (tnapi->rx_rcb) 7470 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); 7471 7472 if (likely(!tg3_irq_sync(tp))) 7473 napi_schedule(&tnapi->napi); 7474 7475 return IRQ_HANDLED; 7476 } 7477 7478 /* MSI ISR - No need to check for interrupt sharing and no need to 7479 * flush status block and interrupt mailbox. PCI ordering rules 7480 * guarantee that MSI will arrive after the status block. 
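 *
 * (Sequence sketch: the NIC first DMA-writes the status block to
 * host memory, then signals MSI, itself a posted memory write; PCI
 * ordering keeps the two in order, so by the time this handler runs
 * the status block is already current. Contrast tg3_interrupt()
 * below, which in INTx mode must read TG3PCI_PCISTATE to flush and
 * validate the status block.)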
7481 */ 7482 static irqreturn_t tg3_msi(int irq, void *dev_id) 7483 { 7484 struct tg3_napi *tnapi = dev_id; 7485 struct tg3 *tp = tnapi->tp; 7486 7487 prefetch(tnapi->hw_status); 7488 if (tnapi->rx_rcb) 7489 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); 7490 /* 7491 * Writing any value to intr-mbox-0 clears PCI INTA# and 7492 * chip-internal interrupt pending events. 7493 * Writing non-zero to intr-mbox-0 additionally tells the 7494 * NIC to stop sending us irqs, engaging "in-intr-handler" 7495 * event coalescing. 7496 */ 7497 tw32_mailbox(tnapi->int_mbox, 0x00000001); 7498 if (likely(!tg3_irq_sync(tp))) 7499 napi_schedule(&tnapi->napi); 7500 7501 return IRQ_RETVAL(1); 7502 } 7503 7504 static irqreturn_t tg3_interrupt(int irq, void *dev_id) 7505 { 7506 struct tg3_napi *tnapi = dev_id; 7507 struct tg3 *tp = tnapi->tp; 7508 struct tg3_hw_status *sblk = tnapi->hw_status; 7509 unsigned int handled = 1; 7510 7511 /* In INTx mode, it is possible for the interrupt to arrive at 7512 * the CPU before the status block posted prior to the interrupt. 7513 * Reading the PCI State register will confirm whether the 7514 * interrupt is ours and will flush the status block. 7515 */ 7516 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) { 7517 if (tg3_flag(tp, CHIP_RESETTING) || 7518 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { 7519 handled = 0; 7520 goto out; 7521 } 7522 } 7523 7524 /* 7525 * Writing any value to intr-mbox-0 clears PCI INTA# and 7526 * chip-internal interrupt pending events. 7527 * Writing non-zero to intr-mbox-0 additionally tells the 7528 * NIC to stop sending us irqs, engaging "in-intr-handler" 7529 * event coalescing. 7530 * 7531 * Flush the mailbox to de-assert the IRQ immediately to prevent 7532 * spurious interrupts. The flush impacts performance but 7533 * excessive spurious interrupts can be worse in some cases. 7534 */ 7535 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); 7536 if (tg3_irq_sync(tp)) 7537 goto out; 7538 sblk->status &= ~SD_STATUS_UPDATED; 7539 if (likely(tg3_has_work(tnapi))) { 7540 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); 7541 napi_schedule(&tnapi->napi); 7542 } else { 7543 /* No work, shared interrupt perhaps? re-enable 7544 * interrupts, and flush that PCI write 7545 */ 7546 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 7547 0x00000000); 7548 } 7549 out: 7550 return IRQ_RETVAL(handled); 7551 } 7552 7553 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) 7554 { 7555 struct tg3_napi *tnapi = dev_id; 7556 struct tg3 *tp = tnapi->tp; 7557 struct tg3_hw_status *sblk = tnapi->hw_status; 7558 unsigned int handled = 1; 7559 7560 /* In INTx mode, it is possible for the interrupt to arrive at 7561 * the CPU before the status block posted prior to the interrupt. 7562 * Reading the PCI State register will confirm whether the 7563 * interrupt is ours and will flush the status block. 7564 */ 7565 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) { 7566 if (tg3_flag(tp, CHIP_RESETTING) || 7567 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { 7568 handled = 0; 7569 goto out; 7570 } 7571 } 7572 7573 /* 7574 * Writing any value to intr-mbox-0 clears PCI INTA# and 7575 * chip-internal interrupt pending events. 7576 * Writing non-zero to intr-mbox-0 additionally tells the 7577 * NIC to stop sending us irqs, engaging "in-intr-handler" 7578 * event coalescing. 7579 * 7580 * Flush the mailbox to de-assert the IRQ immediately to prevent 7581 * spurious interrupts. 
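 * (Illustrative note on the two mailbox write forms used in these
 * handlers, assuming the tw32_mailbox()/tw32_mailbox_f() helpers
 * defined earlier in this file: the plain form is a posted write
 * that may reach the device late, while the "_f" form also reads
 * the register back, forcing the write out so the interrupt line is
 * de-asserted before the handler returns.)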
The flush impacts performance but 7582 * excessive spurious interrupts can be worse in some cases. 7583 */ 7584 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); 7585 7586 /* 7587 * In a shared interrupt configuration, sometimes other devices' 7588 * interrupts will scream. We record the current status tag here 7589 * so that the above check can report that the screaming interrupts 7590 * are unhandled. Eventually they will be silenced. 7591 */ 7592 tnapi->last_irq_tag = sblk->status_tag; 7593 7594 if (tg3_irq_sync(tp)) 7595 goto out; 7596 7597 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); 7598 7599 napi_schedule(&tnapi->napi); 7600 7601 out: 7602 return IRQ_RETVAL(handled); 7603 } 7604 7605 /* ISR for interrupt test */ 7606 static irqreturn_t tg3_test_isr(int irq, void *dev_id) 7607 { 7608 struct tg3_napi *tnapi = dev_id; 7609 struct tg3 *tp = tnapi->tp; 7610 struct tg3_hw_status *sblk = tnapi->hw_status; 7611 7612 if ((sblk->status & SD_STATUS_UPDATED) || 7613 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { 7614 tg3_disable_ints(tp); 7615 return IRQ_RETVAL(1); 7616 } 7617 return IRQ_RETVAL(0); 7618 } 7619 7620 #ifdef CONFIG_NET_POLL_CONTROLLER 7621 static void tg3_poll_controller(struct net_device *dev) 7622 { 7623 int i; 7624 struct tg3 *tp = netdev_priv(dev); 7625 7626 if (tg3_irq_sync(tp)) 7627 return; 7628 7629 for (i = 0; i < tp->irq_cnt; i++) 7630 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]); 7631 } 7632 #endif 7633 7634 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue) 7635 { 7636 struct tg3 *tp = netdev_priv(dev); 7637 7638 if (netif_msg_tx_err(tp)) { 7639 netdev_err(dev, "transmit timed out, resetting\n"); 7640 tg3_dump_state(tp); 7641 } 7642 7643 tg3_reset_task_schedule(tp); 7644 } 7645 7646 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */ 7647 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) 7648 { 7649 u32 base = (u32) mapping & 0xffffffff; 7650 7651 return base + len + 8 < base; 7652 } 7653 7654 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes 7655 * of any 4GB boundaries: 4G, 8G, etc 7656 */ 7657 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping, 7658 u32 len, u32 mss) 7659 { 7660 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) { 7661 u32 base = (u32) mapping & 0xffffffff; 7662 7663 return ((base + len + (mss & 0x3fff)) < base); 7664 } 7665 return 0; 7666 } 7667 7668 /* Test for DMA addresses > 40-bit */ 7669 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, 7670 int len) 7671 { 7672 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) 7673 if (tg3_flag(tp, 40BIT_DMA_BUG)) 7674 return ((u64) mapping + len) > DMA_BIT_MASK(40); 7675 return 0; 7676 #else 7677 return 0; 7678 #endif 7679 } 7680 7681 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd, 7682 dma_addr_t mapping, u32 len, u32 flags, 7683 u32 mss, u32 vlan) 7684 { 7685 txbd->addr_hi = ((u64) mapping >> 32); 7686 txbd->addr_lo = ((u64) mapping & 0xffffffff); 7687 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff); 7688 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT); 7689 } 7690 7691 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, 7692 dma_addr_t map, u32 len, u32 flags, 7693 u32 mss, u32 vlan) 7694 { 7695 struct tg3 *tp = tnapi->tp; 7696 bool hwbug = false; 7697 7698 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8) 7699 hwbug = true; 7700 7701 if (tg3_4g_overflow_test(map, 
len)) 7702 hwbug = true; 7703 7704 if (tg3_4g_tso_overflow_test(tp, map, len, mss)) 7705 hwbug = true; 7706 7707 if (tg3_40bit_overflow_test(tp, map, len)) 7708 hwbug = true; 7709 7710 if (tp->dma_limit) { 7711 u32 prvidx = *entry; 7712 u32 tmp_flag = flags & ~TXD_FLAG_END; 7713 while (len > tp->dma_limit && *budget) { 7714 u32 frag_len = tp->dma_limit; 7715 len -= tp->dma_limit; 7716 7717 /* Avoid the 8byte DMA problem */ 7718 if (len <= 8) { 7719 len += tp->dma_limit / 2; 7720 frag_len = tp->dma_limit / 2; 7721 } 7722 7723 tnapi->tx_buffers[*entry].fragmented = true; 7724 7725 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, 7726 frag_len, tmp_flag, mss, vlan); 7727 *budget -= 1; 7728 prvidx = *entry; 7729 *entry = NEXT_TX(*entry); 7730 7731 map += frag_len; 7732 } 7733 7734 if (len) { 7735 if (*budget) { 7736 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, 7737 len, flags, mss, vlan); 7738 *budget -= 1; 7739 *entry = NEXT_TX(*entry); 7740 } else { 7741 hwbug = true; 7742 tnapi->tx_buffers[prvidx].fragmented = false; 7743 } 7744 } 7745 } else { 7746 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, 7747 len, flags, mss, vlan); 7748 *entry = NEXT_TX(*entry); 7749 } 7750 7751 return hwbug; 7752 } 7753 7754 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last) 7755 { 7756 int i; 7757 struct sk_buff *skb; 7758 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry]; 7759 7760 skb = txb->skb; 7761 txb->skb = NULL; 7762 7763 dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping), 7764 skb_headlen(skb), DMA_TO_DEVICE); 7765 7766 while (txb->fragmented) { 7767 txb->fragmented = false; 7768 entry = NEXT_TX(entry); 7769 txb = &tnapi->tx_buffers[entry]; 7770 } 7771 7772 for (i = 0; i <= last; i++) { 7773 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 7774 7775 entry = NEXT_TX(entry); 7776 txb = &tnapi->tx_buffers[entry]; 7777 7778 dma_unmap_page(&tnapi->tp->pdev->dev, 7779 dma_unmap_addr(txb, mapping), 7780 skb_frag_size(frag), DMA_TO_DEVICE); 7781 7782 while (txb->fragmented) { 7783 txb->fragmented = false; 7784 entry = NEXT_TX(entry); 7785 txb = &tnapi->tx_buffers[entry]; 7786 } 7787 } 7788 } 7789 7790 /* Workaround 4GB and 40-bit hardware DMA bugs. */ 7791 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, 7792 struct sk_buff **pskb, 7793 u32 *entry, u32 *budget, 7794 u32 base_flags, u32 mss, u32 vlan) 7795 { 7796 struct tg3 *tp = tnapi->tp; 7797 struct sk_buff *new_skb, *skb = *pskb; 7798 dma_addr_t new_addr = 0; 7799 int ret = 0; 7800 7801 if (tg3_asic_rev(tp) != ASIC_REV_5701) 7802 new_skb = skb_copy(skb, GFP_ATOMIC); 7803 else { 7804 int more_headroom = 4 - ((unsigned long)skb->data & 3); 7805 7806 new_skb = skb_copy_expand(skb, 7807 skb_headroom(skb) + more_headroom, 7808 skb_tailroom(skb), GFP_ATOMIC); 7809 } 7810 7811 if (!new_skb) { 7812 ret = -1; 7813 } else { 7814 /* New SKB is guaranteed to be linear. 
*/ 7815 new_addr = dma_map_single(&tp->pdev->dev, new_skb->data, 7816 new_skb->len, DMA_TO_DEVICE); 7817 /* Make sure the mapping succeeded */ 7818 if (dma_mapping_error(&tp->pdev->dev, new_addr)) { 7819 dev_kfree_skb_any(new_skb); 7820 ret = -1; 7821 } else { 7822 u32 save_entry = *entry; 7823 7824 base_flags |= TXD_FLAG_END; 7825 7826 tnapi->tx_buffers[*entry].skb = new_skb; 7827 dma_unmap_addr_set(&tnapi->tx_buffers[*entry], 7828 mapping, new_addr); 7829 7830 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr, 7831 new_skb->len, base_flags, 7832 mss, vlan)) { 7833 tg3_tx_skb_unmap(tnapi, save_entry, -1); 7834 dev_kfree_skb_any(new_skb); 7835 ret = -1; 7836 } 7837 } 7838 } 7839 7840 dev_consume_skb_any(skb); 7841 *pskb = new_skb; 7842 return ret; 7843 } 7844 7845 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb) 7846 { 7847 /* Check if we will never have enough descriptors, 7848 * as gso_segs can be more than current ring size 7849 */ 7850 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3; 7851 } 7852 7853 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); 7854 7855 /* Use GSO to work around all TSO packets that meet HW bug conditions 7856 * indicated in tg3_tx_frag_set() 7857 */ 7858 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi, 7859 struct netdev_queue *txq, struct sk_buff *skb) 7860 { 7861 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3; 7862 struct sk_buff *segs, *seg, *next; 7863 7864 /* Estimate the number of fragments in the worst case */ 7865 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) { 7866 netif_tx_stop_queue(txq); 7867 7868 /* netif_tx_stop_queue() must be done before 7869 * checking the tx index in tg3_tx_avail() below, because in 7870 * tg3_tx(), we update the tx index before checking for 7871 * netif_tx_queue_stopped(). 7872 */ 7873 smp_mb(); 7874 if (tg3_tx_avail(tnapi) <= frag_cnt_est) 7875 return NETDEV_TX_BUSY; 7876 7877 netif_tx_wake_queue(txq); 7878 } 7879 7880 segs = skb_gso_segment(skb, tp->dev->features & 7881 ~(NETIF_F_TSO | NETIF_F_TSO6)); 7882 if (IS_ERR(segs) || !segs) 7883 goto tg3_tso_bug_end; 7884 7885 skb_list_walk_safe(segs, seg, next) { 7886 skb_mark_not_on_list(seg); 7887 tg3_start_xmit(seg, tp->dev); 7888 } 7889 7890 tg3_tso_bug_end: 7891 dev_consume_skb_any(skb); 7892 7893 return NETDEV_TX_OK; 7894 } 7895 7896 /* hard_start_xmit for all devices */ 7897 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) 7898 { 7899 struct tg3 *tp = netdev_priv(dev); 7900 u32 len, entry, base_flags, mss, vlan = 0; 7901 u32 budget; 7902 int i = -1, would_hit_hwbug; 7903 dma_addr_t mapping; 7904 struct tg3_napi *tnapi; 7905 struct netdev_queue *txq; 7906 unsigned int last; 7907 struct iphdr *iph = NULL; 7908 struct tcphdr *tcph = NULL; 7909 __sum16 tcp_csum = 0, ip_csum = 0; 7910 __be16 ip_tot_len = 0; 7911 7912 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 7913 tnapi = &tp->napi[skb_get_queue_mapping(skb)]; 7914 if (tg3_flag(tp, ENABLE_TSS)) 7915 tnapi++; 7916 7917 budget = tg3_tx_avail(tnapi); 7918 7919 /* We are running in BH disabled context with netif_tx_lock 7920 * and TX reclaim runs via tp->napi.poll inside of a software 7921 * interrupt. Furthermore, IRQ processing runs lockless so we have 7922 * no IRQ context deadlocks to worry about either. Rejoice! 7923 */ 7924 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) { 7925 if (!netif_tx_queue_stopped(txq)) { 7926 netif_tx_stop_queue(txq); 7927 7928 /* This is a hard error, log it. 
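 * It is a "can't happen" case because of the invariant kept at the
 * bottom of this function: the queue is stopped whenever fewer than
 * MAX_SKB_FRAGS + 1 descriptors remain free, and a linear head plus
 * its page fragments needs at most nr_frags + 1 of them (leaving
 * aside the dma_limit splitting that tg3_tx_frag_set() budgets for
 * separately).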
*/ 7929 netdev_err(dev, 7930 "BUG! Tx Ring full when queue awake!\n"); 7931 } 7932 return NETDEV_TX_BUSY; 7933 } 7934 7935 entry = tnapi->tx_prod; 7936 base_flags = 0; 7937 7938 mss = skb_shinfo(skb)->gso_size; 7939 if (mss) { 7940 u32 tcp_opt_len, hdr_len; 7941 7942 if (skb_cow_head(skb, 0)) 7943 goto drop; 7944 7945 iph = ip_hdr(skb); 7946 tcp_opt_len = tcp_optlen(skb); 7947 7948 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; 7949 7950 /* HW/FW can not correctly segment packets that have been 7951 * vlan encapsulated. 7952 */ 7953 if (skb->protocol == htons(ETH_P_8021Q) || 7954 skb->protocol == htons(ETH_P_8021AD)) { 7955 if (tg3_tso_bug_gso_check(tnapi, skb)) 7956 return tg3_tso_bug(tp, tnapi, txq, skb); 7957 goto drop; 7958 } 7959 7960 if (!skb_is_gso_v6(skb)) { 7961 if (unlikely((ETH_HLEN + hdr_len) > 80) && 7962 tg3_flag(tp, TSO_BUG)) { 7963 if (tg3_tso_bug_gso_check(tnapi, skb)) 7964 return tg3_tso_bug(tp, tnapi, txq, skb); 7965 goto drop; 7966 } 7967 ip_csum = iph->check; 7968 ip_tot_len = iph->tot_len; 7969 iph->check = 0; 7970 iph->tot_len = htons(mss + hdr_len); 7971 } 7972 7973 base_flags |= (TXD_FLAG_CPU_PRE_DMA | 7974 TXD_FLAG_CPU_POST_DMA); 7975 7976 tcph = tcp_hdr(skb); 7977 tcp_csum = tcph->check; 7978 7979 if (tg3_flag(tp, HW_TSO_1) || 7980 tg3_flag(tp, HW_TSO_2) || 7981 tg3_flag(tp, HW_TSO_3)) { 7982 tcph->check = 0; 7983 base_flags &= ~TXD_FLAG_TCPUDP_CSUM; 7984 } else { 7985 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 7986 0, IPPROTO_TCP, 0); 7987 } 7988 7989 if (tg3_flag(tp, HW_TSO_3)) { 7990 mss |= (hdr_len & 0xc) << 12; 7991 if (hdr_len & 0x10) 7992 base_flags |= 0x00000010; 7993 base_flags |= (hdr_len & 0x3e0) << 5; 7994 } else if (tg3_flag(tp, HW_TSO_2)) 7995 mss |= hdr_len << 9; 7996 else if (tg3_flag(tp, HW_TSO_1) || 7997 tg3_asic_rev(tp) == ASIC_REV_5705) { 7998 if (tcp_opt_len || iph->ihl > 5) { 7999 int tsflags; 8000 8001 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); 8002 mss |= (tsflags << 11); 8003 } 8004 } else { 8005 if (tcp_opt_len || iph->ihl > 5) { 8006 int tsflags; 8007 8008 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); 8009 base_flags |= tsflags << 12; 8010 } 8011 } 8012 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 8013 /* HW/FW can not correctly checksum packets that have been 8014 * vlan encapsulated. 8015 */ 8016 if (skb->protocol == htons(ETH_P_8021Q) || 8017 skb->protocol == htons(ETH_P_8021AD)) { 8018 if (skb_checksum_help(skb)) 8019 goto drop; 8020 } else { 8021 base_flags |= TXD_FLAG_TCPUDP_CSUM; 8022 } 8023 } 8024 8025 if (tg3_flag(tp, USE_JUMBO_BDFLAG) && 8026 !mss && skb->len > VLAN_ETH_FRAME_LEN) 8027 base_flags |= TXD_FLAG_JMB_PKT; 8028 8029 if (skb_vlan_tag_present(skb)) { 8030 base_flags |= TXD_FLAG_VLAN; 8031 vlan = skb_vlan_tag_get(skb); 8032 } 8033 8034 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) && 8035 tg3_flag(tp, TX_TSTAMP_EN)) { 8036 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 8037 base_flags |= TXD_FLAG_HWTSTAMP; 8038 } 8039 8040 len = skb_headlen(skb); 8041 8042 mapping = dma_map_single(&tp->pdev->dev, skb->data, len, 8043 DMA_TO_DEVICE); 8044 if (dma_mapping_error(&tp->pdev->dev, mapping)) 8045 goto drop; 8046 8047 8048 tnapi->tx_buffers[entry].skb = skb; 8049 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); 8050 8051 would_hit_hwbug = 0; 8052 8053 if (tg3_flag(tp, 5701_DMA_BUG)) 8054 would_hit_hwbug = 1; 8055 8056 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags | 8057 ((skb_shinfo(skb)->nr_frags == 0) ? 
TXD_FLAG_END : 0), 8058 mss, vlan)) { 8059 would_hit_hwbug = 1; 8060 } else if (skb_shinfo(skb)->nr_frags > 0) { 8061 u32 tmp_mss = mss; 8062 8063 if (!tg3_flag(tp, HW_TSO_1) && 8064 !tg3_flag(tp, HW_TSO_2) && 8065 !tg3_flag(tp, HW_TSO_3)) 8066 tmp_mss = 0; 8067 8068 /* Now loop through additional data 8069 * fragments, and queue them. 8070 */ 8071 last = skb_shinfo(skb)->nr_frags - 1; 8072 for (i = 0; i <= last; i++) { 8073 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 8074 8075 len = skb_frag_size(frag); 8076 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0, 8077 len, DMA_TO_DEVICE); 8078 8079 tnapi->tx_buffers[entry].skb = NULL; 8080 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, 8081 mapping); 8082 if (dma_mapping_error(&tp->pdev->dev, mapping)) 8083 goto dma_error; 8084 8085 if (!budget || 8086 tg3_tx_frag_set(tnapi, &entry, &budget, mapping, 8087 len, base_flags | 8088 ((i == last) ? TXD_FLAG_END : 0), 8089 tmp_mss, vlan)) { 8090 would_hit_hwbug = 1; 8091 break; 8092 } 8093 } 8094 } 8095 8096 if (would_hit_hwbug) { 8097 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); 8098 8099 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) { 8100 /* If it's a TSO packet, do GSO instead of 8101 * allocating and copying to a large linear SKB 8102 */ 8103 if (ip_tot_len) { 8104 iph->check = ip_csum; 8105 iph->tot_len = ip_tot_len; 8106 } 8107 tcph->check = tcp_csum; 8108 return tg3_tso_bug(tp, tnapi, txq, skb); 8109 } 8110 8111 /* If the workaround fails due to memory/mapping 8112 * failure, silently drop this packet. 8113 */ 8114 entry = tnapi->tx_prod; 8115 budget = tg3_tx_avail(tnapi); 8116 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget, 8117 base_flags, mss, vlan)) 8118 goto drop_nofree; 8119 } 8120 8121 skb_tx_timestamp(skb); 8122 netdev_tx_sent_queue(txq, skb->len); 8123 8124 /* Sync BD data before updating mailbox */ 8125 wmb(); 8126 8127 tnapi->tx_prod = entry; 8128 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { 8129 netif_tx_stop_queue(txq); 8130 8131 /* netif_tx_stop_queue() must be done before 8132 * checking the tx index in tg3_tx_avail() below, because in 8133 * tg3_tx(), we update the tx index before checking for 8134 * netif_tx_queue_stopped(). 8135 */ 8136 smp_mb(); 8137 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) 8138 netif_tx_wake_queue(txq); 8139 } 8140 8141 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) { 8142 /* Packets are ready, update Tx producer idx on card. 
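 * This is also the xmit_more batching point: while the stack
 * signals more packets are coming (netdev_xmit_more()) and the
 * queue is awake, the doorbell below is skipped and one mailbox
 * write later covers the whole burst, e.g.:
 *
 *	skb1: xmit_more set	descriptors posted, no doorbell
 *	skb2: xmit_more set	descriptors posted, no doorbell
 *	skb3: last of burst	tw32_tx_mbox(prodmbox, entry) once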
		tw32_tx_mbox(tnapi->prodmbox, entry);
	}

	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb_any(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}

static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}

static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, false);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}
	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}

static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, true);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}

static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}

static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}

static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}

/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.
	 * This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}

static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}

static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
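
/* Lifecycle note (a summary derived from the four helpers above):
 * tg3_rx_prodring_init() allocates the ring_info arrays and the coherent
 * DMA descriptor rings; tg3_rx_prodring_alloc() fills in the descriptor
 * invariants and posts fresh data buffers; tg3_rx_prodring_free() releases
 * only the posted data buffers; tg3_rx_prodring_fini() tears the rings
 * themselves back down.
 */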
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_consume_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tnapi->prodring.rx_std &&
		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}

static void tg3_mem_tx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;
	}
}
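
/* Note: the netdev_tx_reset_queue() call in tg3_free_rings() above resets
 * the queue's byte queue limit (BQL) state.  It is needed because the
 * pending tx skbs were just freed without the completion-side accounting
 * (netdev_tx_completed_queue()) that the normal tx path would presumably
 * have performed.
 */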
static int tg3_mem_tx_acquire(struct tg3 *tp)
{
	int i;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* If multivector TSS is enabled, vector 0 does not handle
	 * tx interrupts.  Don't allocate any resources for it.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
		tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
					    sizeof(struct tg3_tx_ring_info),
					    GFP_KERNEL);
		if (!tnapi->tx_buffers)
			goto err_out;

		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
						    TG3_TX_RING_BYTES,
						    &tnapi->tx_desc_mapping,
						    GFP_KERNEL);
		if (!tnapi->tx_ring)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_tx_release(tp);
	return -ENOMEM;
}

static void tg3_mem_rx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (!tnapi->rx_rcb)
			continue;

		dma_free_coherent(&tp->pdev->dev,
				  TG3_RX_RCB_RING_BYTES(tp),
				  tnapi->rx_rcb,
				  tnapi->rx_rcb_mapping);
		tnapi->rx_rcb = NULL;
	}
}

static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero.  This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	/* tp->hw_stats can be referenced safely:
	 * 1. under rtnl_lock
	 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
	 */
	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
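
/* Note: the status blocks, tx/rx descriptor rings, and the hw_stats block
 * released above all live in coherent (dma_alloc_coherent) memory, so the
 * NIC and the CPU share them without explicit dma_sync_* calls.
 */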
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping, GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}

#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		if (pci_channel_offline(tp->pdev)) {
			dev_err(&tp->pdev->dev,
				"tg3_stop_block device offline, "
				"ofs=%lx enable_bit=%x\n",
				ofs, enable_bit);
			return -ENODEV;
		}

		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
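
/* Timing note: the poll loop in tg3_stop_block() above waits at most
 * MAX_WAIT_CNT * 100us = 1000 * 100us = 100ms for the enable bit to clear
 * before declaring the block stuck.
 */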
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, bool silent)
{
	int i, err;

	tg3_disable_ints(tp);

	if (pci_channel_offline(tp->pdev)) {
		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
		err = -ENODEV;
		goto err_no_dev;
	}

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

err_no_dev:
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}

/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}

/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}

static void tg3_override_clk(struct tg3 *tp)
{
	u32 val;

	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5717:
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
		     TG3_CPMU_MAC_ORIDE_ENABLE);
		break;

	case ASIC_REV_5719:
	case ASIC_REV_5720:
		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
		break;

	default:
		return;
	}
}

static void tg3_restore_clk(struct tg3 *tp)
{
	u32 val;

	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5717:
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
		break;

	case ASIC_REV_5719:
	case ASIC_REV_5720:
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
		break;

	default:
		return;
	}
}
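
/* Locking note: tg3_chip_reset() below carries __releases/__acquires
 * annotations because it temporarily drops tp->lock (via tg3_full_unlock())
 * so it can call synchronize_irq() for every vector while the
 * CHIP_RESETTING flag fences the irq handlers, then retakes the lock
 * before continuing.
 */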
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	if (!pci_device_is_present(tp->pdev))
		return -ENODEV;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	tg3_full_unlock(tp);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	tg3_full_lock(tp, 0);

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Set the clock to the highest frequency to avoid timeouts.  With link
	 * aware mode, the clock speed could be slow and bootcode does not
	 * complete within the expected time.  Override the clock to allow the
	 * bootcode to finish sooner and then restore it.
	 */
	tg3_override_clk(tp);

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);
	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to do this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete. */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
					   PCI_EXP_DEVSTA_CED |
					   PCI_EXP_DEVSTA_NFED |
					   PCI_EXP_DEVSTA_FED |
					   PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tw32(GRC_MODE, tp->grc_mode);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	tg3_restore_clk(tp);

	/* Increase the core clock speed to fix tx timeout issue for 5762
	 * with 100Mbps link speed.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
		     TG3_CPMU_MAC_ORIDE_ENABLE);
	}
	/* Reprobe ASF enable state. */
	tg3_flag_clear(tp, ENABLE_ASF);
	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
		}
	}

	return 0;
}

static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
static void __tg3_set_rx_mode(struct net_device *);

/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, false);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	return err;
}

static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0;
	bool skip_mac_1 = false;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(dev, addr->sa_data);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = true;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	__tg3_set_rx_mode(dev);
	spin_unlock_bh(&tp->lock);

	return err;
}
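
/* Layout note for tg3_set_bdinfo() below: each TG3_BDINFO block in NIC
 * SRAM holds the 64-bit host DMA address of a ring (written as separate
 * high and low 32-bit words), a maxlen/flags word, and, on pre-5705
 * devices only, the NIC-memory address of the descriptors.
 */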
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}

static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

		for (; i < tp->txq_cnt; i++) {
			u32 reg;

			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}

static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;
	u32 limit = tp->rxq_cnt;

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
		limit--;
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	for (; i < limit; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}

static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	tg3_coal_tx_init(tp, ec);
	tg3_coal_rx_init(tp, ec);

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!tp->link_up)
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}
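
/* Register-stride note: in the two coalescing helpers above, the per-vector
 * HOSTCC_*_VEC1 registers are laid out 0x18 bytes apart, so vector i is
 * programmed at base + i * 0x18.
 */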
/* tp->lock is held. */
static void tg3_tx_rcbs_disable(struct tg3 *tp)
{
	u32 txrcb, limit;

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}

/* tp->lock is held. */
static void tg3_tx_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 txrcb = NIC_SRAM_SEND_RCB;

	if (tg3_flag(tp, ENABLE_TSS))
		i++;

	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->tx_ring)
			continue;

		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
	}
}

/* tp->lock is held. */
static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
{
	u32 rxrcb, limit;

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}

/* tp->lock is held. */
static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;

	if (tg3_flag(tp, ENABLE_RSS))
		i++;

	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->rx_rcb)
			continue;

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
			       BDINFO_FLAGS_MAXLEN_SHIFT, 0);
	}
}
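
/* Sequencing note: tg3_rings_reset() below first disables the extra send
 * and receive-return ring control blocks, then quiesces and zeroes the
 * mailbox registers and status blocks, reprograms the status block DMA
 * addresses, and only then rewrites the ring control blocks via the two
 * _init helpers above.
 */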
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk;
	struct tg3_napi *tnapi = &tp->napi[0];

	tg3_tx_rcbs_disable(tp);

	tg3_rx_ret_rcbs_disable(tp);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	tg3_tx_rcbs_init(tp);
	tg3_rx_ret_rcbs_init(tp);
}

static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}

static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= CRC32_POLY_LE;
		}
	}

	return ~reg;
}
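
/* calc_crc() above is a plain bit-at-a-time little-endian CRC-32 over the
 * buffer (reflected polynomial CRC32_POLY_LE = 0xedb88320, initial value
 * ~0, final inversion) -- presumably equivalent to ~crc32_le(~0, buf, len).
 * The multicast hash code below re-inverts the result and keeps only the
 * low 7 bits: bits 6:5 select one of the four 32-bit MAC_HASH registers
 * and bits 4:0 select the bit within that register.
 */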
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}

static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
		rx_mode |= RX_MODE_PROMISC;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into the mac addr filter list */
		int i = 0;
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, dev) {
			__tg3_set_one_mac_addr(tp, ha->addr,
					       i + TG3_UCAST_ADDR_IDX(tp));
			i++;
		}
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}

static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}

static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->rxq_cnt == 1) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}

static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}

static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
	else
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
}
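
/* Packing note: tg3_rss_write_indir_tbl() above packs eight 4-bit ring
 * indices per 32-bit MAC_RSS_INDIR_TBL register, first entry in the most
 * significant nibble, so the whole table spans TG3_RSS_INDIR_TBL_SIZE / 8
 * consecutive registers.
 */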
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
		tg3_phy_pull_config(tp);
		tg3_eee_pull_config(tp, NULL);
		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	}

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
		tg3_setup_eee(tp);

	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (tg3_flag(tp, 57765_CLASS)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
			u32 grc_mode;

			/* Fix transmit hangs */
			val = tr32(TG3_CPMU_PADRNG_CTL);
			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
			tw32(TG3_CPMU_PADRNG_CTL, val);

			grc_mode = tr32(GRC_MODE);
			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}

	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
		/* Enable some hw fixes. */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
		    tg3_asic_rev(tp) != ASIC_REV_5762)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
		   tg3_asic_rev(tp) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
	if (tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (tg3_flag(tp, PTP_CAPABLE))
		val |= GRC_MODE_TIME_SYNC_ENABLE;

	tw32(GRC_MODE, tp->grc_mode | val);

	/* On one of the AMD platforms, MRRS is restricted to 4000 because of
	 * a south bridge limitation.  As a workaround, the driver sets MRRS
	 * to 2048 instead of the default 4096.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
	}

	/* Set up the timer prescaler register.  The clock is always 66 MHz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);

	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing. */
	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (tg3_asic_rev(tp) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);

	/* Initialize TG3_BDINFO's at:
	 *    RCVDBDI_STD_BD:	standard eth size rx ring
RCVDBDI_JUMBO_BD: jumbo frame rx ring 10157 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work) 10158 * 10159 * like so: 10160 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring 10161 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) | 10162 * ring attribute flags 10163 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM 10164 * 10165 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries. 10166 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries. 10167 * 10168 * The size of each ring is fixed in the firmware, but the location is 10169 * configurable. 10170 */ 10171 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 10172 ((u64) tpr->rx_std_mapping >> 32)); 10173 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 10174 ((u64) tpr->rx_std_mapping & 0xffffffff)); 10175 if (!tg3_flag(tp, 5717_PLUS)) 10176 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, 10177 NIC_SRAM_RX_BUFFER_DESC); 10178 10179 /* Disable the mini ring */ 10180 if (!tg3_flag(tp, 5705_PLUS)) 10181 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, 10182 BDINFO_FLAGS_DISABLED); 10183 10184 /* Program the jumbo buffer descriptor ring control 10185 * blocks on those devices that have them. 10186 */ 10187 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 10188 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) { 10189 10190 if (tg3_flag(tp, JUMBO_RING_ENABLE)) { 10191 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 10192 ((u64) tpr->rx_jmb_mapping >> 32)); 10193 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 10194 ((u64) tpr->rx_jmb_mapping & 0xffffffff)); 10195 val = TG3_RX_JMB_RING_SIZE(tp) << 10196 BDINFO_FLAGS_MAXLEN_SHIFT; 10197 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 10198 val | BDINFO_FLAGS_USE_EXT_RECV); 10199 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) || 10200 tg3_flag(tp, 57765_CLASS) || 10201 tg3_asic_rev(tp) == ASIC_REV_5762) 10202 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, 10203 NIC_SRAM_RX_JUMBO_BUFFER_DESC); 10204 } else { 10205 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 10206 BDINFO_FLAGS_DISABLED); 10207 } 10208 10209 if (tg3_flag(tp, 57765_PLUS)) { 10210 val = TG3_RX_STD_RING_SIZE(tp); 10211 val <<= BDINFO_FLAGS_MAXLEN_SHIFT; 10212 val |= (TG3_RX_STD_DMA_SZ << 2); 10213 } else 10214 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT; 10215 } else 10216 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT; 10217 10218 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); 10219 10220 tpr->rx_std_prod_idx = tp->rx_pending; 10221 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); 10222 10223 tpr->rx_jmb_prod_idx = 10224 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0; 10225 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); 10226 10227 tg3_rings_reset(tp); 10228 10229 /* Initialize MAC address and backoff seed. */ 10230 __tg3_set_mac_addr(tp, false); 10231 10232 /* MTU + ethernet header + FCS + optional VLAN tag */ 10233 tw32(MAC_RX_MTU_SIZE, 10234 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); 10235 10236 /* The slot time is changed by tg3_setup_phy if we 10237 * run at gigabit with half duplex. 
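 * (Half-duplex gigabit uses a 4096-bit-time slot for collision
 * detection, versus 512 bit times at 10 and 100 Mb/s, so the
 * SLOT_TIME field must be enlarged in that case.)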
10238 */ 10239 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | 10240 (6 << TX_LENGTHS_IPG_SHIFT) | 10241 (32 << TX_LENGTHS_SLOT_TIME_SHIFT); 10242 10243 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10244 tg3_asic_rev(tp) == ASIC_REV_5762) 10245 val |= tr32(MAC_TX_LENGTHS) & 10246 (TX_LENGTHS_JMB_FRM_LEN_MSK | 10247 TX_LENGTHS_CNT_DWN_VAL_MSK); 10248 10249 tw32(MAC_TX_LENGTHS, val); 10250 10251 /* Receive rules. */ 10252 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); 10253 tw32(RCVLPC_CONFIG, 0x0181); 10254 10255 /* Calculate RDMAC_MODE setting early, we need it to determine 10256 * the RCVLPC_STATE_ENABLE mask. 10257 */ 10258 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB | 10259 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB | 10260 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB | 10261 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | 10262 RDMAC_MODE_LNGREAD_ENAB); 10263 10264 if (tg3_asic_rev(tp) == ASIC_REV_5717) 10265 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; 10266 10267 if (tg3_asic_rev(tp) == ASIC_REV_5784 || 10268 tg3_asic_rev(tp) == ASIC_REV_5785 || 10269 tg3_asic_rev(tp) == ASIC_REV_57780) 10270 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB | 10271 RDMAC_MODE_MBUF_RBD_CRPT_ENAB | 10272 RDMAC_MODE_MBUF_SBD_CRPT_ENAB; 10273 10274 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 10275 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 10276 if (tg3_flag(tp, TSO_CAPABLE)) { 10277 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; 10278 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 10279 !tg3_flag(tp, IS_5788)) { 10280 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 10281 } 10282 } 10283 10284 if (tg3_flag(tp, PCI_EXPRESS)) 10285 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 10286 10287 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 10288 tp->dma_limit = 0; 10289 if (tp->dev->mtu <= ETH_DATA_LEN) { 10290 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR; 10291 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K; 10292 } 10293 } 10294 10295 if (tg3_flag(tp, HW_TSO_1) || 10296 tg3_flag(tp, HW_TSO_2) || 10297 tg3_flag(tp, HW_TSO_3)) 10298 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; 10299 10300 if (tg3_flag(tp, 57765_PLUS) || 10301 tg3_asic_rev(tp) == ASIC_REV_5785 || 10302 tg3_asic_rev(tp) == ASIC_REV_57780) 10303 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; 10304 10305 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10306 tg3_asic_rev(tp) == ASIC_REV_5762) 10307 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET; 10308 10309 if (tg3_asic_rev(tp) == ASIC_REV_5761 || 10310 tg3_asic_rev(tp) == ASIC_REV_5784 || 10311 tg3_asic_rev(tp) == ASIC_REV_5785 || 10312 tg3_asic_rev(tp) == ASIC_REV_57780 || 10313 tg3_flag(tp, 57765_PLUS)) { 10314 u32 tgtreg; 10315 10316 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10317 tgtreg = TG3_RDMA_RSRVCTRL_REG2; 10318 else 10319 tgtreg = TG3_RDMA_RSRVCTRL_REG; 10320 10321 val = tr32(tgtreg); 10322 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 10323 tg3_asic_rev(tp) == ASIC_REV_5762) { 10324 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK | 10325 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK | 10326 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK); 10327 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B | 10328 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K | 10329 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K; 10330 } 10331 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 10332 } 10333 10334 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 10335 tg3_asic_rev(tp) == ASIC_REV_5720 || 10336 tg3_asic_rev(tp) == ASIC_REV_5762) { 10337 u32 tgtreg; 10338 10339 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10340 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2; 10341 else 10342 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL; 
10343 10344 val = tr32(tgtreg); 10345 tw32(tgtreg, val | 10346 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K | 10347 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K); 10348 } 10349 10350 /* Receive/send statistics. */ 10351 if (tg3_flag(tp, 5750_PLUS)) { 10352 val = tr32(RCVLPC_STATS_ENABLE); 10353 val &= ~RCVLPC_STATSENAB_DACK_FIX; 10354 tw32(RCVLPC_STATS_ENABLE, val); 10355 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && 10356 tg3_flag(tp, TSO_CAPABLE)) { 10357 val = tr32(RCVLPC_STATS_ENABLE); 10358 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; 10359 tw32(RCVLPC_STATS_ENABLE, val); 10360 } else { 10361 tw32(RCVLPC_STATS_ENABLE, 0xffffff); 10362 } 10363 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE); 10364 tw32(SNDDATAI_STATSENAB, 0xffffff); 10365 tw32(SNDDATAI_STATSCTRL, 10366 (SNDDATAI_SCTRL_ENABLE | 10367 SNDDATAI_SCTRL_FASTUPD)); 10368 10369 /* Setup host coalescing engine. */ 10370 tw32(HOSTCC_MODE, 0); 10371 for (i = 0; i < 2000; i++) { 10372 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE)) 10373 break; 10374 udelay(10); 10375 } 10376 10377 __tg3_set_coalesce(tp, &tp->coal); 10378 10379 if (!tg3_flag(tp, 5705_PLUS)) { 10380 /* Status/statistics block address. See tg3_timer, 10381 * the tg3_periodic_fetch_stats call there, and 10382 * tg3_get_stats to see how this works for 5705/5750 chips. 10383 */ 10384 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 10385 ((u64) tp->stats_mapping >> 32)); 10386 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, 10387 ((u64) tp->stats_mapping & 0xffffffff)); 10388 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK); 10389 10390 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK); 10391 10392 /* Clear statistics and status block memory areas */ 10393 for (i = NIC_SRAM_STATS_BLK; 10394 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE; 10395 i += sizeof(u32)) { 10396 tg3_write_mem(tp, i, 0); 10397 udelay(40); 10398 } 10399 } 10400 10401 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode); 10402 10403 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); 10404 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); 10405 if (!tg3_flag(tp, 5705_PLUS)) 10406 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); 10407 10408 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { 10409 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 10410 /* reset to prevent losing 1st rx packet intermittently */ 10411 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 10412 udelay(10); 10413 } 10414 10415 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | 10416 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | 10417 MAC_MODE_FHDE_ENABLE; 10418 if (tg3_flag(tp, ENABLE_APE)) 10419 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 10420 if (!tg3_flag(tp, 5705_PLUS) && 10421 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 10422 tg3_asic_rev(tp) != ASIC_REV_5700) 10423 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 10424 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); 10425 udelay(40); 10426 10427 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). 10428 * If TG3_FLAG_IS_NIC is zero, we should read the 10429 * register to preserve the GPIO settings for LOMs. The GPIOs, 10430 * whether used as inputs or outputs, are set by boot code after 10431 * reset. 
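 * The code below therefore merges the relevant GPIO bits read
 * back from GRC_LOCAL_CTRL into tp->grc_local_ctrl instead of
 * overwriting them.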
10432 */ 10433 if (!tg3_flag(tp, IS_NIC)) { 10434 u32 gpio_mask; 10435 10436 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | 10437 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 | 10438 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2; 10439 10440 if (tg3_asic_rev(tp) == ASIC_REV_5752) 10441 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 | 10442 GRC_LCLCTRL_GPIO_OUTPUT3; 10443 10444 if (tg3_asic_rev(tp) == ASIC_REV_5755) 10445 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL; 10446 10447 tp->grc_local_ctrl &= ~gpio_mask; 10448 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; 10449 10450 /* GPIO1 must be driven high for eeprom write protect */ 10451 if (tg3_flag(tp, EEPROM_WRITE_PROT)) 10452 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | 10453 GRC_LCLCTRL_GPIO_OUTPUT1); 10454 } 10455 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 10456 udelay(100); 10457 10458 if (tg3_flag(tp, USING_MSIX)) { 10459 val = tr32(MSGINT_MODE); 10460 val |= MSGINT_MODE_ENABLE; 10461 if (tp->irq_cnt > 1) 10462 val |= MSGINT_MODE_MULTIVEC_EN; 10463 if (!tg3_flag(tp, 1SHOT_MSI)) 10464 val |= MSGINT_MODE_ONE_SHOT_DISABLE; 10465 tw32(MSGINT_MODE, val); 10466 } 10467 10468 if (!tg3_flag(tp, 5705_PLUS)) { 10469 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); 10470 udelay(40); 10471 } 10472 10473 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB | 10474 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB | 10475 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB | 10476 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | 10477 WDMAC_MODE_LNGREAD_ENAB); 10478 10479 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 10480 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 10481 if (tg3_flag(tp, TSO_CAPABLE) && 10482 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 || 10483 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) { 10484 /* nothing */ 10485 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 10486 !tg3_flag(tp, IS_5788)) { 10487 val |= WDMAC_MODE_RX_ACCEL; 10488 } 10489 } 10490 10491 /* Enable host coalescing bug fix */ 10492 if (tg3_flag(tp, 5755_PLUS)) 10493 val |= WDMAC_MODE_STATUS_TAG_FIX; 10494 10495 if (tg3_asic_rev(tp) == ASIC_REV_5785) 10496 val |= WDMAC_MODE_BURST_ALL_DATA; 10497 10498 tw32_f(WDMAC_MODE, val); 10499 udelay(40); 10500 10501 if (tg3_flag(tp, PCIX_MODE)) { 10502 u16 pcix_cmd; 10503 10504 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 10505 &pcix_cmd); 10506 if (tg3_asic_rev(tp) == ASIC_REV_5703) { 10507 pcix_cmd &= ~PCI_X_CMD_MAX_READ; 10508 pcix_cmd |= PCI_X_CMD_READ_2K; 10509 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) { 10510 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ); 10511 pcix_cmd |= PCI_X_CMD_READ_2K; 10512 } 10513 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 10514 pcix_cmd); 10515 } 10516 10517 tw32_f(RDMAC_MODE, rdmac_mode); 10518 udelay(40); 10519 10520 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 10521 tg3_asic_rev(tp) == ASIC_REV_5720) { 10522 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) { 10523 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp)) 10524 break; 10525 } 10526 if (i < TG3_NUM_RDMA_CHANNELS) { 10527 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); 10528 val |= tg3_lso_rd_dma_workaround_bit(tp); 10529 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); 10530 tg3_flag_set(tp, 5719_5720_RDMA_BUG); 10531 } 10532 } 10533 10534 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); 10535 if (!tg3_flag(tp, 5705_PLUS)) 10536 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); 10537 10538 if (tg3_asic_rev(tp) == ASIC_REV_5761) 10539 tw32(SNDDATAC_MODE, 10540 SNDDATAC_MODE_ENABLE | 
SNDDATAC_MODE_CDELAY); 10541 else 10542 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE); 10543 10544 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); 10545 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); 10546 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ; 10547 if (tg3_flag(tp, LRG_PROD_RING_CAP)) 10548 val |= RCVDBDI_MODE_LRG_RING_SZ; 10549 tw32(RCVDBDI_MODE, val); 10550 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); 10551 if (tg3_flag(tp, HW_TSO_1) || 10552 tg3_flag(tp, HW_TSO_2) || 10553 tg3_flag(tp, HW_TSO_3)) 10554 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); 10555 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; 10556 if (tg3_flag(tp, ENABLE_TSS)) 10557 val |= SNDBDI_MODE_MULTI_TXQ_EN; 10558 tw32(SNDBDI_MODE, val); 10559 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); 10560 10561 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { 10562 err = tg3_load_5701_a0_firmware_fix(tp); 10563 if (err) 10564 return err; 10565 } 10566 10567 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 10568 /* Ignore any errors for the firmware download. If download 10569 * fails, the device will operate with EEE disabled 10570 */ 10571 tg3_load_57766_firmware(tp); 10572 } 10573 10574 if (tg3_flag(tp, TSO_CAPABLE)) { 10575 err = tg3_load_tso_firmware(tp); 10576 if (err) 10577 return err; 10578 } 10579 10580 tp->tx_mode = TX_MODE_ENABLE; 10581 10582 if (tg3_flag(tp, 5755_PLUS) || 10583 tg3_asic_rev(tp) == ASIC_REV_5906) 10584 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX; 10585 10586 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10587 tg3_asic_rev(tp) == ASIC_REV_5762) { 10588 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE; 10589 tp->tx_mode &= ~val; 10590 tp->tx_mode |= tr32(MAC_TX_MODE) & val; 10591 } 10592 10593 tw32_f(MAC_TX_MODE, tp->tx_mode); 10594 udelay(100); 10595 10596 if (tg3_flag(tp, ENABLE_RSS)) { 10597 u32 rss_key[10]; 10598 10599 tg3_rss_write_indir_tbl(tp); 10600 10601 netdev_rss_key_fill(rss_key, 10 * sizeof(u32)); 10602 10603 for (i = 0; i < 10 ; i++) 10604 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]); 10605 } 10606 10607 tp->rx_mode = RX_MODE_ENABLE; 10608 if (tg3_flag(tp, 5755_PLUS)) 10609 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; 10610 10611 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10612 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX; 10613 10614 if (tg3_flag(tp, ENABLE_RSS)) 10615 tp->rx_mode |= RX_MODE_RSS_ENABLE | 10616 RX_MODE_RSS_ITBL_HASH_BITS_7 | 10617 RX_MODE_RSS_IPV6_HASH_EN | 10618 RX_MODE_RSS_TCP_IPV6_HASH_EN | 10619 RX_MODE_RSS_IPV4_HASH_EN | 10620 RX_MODE_RSS_TCP_IPV4_HASH_EN; 10621 10622 tw32_f(MAC_RX_MODE, tp->rx_mode); 10623 udelay(10); 10624 10625 tw32(MAC_LED_CTRL, tp->led_ctrl); 10626 10627 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 10628 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 10629 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 10630 udelay(10); 10631 } 10632 tw32_f(MAC_RX_MODE, tp->rx_mode); 10633 udelay(10); 10634 10635 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 10636 if ((tg3_asic_rev(tp) == ASIC_REV_5704) && 10637 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) { 10638 /* Set drive transmission level to 1.2V */ 10639 /* only if the signal pre-emphasis bit is not set */ 10640 val = tr32(MAC_SERDES_CFG); 10641 val &= 0xfffff000; 10642 val |= 0x880; 10643 tw32(MAC_SERDES_CFG, val); 10644 } 10645 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) 10646 tw32(MAC_SERDES_CFG, 0x616000); 10647 } 10648 10649 /* Prevent chip from dropping frames when flow control 10650 * is enabled. 
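 * (MAC_LOW_WMARK_MAX_RX_FRAME appears to bound how many received
 * frames may be pending before the MAC asserts flow control:
 * 1 frame on 57765-class parts, 2 elsewhere.)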
10651 */ 10652 if (tg3_flag(tp, 57765_CLASS)) 10653 val = 1; 10654 else 10655 val = 2; 10656 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val); 10657 10658 if (tg3_asic_rev(tp) == ASIC_REV_5704 && 10659 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 10660 /* Use hardware link auto-negotiation */ 10661 tg3_flag_set(tp, HW_AUTONEG); 10662 } 10663 10664 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 10665 tg3_asic_rev(tp) == ASIC_REV_5714) { 10666 u32 tmp; 10667 10668 tmp = tr32(SERDES_RX_CTRL); 10669 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT); 10670 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT; 10671 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT; 10672 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 10673 } 10674 10675 if (!tg3_flag(tp, USE_PHYLIB)) { 10676 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 10677 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; 10678 10679 err = tg3_setup_phy(tp, false); 10680 if (err) 10681 return err; 10682 10683 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 10684 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 10685 u32 tmp; 10686 10687 /* Clear CRC stats. */ 10688 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { 10689 tg3_writephy(tp, MII_TG3_TEST1, 10690 tmp | MII_TG3_TEST1_CRC_EN); 10691 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp); 10692 } 10693 } 10694 } 10695 10696 __tg3_set_rx_mode(tp->dev); 10697 10698 /* Initialize receive rules. */ 10699 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK); 10700 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK); 10701 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK); 10702 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); 10703 10704 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) 10705 limit = 8; 10706 else 10707 limit = 16; 10708 if (tg3_flag(tp, ENABLE_ASF)) 10709 limit -= 4; 10710 switch (limit) { 10711 case 16: 10712 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); 10713 fallthrough; 10714 case 15: 10715 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); 10716 fallthrough; 10717 case 14: 10718 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); 10719 fallthrough; 10720 case 13: 10721 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); 10722 fallthrough; 10723 case 12: 10724 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); 10725 fallthrough; 10726 case 11: 10727 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); 10728 fallthrough; 10729 case 10: 10730 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); 10731 fallthrough; 10732 case 9: 10733 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); 10734 fallthrough; 10735 case 8: 10736 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); 10737 fallthrough; 10738 case 7: 10739 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); 10740 fallthrough; 10741 case 6: 10742 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); 10743 fallthrough; 10744 case 5: 10745 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0); 10746 fallthrough; 10747 case 4: 10748 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ 10749 case 3: 10750 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */ 10751 case 2: 10752 case 1: 10753 10754 default: 10755 break; 10756 } 10757 10758 if (tg3_flag(tp, ENABLE_APE)) 10759 /* Write our heartbeat update interval to APE. */ 10760 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, 10761 APE_HOST_HEARTBEAT_INT_5SEC); 10762 10763 tg3_write_sig_post_reset(tp, RESET_KIND_INIT); 10764 10765 return 0; 10766 } 10767 10768 /* Called at device open time to get the chip ready for 10769 * packet processing. Invoked with tp->lock held. 
10770 */ 10771 static int tg3_init_hw(struct tg3 *tp, bool reset_phy) 10772 { 10773 /* Chip may have been just powered on. If so, the boot code may still 10774 * be running initialization. Wait for it to finish to avoid races in 10775 * accessing the hardware. 10776 */ 10777 tg3_enable_register_access(tp); 10778 tg3_poll_fw(tp); 10779 10780 tg3_switch_clocks(tp); 10781 10782 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 10783 10784 return tg3_reset_hw(tp, reset_phy); 10785 } 10786 10787 #ifdef CONFIG_TIGON3_HWMON 10788 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir) 10789 { 10790 u32 off, len = TG3_OCIR_LEN; 10791 int i; 10792 10793 for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) { 10794 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len); 10795 10796 if (ocir->signature != TG3_OCIR_SIG_MAGIC || 10797 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE)) 10798 memset(ocir, 0, len); 10799 } 10800 } 10801 10802 /* sysfs attributes for hwmon */ 10803 static ssize_t tg3_show_temp(struct device *dev, 10804 struct device_attribute *devattr, char *buf) 10805 { 10806 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 10807 struct tg3 *tp = dev_get_drvdata(dev); 10808 u32 temperature; 10809 10810 spin_lock_bh(&tp->lock); 10811 tg3_ape_scratchpad_read(tp, &temperature, attr->index, 10812 sizeof(temperature)); 10813 spin_unlock_bh(&tp->lock); 10814 return sprintf(buf, "%u\n", temperature * 1000); 10815 } 10816 10817 10818 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL, 10819 TG3_TEMP_SENSOR_OFFSET); 10820 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL, 10821 TG3_TEMP_CAUTION_OFFSET); 10822 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL, 10823 TG3_TEMP_MAX_OFFSET); 10824 10825 static struct attribute *tg3_attrs[] = { 10826 &sensor_dev_attr_temp1_input.dev_attr.attr, 10827 &sensor_dev_attr_temp1_crit.dev_attr.attr, 10828 &sensor_dev_attr_temp1_max.dev_attr.attr, 10829 NULL 10830 }; 10831 ATTRIBUTE_GROUPS(tg3); 10832 10833 static void tg3_hwmon_close(struct tg3 *tp) 10834 { 10835 if (tp->hwmon_dev) { 10836 hwmon_device_unregister(tp->hwmon_dev); 10837 tp->hwmon_dev = NULL; 10838 } 10839 } 10840 10841 static void tg3_hwmon_open(struct tg3 *tp) 10842 { 10843 int i; 10844 u32 size = 0; 10845 struct pci_dev *pdev = tp->pdev; 10846 struct tg3_ocir ocirs[TG3_SD_NUM_RECS]; 10847 10848 tg3_sd_scan_scratchpad(tp, ocirs); 10849 10850 for (i = 0; i < TG3_SD_NUM_RECS; i++) { 10851 if (!ocirs[i].src_data_length) 10852 continue; 10853 10854 size += ocirs[i].src_hdr_length; 10855 size += ocirs[i].src_data_length; 10856 } 10857 10858 if (!size) 10859 return; 10860 10861 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3", 10862 tp, tg3_groups); 10863 if (IS_ERR(tp->hwmon_dev)) { 10864 tp->hwmon_dev = NULL; 10865 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n"); 10866 } 10867 } 10868 #else 10869 static inline void tg3_hwmon_close(struct tg3 *tp) { } 10870 static inline void tg3_hwmon_open(struct tg3 *tp) { } 10871 #endif /* CONFIG_TIGON3_HWMON */ 10872 10873 10874 #define TG3_STAT_ADD32(PSTAT, REG) \ 10875 do { u32 __val = tr32(REG); \ 10876 (PSTAT)->low += __val; \ 10877 if ((PSTAT)->low < __val) \ 10878 (PSTAT)->high += 1; \ 10879 } while (0) 10880 10881 static void tg3_periodic_fetch_stats(struct tg3 *tp) 10882 { 10883 struct tg3_hw_stats *sp = tp->hw_stats; 10884 10885 if (!tp->link_up) 10886 return; 10887 10888 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS); 10889 
TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS); 10890 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT); 10891 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT); 10892 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS); 10893 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS); 10894 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS); 10895 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED); 10896 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL); 10897 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL); 10898 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST); 10899 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST); 10900 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST); 10901 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) && 10902 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low + 10903 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) { 10904 u32 val; 10905 10906 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); 10907 val &= ~tg3_lso_rd_dma_workaround_bit(tp); 10908 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); 10909 tg3_flag_clear(tp, 5719_5720_RDMA_BUG); 10910 } 10911 10912 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS); 10913 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS); 10914 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST); 10915 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST); 10916 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST); 10917 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS); 10918 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS); 10919 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD); 10920 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD); 10921 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD); 10922 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED); 10923 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG); 10924 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS); 10925 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); 10926 10927 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); 10928 if (tg3_asic_rev(tp) != ASIC_REV_5717 && 10929 tg3_asic_rev(tp) != ASIC_REV_5762 && 10930 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 && 10931 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) { 10932 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); 10933 } else { 10934 u32 val = tr32(HOSTCC_FLOW_ATTN); 10935 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 
1 : 0; 10936 if (val) { 10937 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM); 10938 sp->rx_discards.low += val; 10939 if (sp->rx_discards.low < val) 10940 sp->rx_discards.high += 1; 10941 } 10942 sp->mbuf_lwm_thresh_hit = sp->rx_discards; 10943 } 10944 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); 10945 } 10946 10947 static void tg3_chk_missed_msi(struct tg3 *tp) 10948 { 10949 u32 i; 10950 10951 for (i = 0; i < tp->irq_cnt; i++) { 10952 struct tg3_napi *tnapi = &tp->napi[i]; 10953 10954 if (tg3_has_work(tnapi)) { 10955 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr && 10956 tnapi->last_tx_cons == tnapi->tx_cons) { 10957 if (tnapi->chk_msi_cnt < 1) { 10958 tnapi->chk_msi_cnt++; 10959 return; 10960 } 10961 tg3_msi(0, tnapi); 10962 } 10963 } 10964 tnapi->chk_msi_cnt = 0; 10965 tnapi->last_rx_cons = tnapi->rx_rcb_ptr; 10966 tnapi->last_tx_cons = tnapi->tx_cons; 10967 } 10968 } 10969 10970 static void tg3_timer(struct timer_list *t) 10971 { 10972 struct tg3 *tp = from_timer(tp, t, timer); 10973 10974 spin_lock(&tp->lock); 10975 10976 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) { 10977 spin_unlock(&tp->lock); 10978 goto restart_timer; 10979 } 10980 10981 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 10982 tg3_flag(tp, 57765_CLASS)) 10983 tg3_chk_missed_msi(tp); 10984 10985 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) { 10986 /* BCM4785: Flush posted writes from GbE to host memory. */ 10987 tr32(HOSTCC_MODE); 10988 } 10989 10990 if (!tg3_flag(tp, TAGGED_STATUS)) { 10991 /* All of this garbage is because when using non-tagged 10992 * IRQ status the mailbox/status_block protocol the chip 10993 * uses with the cpu is race prone. 10994 */ 10995 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) { 10996 tw32(GRC_LOCAL_CTRL, 10997 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); 10998 } else { 10999 tw32(HOSTCC_MODE, tp->coalesce_mode | 11000 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW); 11001 } 11002 11003 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { 11004 spin_unlock(&tp->lock); 11005 tg3_reset_task_schedule(tp); 11006 goto restart_timer; 11007 } 11008 } 11009 11010 /* This part only runs once per second. 
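 * (tp->timer_counter is reloaded from tp->timer_multiplier,
 * which tg3_timer_init() computes as HZ / tp->timer_offset.)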
*/
11011 if (!--tp->timer_counter) {
11012 if (tg3_flag(tp, 5705_PLUS))
11013 tg3_periodic_fetch_stats(tp);
11014
11015 if (tp->setlpicnt && !--tp->setlpicnt)
11016 tg3_phy_eee_enable(tp);
11017
11018 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11019 u32 mac_stat;
11020 int phy_event;
11021
11022 mac_stat = tr32(MAC_STATUS);
11023
11024 phy_event = 0;
11025 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11026 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11027 phy_event = 1;
11028 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11029 phy_event = 1;
11030
11031 if (phy_event)
11032 tg3_setup_phy(tp, false);
11033 } else if (tg3_flag(tp, POLL_SERDES)) {
11034 u32 mac_stat = tr32(MAC_STATUS);
11035 int need_setup = 0;
11036
11037 if (tp->link_up &&
11038 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11039 need_setup = 1;
11040 }
11041 if (!tp->link_up &&
11042 (mac_stat & (MAC_STATUS_PCS_SYNCED |
11043 MAC_STATUS_SIGNAL_DET))) {
11044 need_setup = 1;
11045 }
11046 if (need_setup) {
11047 if (!tp->serdes_counter) {
11048 tw32_f(MAC_MODE,
11049 (tp->mac_mode &
11050 ~MAC_MODE_PORT_MODE_MASK));
11051 udelay(40);
11052 tw32_f(MAC_MODE, tp->mac_mode);
11053 udelay(40);
11054 }
11055 tg3_setup_phy(tp, false);
11056 }
11057 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11058 tg3_flag(tp, 5780_CLASS)) {
11059 tg3_serdes_parallel_detect(tp);
11060 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11061 u32 cpmu = tr32(TG3_CPMU_STATUS);
11062 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11063 TG3_CPMU_STATUS_LINK_MASK);
11064
11065 if (link_up != tp->link_up)
11066 tg3_setup_phy(tp, false);
11067 }
11068
11069 tp->timer_counter = tp->timer_multiplier;
11070 }
11071
11072 /* Heartbeat is only sent once every 2 seconds.
11073 *
11074 * The heartbeat is to tell the ASF firmware that the host
11075 * driver is still alive. In the event that the OS crashes,
11076 * ASF needs to reset the hardware to free up the FIFO space
11077 * that may be filled with rx packets destined for the host.
11078 * If the FIFO is full, ASF will no longer function properly.
11079 *
11080 * Unintended resets have been reported on real time kernels
11081 * where the timer doesn't run on time. Netpoll will also have
11082 * the same problem.
11083 *
11084 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11085 * to check the ring condition when the heartbeat is expiring
11086 * before doing the reset. This will prevent most unintended
11087 * resets.
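 * (The command is issued below as FWCMD_NICDRV_ALIVE3 with a
 * 4-byte payload carrying TG3_FW_UPDATE_TIMEOUT_SEC.)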
11088 */ 11089 if (!--tp->asf_counter) { 11090 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { 11091 tg3_wait_for_event_ack(tp); 11092 11093 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, 11094 FWCMD_NICDRV_ALIVE3); 11095 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); 11096 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 11097 TG3_FW_UPDATE_TIMEOUT_SEC); 11098 11099 tg3_generate_fw_event(tp); 11100 } 11101 tp->asf_counter = tp->asf_multiplier; 11102 } 11103 11104 /* Update the APE heartbeat every 5 seconds.*/ 11105 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL); 11106 11107 spin_unlock(&tp->lock); 11108 11109 restart_timer: 11110 tp->timer.expires = jiffies + tp->timer_offset; 11111 add_timer(&tp->timer); 11112 } 11113 11114 static void tg3_timer_init(struct tg3 *tp) 11115 { 11116 if (tg3_flag(tp, TAGGED_STATUS) && 11117 tg3_asic_rev(tp) != ASIC_REV_5717 && 11118 !tg3_flag(tp, 57765_CLASS)) 11119 tp->timer_offset = HZ; 11120 else 11121 tp->timer_offset = HZ / 10; 11122 11123 BUG_ON(tp->timer_offset > HZ); 11124 11125 tp->timer_multiplier = (HZ / tp->timer_offset); 11126 tp->asf_multiplier = (HZ / tp->timer_offset) * 11127 TG3_FW_UPDATE_FREQ_SEC; 11128 11129 timer_setup(&tp->timer, tg3_timer, 0); 11130 } 11131 11132 static void tg3_timer_start(struct tg3 *tp) 11133 { 11134 tp->asf_counter = tp->asf_multiplier; 11135 tp->timer_counter = tp->timer_multiplier; 11136 11137 tp->timer.expires = jiffies + tp->timer_offset; 11138 add_timer(&tp->timer); 11139 } 11140 11141 static void tg3_timer_stop(struct tg3 *tp) 11142 { 11143 del_timer_sync(&tp->timer); 11144 } 11145 11146 /* Restart hardware after configuration changes, self-test, etc. 11147 * Invoked with tp->lock held. 11148 */ 11149 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy) 11150 __releases(tp->lock) 11151 __acquires(tp->lock) 11152 { 11153 int err; 11154 11155 err = tg3_init_hw(tp, reset_phy); 11156 if (err) { 11157 netdev_err(tp->dev, 11158 "Failed to re-initialize device, aborting\n"); 11159 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11160 tg3_full_unlock(tp); 11161 tg3_timer_stop(tp); 11162 tp->irq_sync = 0; 11163 tg3_napi_enable(tp); 11164 dev_close(tp->dev); 11165 tg3_full_lock(tp, 0); 11166 } 11167 return err; 11168 } 11169 11170 static void tg3_reset_task(struct work_struct *work) 11171 { 11172 struct tg3 *tp = container_of(work, struct tg3, reset_task); 11173 int err; 11174 11175 rtnl_lock(); 11176 tg3_full_lock(tp, 0); 11177 11178 if (!netif_running(tp->dev)) { 11179 tg3_flag_clear(tp, RESET_TASK_PENDING); 11180 tg3_full_unlock(tp); 11181 rtnl_unlock(); 11182 return; 11183 } 11184 11185 tg3_full_unlock(tp); 11186 11187 tg3_phy_stop(tp); 11188 11189 tg3_netif_stop(tp); 11190 11191 tg3_full_lock(tp, 1); 11192 11193 if (tg3_flag(tp, TX_RECOVERY_PENDING)) { 11194 tp->write32_tx_mbox = tg3_write32_tx_mbox; 11195 tp->write32_rx_mbox = tg3_write_flush_reg32; 11196 tg3_flag_set(tp, MBOX_WRITE_REORDER); 11197 tg3_flag_clear(tp, TX_RECOVERY_PENDING); 11198 } 11199 11200 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); 11201 err = tg3_init_hw(tp, true); 11202 if (err) { 11203 tg3_full_unlock(tp); 11204 tp->irq_sync = 0; 11205 tg3_napi_enable(tp); 11206 /* Clear this flag so that tg3_reset_task_cancel() will not 11207 * call cancel_work_sync() and wait forever. 
11208 */ 11209 tg3_flag_clear(tp, RESET_TASK_PENDING); 11210 dev_close(tp->dev); 11211 goto out; 11212 } 11213 11214 tg3_netif_start(tp); 11215 tg3_full_unlock(tp); 11216 tg3_phy_start(tp); 11217 tg3_flag_clear(tp, RESET_TASK_PENDING); 11218 out: 11219 rtnl_unlock(); 11220 } 11221 11222 static int tg3_request_irq(struct tg3 *tp, int irq_num) 11223 { 11224 irq_handler_t fn; 11225 unsigned long flags; 11226 char *name; 11227 struct tg3_napi *tnapi = &tp->napi[irq_num]; 11228 11229 if (tp->irq_cnt == 1) 11230 name = tp->dev->name; 11231 else { 11232 name = &tnapi->irq_lbl[0]; 11233 if (tnapi->tx_buffers && tnapi->rx_rcb) 11234 snprintf(name, IFNAMSIZ, 11235 "%s-txrx-%d", tp->dev->name, irq_num); 11236 else if (tnapi->tx_buffers) 11237 snprintf(name, IFNAMSIZ, 11238 "%s-tx-%d", tp->dev->name, irq_num); 11239 else if (tnapi->rx_rcb) 11240 snprintf(name, IFNAMSIZ, 11241 "%s-rx-%d", tp->dev->name, irq_num); 11242 else 11243 snprintf(name, IFNAMSIZ, 11244 "%s-%d", tp->dev->name, irq_num); 11245 name[IFNAMSIZ-1] = 0; 11246 } 11247 11248 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { 11249 fn = tg3_msi; 11250 if (tg3_flag(tp, 1SHOT_MSI)) 11251 fn = tg3_msi_1shot; 11252 flags = 0; 11253 } else { 11254 fn = tg3_interrupt; 11255 if (tg3_flag(tp, TAGGED_STATUS)) 11256 fn = tg3_interrupt_tagged; 11257 flags = IRQF_SHARED; 11258 } 11259 11260 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); 11261 } 11262 11263 static int tg3_test_interrupt(struct tg3 *tp) 11264 { 11265 struct tg3_napi *tnapi = &tp->napi[0]; 11266 struct net_device *dev = tp->dev; 11267 int err, i, intr_ok = 0; 11268 u32 val; 11269 11270 if (!netif_running(dev)) 11271 return -ENODEV; 11272 11273 tg3_disable_ints(tp); 11274 11275 free_irq(tnapi->irq_vec, tnapi); 11276 11277 /* 11278 * Turn off MSI one shot mode. Otherwise this test has no 11279 * observable way to know whether the interrupt was delivered. 11280 */ 11281 if (tg3_flag(tp, 57765_PLUS)) { 11282 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; 11283 tw32(MSGINT_MODE, val); 11284 } 11285 11286 err = request_irq(tnapi->irq_vec, tg3_test_isr, 11287 IRQF_SHARED, dev->name, tnapi); 11288 if (err) 11289 return err; 11290 11291 tnapi->hw_status->status &= ~SD_STATUS_UPDATED; 11292 tg3_enable_ints(tp); 11293 11294 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 11295 tnapi->coal_now); 11296 11297 for (i = 0; i < 5; i++) { 11298 u32 int_mbox, misc_host_ctrl; 11299 11300 int_mbox = tr32_mailbox(tnapi->int_mbox); 11301 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 11302 11303 if ((int_mbox != 0) || 11304 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) { 11305 intr_ok = 1; 11306 break; 11307 } 11308 11309 if (tg3_flag(tp, 57765_PLUS) && 11310 tnapi->hw_status->status_tag != tnapi->last_tag) 11311 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); 11312 11313 msleep(10); 11314 } 11315 11316 tg3_disable_ints(tp); 11317 11318 free_irq(tnapi->irq_vec, tnapi); 11319 11320 err = tg3_request_irq(tp, 0); 11321 11322 if (err) 11323 return err; 11324 11325 if (intr_ok) { 11326 /* Reenable MSI one shot mode. 
*/
11327 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11328 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11329 tw32(MSGINT_MODE, val);
11330 }
11331 return 0;
11332 }
11333
11334 return -EIO;
11335 }
11336
11337 /* Returns 0 if the MSI test succeeds, or if the MSI test fails
11338 * and INTx mode is successfully restored.
11339 */
11340 static int tg3_test_msi(struct tg3 *tp)
11341 {
11342 int err;
11343 u16 pci_cmd;
11344
11345 if (!tg3_flag(tp, USING_MSI))
11346 return 0;
11347
11348 /* Turn off SERR reporting in case MSI terminates with Master
11349 * Abort.
11350 */
11351 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11352 pci_write_config_word(tp->pdev, PCI_COMMAND,
11353 pci_cmd & ~PCI_COMMAND_SERR);
11354
11355 err = tg3_test_interrupt(tp);
11356
11357 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11358
11359 if (!err)
11360 return 0;
11361
11362 /* other failures */
11363 if (err != -EIO)
11364 return err;
11365
11366 /* MSI test failed, go back to INTx mode */
11367 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11368 "to INTx mode. Please report this failure to the PCI "
11369 "maintainer and include system chipset information\n");
11370
11371 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11372
11373 pci_disable_msi(tp->pdev);
11374
11375 tg3_flag_clear(tp, USING_MSI);
11376 tp->napi[0].irq_vec = tp->pdev->irq;
11377
11378 err = tg3_request_irq(tp, 0);
11379 if (err)
11380 return err;
11381
11382 /* Need to reset the chip because the MSI cycle may have terminated
11383 * with Master Abort.
11384 */
11385 tg3_full_lock(tp, 1);
11386
11387 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11388 err = tg3_init_hw(tp, true);
11389
11390 tg3_full_unlock(tp);
11391
11392 if (err)
11393 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11394
11395 return err;
11396 }
11397
11398 static int tg3_request_firmware(struct tg3 *tp)
11399 {
11400 const struct tg3_firmware_hdr *fw_hdr;
11401
11402 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11403 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11404 tp->fw_needed);
11405 return -ENOENT;
11406 }
11407
11408 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11409
11410 /* Firmware blob starts with version numbers, followed by
11411 * start address and _full_ length including BSS sections
11412 * (which must be longer than the actual data, of course).
11413 */
11414
11415 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11416 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11417 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11418 tp->fw_len, tp->fw_needed);
11419 release_firmware(tp->fw);
11420 tp->fw = NULL;
11421 return -EINVAL;
11422 }
11423
11424 /* We no longer need the firmware name; we have the blob. */
11425 tp->fw_needed = NULL;
11426 return 0;
11427 }
11428
11429 static u32 tg3_irq_count(struct tg3 *tp)
11430 {
11431 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11432
11433 if (irq_cnt > 1) {
11434 /* We want as many rx rings enabled as there are cpus.
11435 * In multiqueue MSI-X mode, the first MSI-X vector
11436 * only deals with link interrupts, etc, so we add
11437 * one to the number of vectors we are requesting.
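 * The total is still clamped to tp->irq_max below.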
11438 */ 11439 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max); 11440 } 11441 11442 return irq_cnt; 11443 } 11444 11445 static bool tg3_enable_msix(struct tg3 *tp) 11446 { 11447 int i, rc; 11448 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS]; 11449 11450 tp->txq_cnt = tp->txq_req; 11451 tp->rxq_cnt = tp->rxq_req; 11452 if (!tp->rxq_cnt) 11453 tp->rxq_cnt = netif_get_num_default_rss_queues(); 11454 if (tp->rxq_cnt > tp->rxq_max) 11455 tp->rxq_cnt = tp->rxq_max; 11456 11457 /* Disable multiple TX rings by default. Simple round-robin hardware 11458 * scheduling of the TX rings can cause starvation of rings with 11459 * small packets when other rings have TSO or jumbo packets. 11460 */ 11461 if (!tp->txq_req) 11462 tp->txq_cnt = 1; 11463 11464 tp->irq_cnt = tg3_irq_count(tp); 11465 11466 for (i = 0; i < tp->irq_max; i++) { 11467 msix_ent[i].entry = i; 11468 msix_ent[i].vector = 0; 11469 } 11470 11471 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt); 11472 if (rc < 0) { 11473 return false; 11474 } else if (rc < tp->irq_cnt) { 11475 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n", 11476 tp->irq_cnt, rc); 11477 tp->irq_cnt = rc; 11478 tp->rxq_cnt = max(rc - 1, 1); 11479 if (tp->txq_cnt) 11480 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max); 11481 } 11482 11483 for (i = 0; i < tp->irq_max; i++) 11484 tp->napi[i].irq_vec = msix_ent[i].vector; 11485 11486 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) { 11487 pci_disable_msix(tp->pdev); 11488 return false; 11489 } 11490 11491 if (tp->irq_cnt == 1) 11492 return true; 11493 11494 tg3_flag_set(tp, ENABLE_RSS); 11495 11496 if (tp->txq_cnt > 1) 11497 tg3_flag_set(tp, ENABLE_TSS); 11498 11499 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt); 11500 11501 return true; 11502 } 11503 11504 static void tg3_ints_init(struct tg3 *tp) 11505 { 11506 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) && 11507 !tg3_flag(tp, TAGGED_STATUS)) { 11508 /* All MSI supporting chips should support tagged 11509 * status. Assert that this is the case. 11510 */ 11511 netdev_warn(tp->dev, 11512 "MSI without TAGGED_STATUS? 
Not using MSI\n"); 11513 goto defcfg; 11514 } 11515 11516 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp)) 11517 tg3_flag_set(tp, USING_MSIX); 11518 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0) 11519 tg3_flag_set(tp, USING_MSI); 11520 11521 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { 11522 u32 msi_mode = tr32(MSGINT_MODE); 11523 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) 11524 msi_mode |= MSGINT_MODE_MULTIVEC_EN; 11525 if (!tg3_flag(tp, 1SHOT_MSI)) 11526 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE; 11527 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); 11528 } 11529 defcfg: 11530 if (!tg3_flag(tp, USING_MSIX)) { 11531 tp->irq_cnt = 1; 11532 tp->napi[0].irq_vec = tp->pdev->irq; 11533 } 11534 11535 if (tp->irq_cnt == 1) { 11536 tp->txq_cnt = 1; 11537 tp->rxq_cnt = 1; 11538 netif_set_real_num_tx_queues(tp->dev, 1); 11539 netif_set_real_num_rx_queues(tp->dev, 1); 11540 } 11541 } 11542 11543 static void tg3_ints_fini(struct tg3 *tp) 11544 { 11545 if (tg3_flag(tp, USING_MSIX)) 11546 pci_disable_msix(tp->pdev); 11547 else if (tg3_flag(tp, USING_MSI)) 11548 pci_disable_msi(tp->pdev); 11549 tg3_flag_clear(tp, USING_MSI); 11550 tg3_flag_clear(tp, USING_MSIX); 11551 tg3_flag_clear(tp, ENABLE_RSS); 11552 tg3_flag_clear(tp, ENABLE_TSS); 11553 } 11554 11555 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, 11556 bool init) 11557 { 11558 struct net_device *dev = tp->dev; 11559 int i, err; 11560 11561 /* 11562 * Setup interrupts first so we know how 11563 * many NAPI resources to allocate 11564 */ 11565 tg3_ints_init(tp); 11566 11567 tg3_rss_check_indir_tbl(tp); 11568 11569 /* The placement of this call is tied 11570 * to the setup and use of Host TX descriptors. 11571 */ 11572 err = tg3_alloc_consistent(tp); 11573 if (err) 11574 goto out_ints_fini; 11575 11576 tg3_napi_init(tp); 11577 11578 tg3_napi_enable(tp); 11579 11580 for (i = 0; i < tp->irq_cnt; i++) { 11581 err = tg3_request_irq(tp, i); 11582 if (err) { 11583 for (i--; i >= 0; i--) { 11584 struct tg3_napi *tnapi = &tp->napi[i]; 11585 11586 free_irq(tnapi->irq_vec, tnapi); 11587 } 11588 goto out_napi_fini; 11589 } 11590 } 11591 11592 tg3_full_lock(tp, 0); 11593 11594 if (init) 11595 tg3_ape_driver_state_change(tp, RESET_KIND_INIT); 11596 11597 err = tg3_init_hw(tp, reset_phy); 11598 if (err) { 11599 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11600 tg3_free_rings(tp); 11601 } 11602 11603 tg3_full_unlock(tp); 11604 11605 if (err) 11606 goto out_free_irq; 11607 11608 if (test_irq && tg3_flag(tp, USING_MSI)) { 11609 err = tg3_test_msi(tp); 11610 11611 if (err) { 11612 tg3_full_lock(tp, 0); 11613 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11614 tg3_free_rings(tp); 11615 tg3_full_unlock(tp); 11616 11617 goto out_napi_fini; 11618 } 11619 11620 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) { 11621 u32 val = tr32(PCIE_TRANSACTION_CFG); 11622 11623 tw32(PCIE_TRANSACTION_CFG, 11624 val | PCIE_TRANS_CFG_1SHOT_MSI); 11625 } 11626 } 11627 11628 tg3_phy_start(tp); 11629 11630 tg3_hwmon_open(tp); 11631 11632 tg3_full_lock(tp, 0); 11633 11634 tg3_timer_start(tp); 11635 tg3_flag_set(tp, INIT_COMPLETE); 11636 tg3_enable_ints(tp); 11637 11638 tg3_ptp_resume(tp); 11639 11640 tg3_full_unlock(tp); 11641 11642 netif_tx_start_all_queues(dev); 11643 11644 /* 11645 * Reset loopback feature if it was turned on while the device was down 11646 * make sure that it's installed properly now. 
11647 */ 11648 if (dev->features & NETIF_F_LOOPBACK) 11649 tg3_set_loopback(dev, dev->features); 11650 11651 return 0; 11652 11653 out_free_irq: 11654 for (i = tp->irq_cnt - 1; i >= 0; i--) { 11655 struct tg3_napi *tnapi = &tp->napi[i]; 11656 free_irq(tnapi->irq_vec, tnapi); 11657 } 11658 11659 out_napi_fini: 11660 tg3_napi_disable(tp); 11661 tg3_napi_fini(tp); 11662 tg3_free_consistent(tp); 11663 11664 out_ints_fini: 11665 tg3_ints_fini(tp); 11666 11667 return err; 11668 } 11669 11670 static void tg3_stop(struct tg3 *tp) 11671 { 11672 int i; 11673 11674 tg3_reset_task_cancel(tp); 11675 tg3_netif_stop(tp); 11676 11677 tg3_timer_stop(tp); 11678 11679 tg3_hwmon_close(tp); 11680 11681 tg3_phy_stop(tp); 11682 11683 tg3_full_lock(tp, 1); 11684 11685 tg3_disable_ints(tp); 11686 11687 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11688 tg3_free_rings(tp); 11689 tg3_flag_clear(tp, INIT_COMPLETE); 11690 11691 tg3_full_unlock(tp); 11692 11693 for (i = tp->irq_cnt - 1; i >= 0; i--) { 11694 struct tg3_napi *tnapi = &tp->napi[i]; 11695 free_irq(tnapi->irq_vec, tnapi); 11696 } 11697 11698 tg3_ints_fini(tp); 11699 11700 tg3_napi_fini(tp); 11701 11702 tg3_free_consistent(tp); 11703 } 11704 11705 static int tg3_open(struct net_device *dev) 11706 { 11707 struct tg3 *tp = netdev_priv(dev); 11708 int err; 11709 11710 if (tp->pcierr_recovery) { 11711 netdev_err(dev, "Failed to open device. PCI error recovery " 11712 "in progress\n"); 11713 return -EAGAIN; 11714 } 11715 11716 if (tp->fw_needed) { 11717 err = tg3_request_firmware(tp); 11718 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 11719 if (err) { 11720 netdev_warn(tp->dev, "EEE capability disabled\n"); 11721 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; 11722 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { 11723 netdev_warn(tp->dev, "EEE capability restored\n"); 11724 tp->phy_flags |= TG3_PHYFLG_EEE_CAP; 11725 } 11726 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { 11727 if (err) 11728 return err; 11729 } else if (err) { 11730 netdev_warn(tp->dev, "TSO capability disabled\n"); 11731 tg3_flag_clear(tp, TSO_CAPABLE); 11732 } else if (!tg3_flag(tp, TSO_CAPABLE)) { 11733 netdev_notice(tp->dev, "TSO capability restored\n"); 11734 tg3_flag_set(tp, TSO_CAPABLE); 11735 } 11736 } 11737 11738 tg3_carrier_off(tp); 11739 11740 err = tg3_power_up(tp); 11741 if (err) 11742 return err; 11743 11744 tg3_full_lock(tp, 0); 11745 11746 tg3_disable_ints(tp); 11747 tg3_flag_clear(tp, INIT_COMPLETE); 11748 11749 tg3_full_unlock(tp); 11750 11751 err = tg3_start(tp, 11752 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN), 11753 true, true); 11754 if (err) { 11755 tg3_frob_aux_power(tp, false); 11756 pci_set_power_state(tp->pdev, PCI_D3hot); 11757 } 11758 11759 return err; 11760 } 11761 11762 static int tg3_close(struct net_device *dev) 11763 { 11764 struct tg3 *tp = netdev_priv(dev); 11765 11766 if (tp->pcierr_recovery) { 11767 netdev_err(dev, "Failed to close device. 
PCI error recovery " 11768 "in progress\n"); 11769 return -EAGAIN; 11770 } 11771 11772 tg3_stop(tp); 11773 11774 if (pci_device_is_present(tp->pdev)) { 11775 tg3_power_down_prepare(tp); 11776 11777 tg3_carrier_off(tp); 11778 } 11779 return 0; 11780 } 11781 11782 static inline u64 get_stat64(tg3_stat64_t *val) 11783 { 11784 return ((u64)val->high << 32) | ((u64)val->low); 11785 } 11786 11787 static u64 tg3_calc_crc_errors(struct tg3 *tp) 11788 { 11789 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11790 11791 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 11792 (tg3_asic_rev(tp) == ASIC_REV_5700 || 11793 tg3_asic_rev(tp) == ASIC_REV_5701)) { 11794 u32 val; 11795 11796 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) { 11797 tg3_writephy(tp, MII_TG3_TEST1, 11798 val | MII_TG3_TEST1_CRC_EN); 11799 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val); 11800 } else 11801 val = 0; 11802 11803 tp->phy_crc_errors += val; 11804 11805 return tp->phy_crc_errors; 11806 } 11807 11808 return get_stat64(&hw_stats->rx_fcs_errors); 11809 } 11810 11811 #define ESTAT_ADD(member) \ 11812 estats->member = old_estats->member + \ 11813 get_stat64(&hw_stats->member) 11814 11815 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats) 11816 { 11817 struct tg3_ethtool_stats *old_estats = &tp->estats_prev; 11818 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11819 11820 ESTAT_ADD(rx_octets); 11821 ESTAT_ADD(rx_fragments); 11822 ESTAT_ADD(rx_ucast_packets); 11823 ESTAT_ADD(rx_mcast_packets); 11824 ESTAT_ADD(rx_bcast_packets); 11825 ESTAT_ADD(rx_fcs_errors); 11826 ESTAT_ADD(rx_align_errors); 11827 ESTAT_ADD(rx_xon_pause_rcvd); 11828 ESTAT_ADD(rx_xoff_pause_rcvd); 11829 ESTAT_ADD(rx_mac_ctrl_rcvd); 11830 ESTAT_ADD(rx_xoff_entered); 11831 ESTAT_ADD(rx_frame_too_long_errors); 11832 ESTAT_ADD(rx_jabbers); 11833 ESTAT_ADD(rx_undersize_packets); 11834 ESTAT_ADD(rx_in_length_errors); 11835 ESTAT_ADD(rx_out_length_errors); 11836 ESTAT_ADD(rx_64_or_less_octet_packets); 11837 ESTAT_ADD(rx_65_to_127_octet_packets); 11838 ESTAT_ADD(rx_128_to_255_octet_packets); 11839 ESTAT_ADD(rx_256_to_511_octet_packets); 11840 ESTAT_ADD(rx_512_to_1023_octet_packets); 11841 ESTAT_ADD(rx_1024_to_1522_octet_packets); 11842 ESTAT_ADD(rx_1523_to_2047_octet_packets); 11843 ESTAT_ADD(rx_2048_to_4095_octet_packets); 11844 ESTAT_ADD(rx_4096_to_8191_octet_packets); 11845 ESTAT_ADD(rx_8192_to_9022_octet_packets); 11846 11847 ESTAT_ADD(tx_octets); 11848 ESTAT_ADD(tx_collisions); 11849 ESTAT_ADD(tx_xon_sent); 11850 ESTAT_ADD(tx_xoff_sent); 11851 ESTAT_ADD(tx_flow_control); 11852 ESTAT_ADD(tx_mac_errors); 11853 ESTAT_ADD(tx_single_collisions); 11854 ESTAT_ADD(tx_mult_collisions); 11855 ESTAT_ADD(tx_deferred); 11856 ESTAT_ADD(tx_excessive_collisions); 11857 ESTAT_ADD(tx_late_collisions); 11858 ESTAT_ADD(tx_collide_2times); 11859 ESTAT_ADD(tx_collide_3times); 11860 ESTAT_ADD(tx_collide_4times); 11861 ESTAT_ADD(tx_collide_5times); 11862 ESTAT_ADD(tx_collide_6times); 11863 ESTAT_ADD(tx_collide_7times); 11864 ESTAT_ADD(tx_collide_8times); 11865 ESTAT_ADD(tx_collide_9times); 11866 ESTAT_ADD(tx_collide_10times); 11867 ESTAT_ADD(tx_collide_11times); 11868 ESTAT_ADD(tx_collide_12times); 11869 ESTAT_ADD(tx_collide_13times); 11870 ESTAT_ADD(tx_collide_14times); 11871 ESTAT_ADD(tx_collide_15times); 11872 ESTAT_ADD(tx_ucast_packets); 11873 ESTAT_ADD(tx_mcast_packets); 11874 ESTAT_ADD(tx_bcast_packets); 11875 ESTAT_ADD(tx_carrier_sense_errors); 11876 ESTAT_ADD(tx_discards); 11877 ESTAT_ADD(tx_errors); 11878 11879 ESTAT_ADD(dma_writeq_full); 11880 
ESTAT_ADD(dma_write_prioq_full); 11881 ESTAT_ADD(rxbds_empty); 11882 ESTAT_ADD(rx_discards); 11883 ESTAT_ADD(rx_errors); 11884 ESTAT_ADD(rx_threshold_hit); 11885 11886 ESTAT_ADD(dma_readq_full); 11887 ESTAT_ADD(dma_read_prioq_full); 11888 ESTAT_ADD(tx_comp_queue_full); 11889 11890 ESTAT_ADD(ring_set_send_prod_index); 11891 ESTAT_ADD(ring_status_update); 11892 ESTAT_ADD(nic_irqs); 11893 ESTAT_ADD(nic_avoided_irqs); 11894 ESTAT_ADD(nic_tx_threshold_hit); 11895 11896 ESTAT_ADD(mbuf_lwm_thresh_hit); 11897 } 11898 11899 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats) 11900 { 11901 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev; 11902 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11903 11904 stats->rx_packets = old_stats->rx_packets + 11905 get_stat64(&hw_stats->rx_ucast_packets) + 11906 get_stat64(&hw_stats->rx_mcast_packets) + 11907 get_stat64(&hw_stats->rx_bcast_packets); 11908 11909 stats->tx_packets = old_stats->tx_packets + 11910 get_stat64(&hw_stats->tx_ucast_packets) + 11911 get_stat64(&hw_stats->tx_mcast_packets) + 11912 get_stat64(&hw_stats->tx_bcast_packets); 11913 11914 stats->rx_bytes = old_stats->rx_bytes + 11915 get_stat64(&hw_stats->rx_octets); 11916 stats->tx_bytes = old_stats->tx_bytes + 11917 get_stat64(&hw_stats->tx_octets); 11918 11919 stats->rx_errors = old_stats->rx_errors + 11920 get_stat64(&hw_stats->rx_errors); 11921 stats->tx_errors = old_stats->tx_errors + 11922 get_stat64(&hw_stats->tx_errors) + 11923 get_stat64(&hw_stats->tx_mac_errors) + 11924 get_stat64(&hw_stats->tx_carrier_sense_errors) + 11925 get_stat64(&hw_stats->tx_discards); 11926 11927 stats->multicast = old_stats->multicast + 11928 get_stat64(&hw_stats->rx_mcast_packets); 11929 stats->collisions = old_stats->collisions + 11930 get_stat64(&hw_stats->tx_collisions); 11931 11932 stats->rx_length_errors = old_stats->rx_length_errors + 11933 get_stat64(&hw_stats->rx_frame_too_long_errors) + 11934 get_stat64(&hw_stats->rx_undersize_packets); 11935 11936 stats->rx_frame_errors = old_stats->rx_frame_errors + 11937 get_stat64(&hw_stats->rx_align_errors); 11938 stats->tx_aborted_errors = old_stats->tx_aborted_errors + 11939 get_stat64(&hw_stats->tx_discards); 11940 stats->tx_carrier_errors = old_stats->tx_carrier_errors + 11941 get_stat64(&hw_stats->tx_carrier_sense_errors); 11942 11943 stats->rx_crc_errors = old_stats->rx_crc_errors + 11944 tg3_calc_crc_errors(tp); 11945 11946 stats->rx_missed_errors = old_stats->rx_missed_errors + 11947 get_stat64(&hw_stats->rx_discards); 11948 11949 stats->rx_dropped = tp->rx_dropped; 11950 stats->tx_dropped = tp->tx_dropped; 11951 } 11952 11953 static int tg3_get_regs_len(struct net_device *dev) 11954 { 11955 return TG3_REG_BLK_SIZE; 11956 } 11957 11958 static void tg3_get_regs(struct net_device *dev, 11959 struct ethtool_regs *regs, void *_p) 11960 { 11961 struct tg3 *tp = netdev_priv(dev); 11962 11963 regs->version = 0; 11964 11965 memset(_p, 0, TG3_REG_BLK_SIZE); 11966 11967 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 11968 return; 11969 11970 tg3_full_lock(tp, 0); 11971 11972 tg3_dump_legacy_regs(tp, (u32 *)_p); 11973 11974 tg3_full_unlock(tp); 11975 } 11976 11977 static int tg3_get_eeprom_len(struct net_device *dev) 11978 { 11979 struct tg3 *tp = netdev_priv(dev); 11980 11981 return tp->nvram_size; 11982 } 11983 11984 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 11985 { 11986 struct tg3 *tp = netdev_priv(dev); 11987 int ret, cpmu_restore = 0; 11988 u8 *pd; 11989 u32 i, offset, len, 
b_offset, b_count, cpmu_val = 0; 11990 __be32 val; 11991 11992 if (tg3_flag(tp, NO_NVRAM)) 11993 return -EINVAL; 11994 11995 offset = eeprom->offset; 11996 len = eeprom->len; 11997 eeprom->len = 0; 11998 11999 eeprom->magic = TG3_EEPROM_MAGIC; 12000 12001 /* Override clock, link aware and link idle modes */ 12002 if (tg3_flag(tp, CPMU_PRESENT)) { 12003 cpmu_val = tr32(TG3_CPMU_CTRL); 12004 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE | 12005 CPMU_CTRL_LINK_IDLE_MODE)) { 12006 tw32(TG3_CPMU_CTRL, cpmu_val & 12007 ~(CPMU_CTRL_LINK_AWARE_MODE | 12008 CPMU_CTRL_LINK_IDLE_MODE)); 12009 cpmu_restore = 1; 12010 } 12011 } 12012 tg3_override_clk(tp); 12013 12014 if (offset & 3) { 12015 /* adjustments to start on required 4 byte boundary */ 12016 b_offset = offset & 3; 12017 b_count = 4 - b_offset; 12018 if (b_count > len) { 12019 /* i.e. offset=1 len=2 */ 12020 b_count = len; 12021 } 12022 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); 12023 if (ret) 12024 goto eeprom_done; 12025 memcpy(data, ((char *)&val) + b_offset, b_count); 12026 len -= b_count; 12027 offset += b_count; 12028 eeprom->len += b_count; 12029 } 12030 12031 /* read bytes up to the last 4 byte boundary */ 12032 pd = &data[eeprom->len]; 12033 for (i = 0; i < (len - (len & 3)); i += 4) { 12034 ret = tg3_nvram_read_be32(tp, offset + i, &val); 12035 if (ret) { 12036 if (i) 12037 i -= 4; 12038 eeprom->len += i; 12039 goto eeprom_done; 12040 } 12041 memcpy(pd + i, &val, 4); 12042 if (need_resched()) { 12043 if (signal_pending(current)) { 12044 eeprom->len += i; 12045 ret = -EINTR; 12046 goto eeprom_done; 12047 } 12048 cond_resched(); 12049 } 12050 } 12051 eeprom->len += i; 12052 12053 if (len & 3) { 12054 /* read last bytes not ending on 4 byte boundary */ 12055 pd = &data[eeprom->len]; 12056 b_count = len & 3; 12057 b_offset = offset + len - b_count; 12058 ret = tg3_nvram_read_be32(tp, b_offset, &val); 12059 if (ret) 12060 goto eeprom_done; 12061 memcpy(pd, &val, b_count); 12062 eeprom->len += b_count; 12063 } 12064 ret = 0; 12065 12066 eeprom_done: 12067 /* Restore clock, link aware and link idle modes */ 12068 tg3_restore_clk(tp); 12069 if (cpmu_restore) 12070 tw32(TG3_CPMU_CTRL, cpmu_val); 12071 12072 return ret; 12073 } 12074 12075 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 12076 { 12077 struct tg3 *tp = netdev_priv(dev); 12078 int ret; 12079 u32 offset, len, b_offset, odd_len; 12080 u8 *buf; 12081 __be32 start = 0, end; 12082 12083 if (tg3_flag(tp, NO_NVRAM) || 12084 eeprom->magic != TG3_EEPROM_MAGIC) 12085 return -EINVAL; 12086 12087 offset = eeprom->offset; 12088 len = eeprom->len; 12089 12090 if ((b_offset = (offset & 3))) { 12091 /* adjustments to start on required 4 byte boundary */ 12092 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start); 12093 if (ret) 12094 return ret; 12095 len += b_offset; 12096 offset &= ~3; 12097 if (len < 4) 12098 len = 4; 12099 } 12100 12101 odd_len = 0; 12102 if (len & 3) { 12103 /* adjustments to end on required 4 byte boundary */ 12104 odd_len = 1; 12105 len = (len + 3) & ~3; 12106 ret = tg3_nvram_read_be32(tp, offset+len-4, &end); 12107 if (ret) 12108 return ret; 12109 } 12110 12111 buf = data; 12112 if (b_offset || odd_len) { 12113 buf = kmalloc(len, GFP_KERNEL); 12114 if (!buf) 12115 return -ENOMEM; 12116 if (b_offset) 12117 memcpy(buf, &start, 4); 12118 if (odd_len) 12119 memcpy(buf+len-4, &end, 4); 12120 memcpy(buf + b_offset, data, eeprom->len); 12121 } 12122 12123 ret = tg3_nvram_write_block(tp, offset, len, buf); 12124 12125 if (buf != 
data) 12126 kfree(buf); 12127 12128 return ret; 12129 } 12130 12131 static int tg3_get_link_ksettings(struct net_device *dev, 12132 struct ethtool_link_ksettings *cmd) 12133 { 12134 struct tg3 *tp = netdev_priv(dev); 12135 u32 supported, advertising; 12136 12137 if (tg3_flag(tp, USE_PHYLIB)) { 12138 struct phy_device *phydev; 12139 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12140 return -EAGAIN; 12141 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12142 phy_ethtool_ksettings_get(phydev, cmd); 12143 12144 return 0; 12145 } 12146 12147 supported = (SUPPORTED_Autoneg); 12148 12149 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) 12150 supported |= (SUPPORTED_1000baseT_Half | 12151 SUPPORTED_1000baseT_Full); 12152 12153 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 12154 supported |= (SUPPORTED_100baseT_Half | 12155 SUPPORTED_100baseT_Full | 12156 SUPPORTED_10baseT_Half | 12157 SUPPORTED_10baseT_Full | 12158 SUPPORTED_TP); 12159 cmd->base.port = PORT_TP; 12160 } else { 12161 supported |= SUPPORTED_FIBRE; 12162 cmd->base.port = PORT_FIBRE; 12163 } 12164 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 12165 supported); 12166 12167 advertising = tp->link_config.advertising; 12168 if (tg3_flag(tp, PAUSE_AUTONEG)) { 12169 if (tp->link_config.flowctrl & FLOW_CTRL_RX) { 12170 if (tp->link_config.flowctrl & FLOW_CTRL_TX) { 12171 advertising |= ADVERTISED_Pause; 12172 } else { 12173 advertising |= ADVERTISED_Pause | 12174 ADVERTISED_Asym_Pause; 12175 } 12176 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) { 12177 advertising |= ADVERTISED_Asym_Pause; 12178 } 12179 } 12180 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 12181 advertising); 12182 12183 if (netif_running(dev) && tp->link_up) { 12184 cmd->base.speed = tp->link_config.active_speed; 12185 cmd->base.duplex = tp->link_config.active_duplex; 12186 ethtool_convert_legacy_u32_to_link_mode( 12187 cmd->link_modes.lp_advertising, 12188 tp->link_config.rmt_adv); 12189 12190 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 12191 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE) 12192 cmd->base.eth_tp_mdix = ETH_TP_MDI_X; 12193 else 12194 cmd->base.eth_tp_mdix = ETH_TP_MDI; 12195 } 12196 } else { 12197 cmd->base.speed = SPEED_UNKNOWN; 12198 cmd->base.duplex = DUPLEX_UNKNOWN; 12199 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; 12200 } 12201 cmd->base.phy_address = tp->phy_addr; 12202 cmd->base.autoneg = tp->link_config.autoneg; 12203 return 0; 12204 } 12205 12206 static int tg3_set_link_ksettings(struct net_device *dev, 12207 const struct ethtool_link_ksettings *cmd) 12208 { 12209 struct tg3 *tp = netdev_priv(dev); 12210 u32 speed = cmd->base.speed; 12211 u32 advertising; 12212 12213 if (tg3_flag(tp, USE_PHYLIB)) { 12214 struct phy_device *phydev; 12215 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12216 return -EAGAIN; 12217 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12218 return phy_ethtool_ksettings_set(phydev, cmd); 12219 } 12220 12221 if (cmd->base.autoneg != AUTONEG_ENABLE && 12222 cmd->base.autoneg != AUTONEG_DISABLE) 12223 return -EINVAL; 12224 12225 if (cmd->base.autoneg == AUTONEG_DISABLE && 12226 cmd->base.duplex != DUPLEX_FULL && 12227 cmd->base.duplex != DUPLEX_HALF) 12228 return -EINVAL; 12229 12230 ethtool_convert_link_mode_to_legacy_u32(&advertising, 12231 cmd->link_modes.advertising); 12232 12233 if (cmd->base.autoneg == AUTONEG_ENABLE) { 12234 u32 mask = ADVERTISED_Autoneg | 12235 ADVERTISED_Pause | 12236 ADVERTISED_Asym_Pause; 12237 12238 if (!(tp->phy_flags & 
TG3_PHYFLG_10_100_ONLY)) 12239 mask |= ADVERTISED_1000baseT_Half | 12240 ADVERTISED_1000baseT_Full; 12241 12242 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 12243 mask |= ADVERTISED_100baseT_Half | 12244 ADVERTISED_100baseT_Full | 12245 ADVERTISED_10baseT_Half | 12246 ADVERTISED_10baseT_Full | 12247 ADVERTISED_TP; 12248 else 12249 mask |= ADVERTISED_FIBRE; 12250 12251 if (advertising & ~mask) 12252 return -EINVAL; 12253 12254 mask &= (ADVERTISED_1000baseT_Half | 12255 ADVERTISED_1000baseT_Full | 12256 ADVERTISED_100baseT_Half | 12257 ADVERTISED_100baseT_Full | 12258 ADVERTISED_10baseT_Half | 12259 ADVERTISED_10baseT_Full); 12260 12261 advertising &= mask; 12262 } else { 12263 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) { 12264 if (speed != SPEED_1000) 12265 return -EINVAL; 12266 12267 if (cmd->base.duplex != DUPLEX_FULL) 12268 return -EINVAL; 12269 } else { 12270 if (speed != SPEED_100 && 12271 speed != SPEED_10) 12272 return -EINVAL; 12273 } 12274 } 12275 12276 tg3_full_lock(tp, 0); 12277 12278 tp->link_config.autoneg = cmd->base.autoneg; 12279 if (cmd->base.autoneg == AUTONEG_ENABLE) { 12280 tp->link_config.advertising = (advertising | 12281 ADVERTISED_Autoneg); 12282 tp->link_config.speed = SPEED_UNKNOWN; 12283 tp->link_config.duplex = DUPLEX_UNKNOWN; 12284 } else { 12285 tp->link_config.advertising = 0; 12286 tp->link_config.speed = speed; 12287 tp->link_config.duplex = cmd->base.duplex; 12288 } 12289 12290 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 12291 12292 tg3_warn_mgmt_link_flap(tp); 12293 12294 if (netif_running(dev)) 12295 tg3_setup_phy(tp, true); 12296 12297 tg3_full_unlock(tp); 12298 12299 return 0; 12300 } 12301 12302 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 12303 { 12304 struct tg3 *tp = netdev_priv(dev); 12305 12306 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); 12307 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version)); 12308 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info)); 12309 } 12310 12311 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 12312 { 12313 struct tg3 *tp = netdev_priv(dev); 12314 12315 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev)) 12316 wol->supported = WAKE_MAGIC; 12317 else 12318 wol->supported = 0; 12319 wol->wolopts = 0; 12320 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev)) 12321 wol->wolopts = WAKE_MAGIC; 12322 memset(&wol->sopass, 0, sizeof(wol->sopass)); 12323 } 12324 12325 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 12326 { 12327 struct tg3 *tp = netdev_priv(dev); 12328 struct device *dp = &tp->pdev->dev; 12329 12330 if (wol->wolopts & ~WAKE_MAGIC) 12331 return -EINVAL; 12332 if ((wol->wolopts & WAKE_MAGIC) && 12333 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp))) 12334 return -EINVAL; 12335 12336 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC); 12337 12338 if (device_may_wakeup(dp)) 12339 tg3_flag_set(tp, WOL_ENABLE); 12340 else 12341 tg3_flag_clear(tp, WOL_ENABLE); 12342 12343 return 0; 12344 } 12345 12346 static u32 tg3_get_msglevel(struct net_device *dev) 12347 { 12348 struct tg3 *tp = netdev_priv(dev); 12349 return tp->msg_enable; 12350 } 12351 12352 static void tg3_set_msglevel(struct net_device *dev, u32 value) 12353 { 12354 struct tg3 *tp = netdev_priv(dev); 12355 tp->msg_enable = value; 12356 } 12357 12358 static int tg3_nway_reset(struct net_device *dev) 12359 { 12360 struct tg3 *tp = netdev_priv(dev); 12361 int r; 12362 12363 if 
(!netif_running(dev)) 12364 return -EAGAIN; 12365 12366 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 12367 return -EINVAL; 12368 12369 tg3_warn_mgmt_link_flap(tp); 12370 12371 if (tg3_flag(tp, USE_PHYLIB)) { 12372 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12373 return -EAGAIN; 12374 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 12375 } else { 12376 u32 bmcr; 12377 12378 spin_lock_bh(&tp->lock); 12379 r = -EINVAL; 12380 tg3_readphy(tp, MII_BMCR, &bmcr); 12381 if (!tg3_readphy(tp, MII_BMCR, &bmcr) && 12382 ((bmcr & BMCR_ANENABLE) || 12383 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) { 12384 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | 12385 BMCR_ANENABLE); 12386 r = 0; 12387 } 12388 spin_unlock_bh(&tp->lock); 12389 } 12390 12391 return r; 12392 } 12393 12394 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) 12395 { 12396 struct tg3 *tp = netdev_priv(dev); 12397 12398 ering->rx_max_pending = tp->rx_std_ring_mask; 12399 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12400 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask; 12401 else 12402 ering->rx_jumbo_max_pending = 0; 12403 12404 ering->tx_max_pending = TG3_TX_RING_SIZE - 1; 12405 12406 ering->rx_pending = tp->rx_pending; 12407 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12408 ering->rx_jumbo_pending = tp->rx_jumbo_pending; 12409 else 12410 ering->rx_jumbo_pending = 0; 12411 12412 ering->tx_pending = tp->napi[0].tx_pending; 12413 } 12414 12415 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) 12416 { 12417 struct tg3 *tp = netdev_priv(dev); 12418 int i, irq_sync = 0, err = 0; 12419 bool reset_phy = false; 12420 12421 if ((ering->rx_pending > tp->rx_std_ring_mask) || 12422 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || 12423 (ering->tx_pending > TG3_TX_RING_SIZE - 1) || 12424 (ering->tx_pending <= MAX_SKB_FRAGS) || 12425 (tg3_flag(tp, TSO_BUG) && 12426 (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) 12427 return -EINVAL; 12428 12429 if (netif_running(dev)) { 12430 tg3_phy_stop(tp); 12431 tg3_netif_stop(tp); 12432 irq_sync = 1; 12433 } 12434 12435 tg3_full_lock(tp, irq_sync); 12436 12437 tp->rx_pending = ering->rx_pending; 12438 12439 if (tg3_flag(tp, MAX_RXPEND_64) && 12440 tp->rx_pending > 63) 12441 tp->rx_pending = 63; 12442 12443 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12444 tp->rx_jumbo_pending = ering->rx_jumbo_pending; 12445 12446 for (i = 0; i < tp->irq_max; i++) 12447 tp->napi[i].tx_pending = ering->tx_pending; 12448 12449 if (netif_running(dev)) { 12450 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 12451 /* Reset PHY to avoid PHY lock up */ 12452 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 12453 tg3_asic_rev(tp) == ASIC_REV_5719 || 12454 tg3_asic_rev(tp) == ASIC_REV_5720) 12455 reset_phy = true; 12456 12457 err = tg3_restart_hw(tp, reset_phy); 12458 if (!err) 12459 tg3_netif_start(tp); 12460 } 12461 12462 tg3_full_unlock(tp); 12463 12464 if (irq_sync && !err) 12465 tg3_phy_start(tp); 12466 12467 return err; 12468 } 12469 12470 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 12471 { 12472 struct tg3 *tp = netdev_priv(dev); 12473 12474 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG); 12475 12476 if (tp->link_config.flowctrl & FLOW_CTRL_RX) 12477 epause->rx_pause = 1; 12478 else 12479 epause->rx_pause = 0; 12480 12481 if (tp->link_config.flowctrl & FLOW_CTRL_TX) 12482 epause->tx_pause = 1; 12483 else 12484 epause->tx_pause = 0; 12485 } 12486 12487 static int tg3_set_pauseparam(struct net_device *dev, struct 
ethtool_pauseparam *epause) 12488 { 12489 struct tg3 *tp = netdev_priv(dev); 12490 int err = 0; 12491 bool reset_phy = false; 12492 12493 if (tp->link_config.autoneg == AUTONEG_ENABLE) 12494 tg3_warn_mgmt_link_flap(tp); 12495 12496 if (tg3_flag(tp, USE_PHYLIB)) { 12497 struct phy_device *phydev; 12498 12499 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12500 12501 if (!phy_validate_pause(phydev, epause)) 12502 return -EINVAL; 12503 12504 tp->link_config.flowctrl = 0; 12505 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause); 12506 if (epause->rx_pause) { 12507 tp->link_config.flowctrl |= FLOW_CTRL_RX; 12508 12509 if (epause->tx_pause) { 12510 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12511 } 12512 } else if (epause->tx_pause) { 12513 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12514 } 12515 12516 if (epause->autoneg) 12517 tg3_flag_set(tp, PAUSE_AUTONEG); 12518 else 12519 tg3_flag_clear(tp, PAUSE_AUTONEG); 12520 12521 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { 12522 if (phydev->autoneg) { 12523 /* phy_set_asym_pause() will 12524 * renegotiate the link to inform our 12525 * link partner of our flow control 12526 * settings, even if the flow control 12527 * is forced. Let tg3_adjust_link() 12528 * do the final flow control setup. 12529 */ 12530 return 0; 12531 } 12532 12533 if (!epause->autoneg) 12534 tg3_setup_flow_control(tp, 0, 0); 12535 } 12536 } else { 12537 int irq_sync = 0; 12538 12539 if (netif_running(dev)) { 12540 tg3_netif_stop(tp); 12541 irq_sync = 1; 12542 } 12543 12544 tg3_full_lock(tp, irq_sync); 12545 12546 if (epause->autoneg) 12547 tg3_flag_set(tp, PAUSE_AUTONEG); 12548 else 12549 tg3_flag_clear(tp, PAUSE_AUTONEG); 12550 if (epause->rx_pause) 12551 tp->link_config.flowctrl |= FLOW_CTRL_RX; 12552 else 12553 tp->link_config.flowctrl &= ~FLOW_CTRL_RX; 12554 if (epause->tx_pause) 12555 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12556 else 12557 tp->link_config.flowctrl &= ~FLOW_CTRL_TX; 12558 12559 if (netif_running(dev)) { 12560 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 12561 /* Reset PHY to avoid PHY lock up */ 12562 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 12563 tg3_asic_rev(tp) == ASIC_REV_5719 || 12564 tg3_asic_rev(tp) == ASIC_REV_5720) 12565 reset_phy = true; 12566 12567 err = tg3_restart_hw(tp, reset_phy); 12568 if (!err) 12569 tg3_netif_start(tp); 12570 } 12571 12572 tg3_full_unlock(tp); 12573 } 12574 12575 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 12576 12577 return err; 12578 } 12579 12580 static int tg3_get_sset_count(struct net_device *dev, int sset) 12581 { 12582 switch (sset) { 12583 case ETH_SS_TEST: 12584 return TG3_NUM_TEST; 12585 case ETH_SS_STATS: 12586 return TG3_NUM_STATS; 12587 default: 12588 return -EOPNOTSUPP; 12589 } 12590 } 12591 12592 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, 12593 u32 *rules __always_unused) 12594 { 12595 struct tg3 *tp = netdev_priv(dev); 12596 12597 if (!tg3_flag(tp, SUPPORT_MSIX)) 12598 return -EOPNOTSUPP; 12599 12600 switch (info->cmd) { 12601 case ETHTOOL_GRXRINGS: 12602 if (netif_running(tp->dev)) 12603 info->data = tp->rxq_cnt; 12604 else { 12605 info->data = num_online_cpus(); 12606 if (info->data > TG3_RSS_MAX_NUM_QS) 12607 info->data = TG3_RSS_MAX_NUM_QS; 12608 } 12609 12610 return 0; 12611 12612 default: 12613 return -EOPNOTSUPP; 12614 } 12615 } 12616 12617 static u32 tg3_get_rxfh_indir_size(struct net_device *dev) 12618 { 12619 u32 size = 0; 12620 struct tg3 *tp = netdev_priv(dev); 12621 12622 if (tg3_flag(tp, SUPPORT_MSIX)) 12623 size = TG3_RSS_INDIR_TBL_SIZE; 12624 
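/* size stays zero when MSI-X (and hence RSS) is unavailable; a zero indirection-table size tells the ethtool core that no RSS table is exposed for this device. */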
12625 return size; 12626 } 12627 12628 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) 12629 { 12630 struct tg3 *tp = netdev_priv(dev); 12631 int i; 12632 12633 if (hfunc) 12634 *hfunc = ETH_RSS_HASH_TOP; 12635 if (!indir) 12636 return 0; 12637 12638 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) 12639 indir[i] = tp->rss_ind_tbl[i]; 12640 12641 return 0; 12642 } 12643 12644 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, 12645 const u8 hfunc) 12646 { 12647 struct tg3 *tp = netdev_priv(dev); 12648 size_t i; 12649 12650 /* We require at least one supported parameter to be changed and no 12651 * change in any of the unsupported parameters 12652 */ 12653 if (key || 12654 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) 12655 return -EOPNOTSUPP; 12656 12657 if (!indir) 12658 return 0; 12659 12660 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) 12661 tp->rss_ind_tbl[i] = indir[i]; 12662 12663 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS)) 12664 return 0; 12665 12666 /* It is legal to write the indirection 12667 * table while the device is running. 12668 */ 12669 tg3_full_lock(tp, 0); 12670 tg3_rss_write_indir_tbl(tp); 12671 tg3_full_unlock(tp); 12672 12673 return 0; 12674 } 12675 12676 static void tg3_get_channels(struct net_device *dev, 12677 struct ethtool_channels *channel) 12678 { 12679 struct tg3 *tp = netdev_priv(dev); 12680 u32 deflt_qs = netif_get_num_default_rss_queues(); 12681 12682 channel->max_rx = tp->rxq_max; 12683 channel->max_tx = tp->txq_max; 12684 12685 if (netif_running(dev)) { 12686 channel->rx_count = tp->rxq_cnt; 12687 channel->tx_count = tp->txq_cnt; 12688 } else { 12689 if (tp->rxq_req) 12690 channel->rx_count = tp->rxq_req; 12691 else 12692 channel->rx_count = min(deflt_qs, tp->rxq_max); 12693 12694 if (tp->txq_req) 12695 channel->tx_count = tp->txq_req; 12696 else 12697 channel->tx_count = min(deflt_qs, tp->txq_max); 12698 } 12699 } 12700 12701 static int tg3_set_channels(struct net_device *dev, 12702 struct ethtool_channels *channel) 12703 { 12704 struct tg3 *tp = netdev_priv(dev); 12705 12706 if (!tg3_flag(tp, SUPPORT_MSIX)) 12707 return -EOPNOTSUPP; 12708 12709 if (channel->rx_count > tp->rxq_max || 12710 channel->tx_count > tp->txq_max) 12711 return -EINVAL; 12712 12713 tp->rxq_req = channel->rx_count; 12714 tp->txq_req = channel->tx_count; 12715 12716 if (!netif_running(dev)) 12717 return 0; 12718 12719 tg3_stop(tp); 12720 12721 tg3_carrier_off(tp); 12722 12723 tg3_start(tp, true, false, false); 12724 12725 return 0; 12726 } 12727 12728 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 12729 { 12730 switch (stringset) { 12731 case ETH_SS_STATS: 12732 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); 12733 break; 12734 case ETH_SS_TEST: 12735 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys)); 12736 break; 12737 default: 12738 WARN_ON(1); /* we need a WARN() */ 12739 break; 12740 } 12741 } 12742 12743 static int tg3_set_phys_id(struct net_device *dev, 12744 enum ethtool_phys_id_state state) 12745 { 12746 struct tg3 *tp = netdev_priv(dev); 12747 12748 switch (state) { 12749 case ETHTOOL_ID_ACTIVE: 12750 return 1; /* cycle on/off once per second */ 12751 12752 case ETHTOOL_ID_ON: 12753 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | 12754 LED_CTRL_1000MBPS_ON | 12755 LED_CTRL_100MBPS_ON | 12756 LED_CTRL_10MBPS_ON | 12757 LED_CTRL_TRAFFIC_OVERRIDE | 12758 LED_CTRL_TRAFFIC_BLINK | 12759 LED_CTRL_TRAFFIC_LED); 12760 break; 12761 12762 case 
ETHTOOL_ID_OFF: 12763 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | 12764 LED_CTRL_TRAFFIC_OVERRIDE); 12765 break; 12766 12767 case ETHTOOL_ID_INACTIVE: 12768 tw32(MAC_LED_CTRL, tp->led_ctrl); 12769 break; 12770 } 12771 12772 return 0; 12773 } 12774 12775 static void tg3_get_ethtool_stats(struct net_device *dev, 12776 struct ethtool_stats *estats, u64 *tmp_stats) 12777 { 12778 struct tg3 *tp = netdev_priv(dev); 12779 12780 if (tp->hw_stats) 12781 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats); 12782 else 12783 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats)); 12784 } 12785 12786 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen) 12787 { 12788 int i; 12789 __be32 *buf; 12790 u32 offset = 0, len = 0; 12791 u32 magic, val; 12792 12793 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic)) 12794 return NULL; 12795 12796 if (magic == TG3_EEPROM_MAGIC) { 12797 for (offset = TG3_NVM_DIR_START; 12798 offset < TG3_NVM_DIR_END; 12799 offset += TG3_NVM_DIRENT_SIZE) { 12800 if (tg3_nvram_read(tp, offset, &val)) 12801 return NULL; 12802 12803 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == 12804 TG3_NVM_DIRTYPE_EXTVPD) 12805 break; 12806 } 12807 12808 if (offset != TG3_NVM_DIR_END) { 12809 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4; 12810 if (tg3_nvram_read(tp, offset + 4, &offset)) 12811 return NULL; 12812 12813 offset = tg3_nvram_logical_addr(tp, offset); 12814 } 12815 12816 if (!offset || !len) { 12817 offset = TG3_NVM_VPD_OFF; 12818 len = TG3_NVM_VPD_LEN; 12819 } 12820 12821 buf = kmalloc(len, GFP_KERNEL); 12822 if (!buf) 12823 return NULL; 12824 12825 for (i = 0; i < len; i += 4) { 12826 /* The data is in little-endian format in NVRAM. 12827 * Use the big-endian read routines to preserve 12828 * the byte order as it exists in NVRAM. 
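* The net effect, as these helpers are used here, is that buf[] holds the raw NVRAM byte stream unchanged on both little- and big-endian hosts, which is what the VPD parsing below expects.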
12829 */ 12830 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4])) 12831 goto error; 12832 } 12833 *vpdlen = len; 12834 } else { 12835 buf = pci_vpd_alloc(tp->pdev, vpdlen); 12836 if (IS_ERR(buf)) 12837 return NULL; 12838 } 12839 12840 return buf; 12841 12842 error: 12843 kfree(buf); 12844 return NULL; 12845 } 12846 12847 #define NVRAM_TEST_SIZE 0x100 12848 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14 12849 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18 12850 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c 12851 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20 12852 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24 12853 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50 12854 #define NVRAM_SELFBOOT_HW_SIZE 0x20 12855 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c 12856 12857 static int tg3_test_nvram(struct tg3 *tp) 12858 { 12859 u32 csum, magic; 12860 __be32 *buf; 12861 int i, j, k, err = 0, size; 12862 unsigned int len; 12863 12864 if (tg3_flag(tp, NO_NVRAM)) 12865 return 0; 12866 12867 if (tg3_nvram_read(tp, 0, &magic) != 0) 12868 return -EIO; 12869 12870 if (magic == TG3_EEPROM_MAGIC) 12871 size = NVRAM_TEST_SIZE; 12872 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) { 12873 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) == 12874 TG3_EEPROM_SB_FORMAT_1) { 12875 switch (magic & TG3_EEPROM_SB_REVISION_MASK) { 12876 case TG3_EEPROM_SB_REVISION_0: 12877 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE; 12878 break; 12879 case TG3_EEPROM_SB_REVISION_2: 12880 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE; 12881 break; 12882 case TG3_EEPROM_SB_REVISION_3: 12883 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE; 12884 break; 12885 case TG3_EEPROM_SB_REVISION_4: 12886 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE; 12887 break; 12888 case TG3_EEPROM_SB_REVISION_5: 12889 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE; 12890 break; 12891 case TG3_EEPROM_SB_REVISION_6: 12892 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE; 12893 break; 12894 default: 12895 return -EIO; 12896 } 12897 } else 12898 return 0; 12899 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) 12900 size = NVRAM_SELFBOOT_HW_SIZE; 12901 else 12902 return -EIO; 12903 12904 buf = kmalloc(size, GFP_KERNEL); 12905 if (buf == NULL) 12906 return -ENOMEM; 12907 12908 err = -EIO; 12909 for (i = 0, j = 0; i < size; i += 4, j++) { 12910 err = tg3_nvram_read_be32(tp, i, &buf[j]); 12911 if (err) 12912 break; 12913 } 12914 if (i < size) 12915 goto out; 12916 12917 /* Selfboot format */ 12918 magic = be32_to_cpu(buf[0]); 12919 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == 12920 TG3_EEPROM_MAGIC_FW) { 12921 u8 *buf8 = (u8 *) buf, csum8 = 0; 12922 12923 if ((magic & TG3_EEPROM_SB_REVISION_MASK) == 12924 TG3_EEPROM_SB_REVISION_2) { 12925 /* For rev 2, the csum doesn't include the MBA. */ 12926 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++) 12927 csum8 += buf8[i]; 12928 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++) 12929 csum8 += buf8[i]; 12930 } else { 12931 for (i = 0; i < size; i++) 12932 csum8 += buf8[i]; 12933 } 12934 12935 if (csum8 == 0) { 12936 err = 0; 12937 goto out; 12938 } 12939 12940 err = -EIO; 12941 goto out; 12942 } 12943 12944 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == 12945 TG3_EEPROM_MAGIC_HW) { 12946 u8 data[NVRAM_SELFBOOT_DATA_SIZE]; 12947 u8 parity[NVRAM_SELFBOOT_DATA_SIZE]; 12948 u8 *buf8 = (u8 *) buf; 12949 12950 /* Separate the parity bits and the data bytes. 
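* In this HW selfboot layout the parity bits for the 0x1c data bytes are packed into image bytes 0, 8, 16 and 17; the loop below unpacks them so that each data byte can be verified for odd parity.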
*/ 12951 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) { 12952 if ((i == 0) || (i == 8)) { 12953 int l; 12954 u8 msk; 12955 12956 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1) 12957 parity[k++] = buf8[i] & msk; 12958 i++; 12959 } else if (i == 16) { 12960 int l; 12961 u8 msk; 12962 12963 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1) 12964 parity[k++] = buf8[i] & msk; 12965 i++; 12966 12967 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1) 12968 parity[k++] = buf8[i] & msk; 12969 i++; 12970 } 12971 data[j++] = buf8[i]; 12972 } 12973 12974 err = -EIO; 12975 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) { 12976 u8 hw8 = hweight8(data[i]); 12977 12978 if ((hw8 & 0x1) && parity[i]) 12979 goto out; 12980 else if (!(hw8 & 0x1) && !parity[i]) 12981 goto out; 12982 } 12983 err = 0; 12984 goto out; 12985 } 12986 12987 err = -EIO; 12988 12989 /* Bootstrap checksum at offset 0x10 */ 12990 csum = calc_crc((unsigned char *) buf, 0x10); 12991 if (csum != le32_to_cpu(buf[0x10/4])) 12992 goto out; 12993 12994 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ 12995 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); 12996 if (csum != le32_to_cpu(buf[0xfc/4])) 12997 goto out; 12998 12999 kfree(buf); 13000 13001 buf = tg3_vpd_readblock(tp, &len); 13002 if (!buf) 13003 return -ENOMEM; 13004 13005 err = pci_vpd_check_csum(buf, len); 13006 /* go on if no checksum found */ 13007 if (err == 1) 13008 err = 0; 13009 out: 13010 kfree(buf); 13011 return err; 13012 } 13013 13014 #define TG3_SERDES_TIMEOUT_SEC 2 13015 #define TG3_COPPER_TIMEOUT_SEC 6 13016 13017 static int tg3_test_link(struct tg3 *tp) 13018 { 13019 int i, max; 13020 13021 if (!netif_running(tp->dev)) 13022 return -ENODEV; 13023 13024 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 13025 max = TG3_SERDES_TIMEOUT_SEC; 13026 else 13027 max = TG3_COPPER_TIMEOUT_SEC; 13028 13029 for (i = 0; i < max; i++) { 13030 if (tp->link_up) 13031 return 0; 13032 13033 if (msleep_interruptible(1000)) 13034 break; 13035 } 13036 13037 return -EIO; 13038 } 13039 13040 /* Only test the commonly used registers */ 13041 static int tg3_test_registers(struct tg3 *tp) 13042 { 13043 int i, is_5705, is_5750; 13044 u32 offset, read_mask, write_mask, val, save_val, read_val; 13045 static struct { 13046 u16 offset; 13047 u16 flags; 13048 #define TG3_FL_5705 0x1 13049 #define TG3_FL_NOT_5705 0x2 13050 #define TG3_FL_NOT_5788 0x4 13051 #define TG3_FL_NOT_5750 0x8 13052 u32 read_mask; 13053 u32 write_mask; 13054 } reg_tbl[] = { 13055 /* MAC Control Registers */ 13056 { MAC_MODE, TG3_FL_NOT_5705, 13057 0x00000000, 0x00ef6f8c }, 13058 { MAC_MODE, TG3_FL_5705, 13059 0x00000000, 0x01ef6b8c }, 13060 { MAC_STATUS, TG3_FL_NOT_5705, 13061 0x03800107, 0x00000000 }, 13062 { MAC_STATUS, TG3_FL_5705, 13063 0x03800100, 0x00000000 }, 13064 { MAC_ADDR_0_HIGH, 0x0000, 13065 0x00000000, 0x0000ffff }, 13066 { MAC_ADDR_0_LOW, 0x0000, 13067 0x00000000, 0xffffffff }, 13068 { MAC_RX_MTU_SIZE, 0x0000, 13069 0x00000000, 0x0000ffff }, 13070 { MAC_TX_MODE, 0x0000, 13071 0x00000000, 0x00000070 }, 13072 { MAC_TX_LENGTHS, 0x0000, 13073 0x00000000, 0x00003fff }, 13074 { MAC_RX_MODE, TG3_FL_NOT_5705, 13075 0x00000000, 0x000007fc }, 13076 { MAC_RX_MODE, TG3_FL_5705, 13077 0x00000000, 0x000007dc }, 13078 { MAC_HASH_REG_0, 0x0000, 13079 0x00000000, 0xffffffff }, 13080 { MAC_HASH_REG_1, 0x0000, 13081 0x00000000, 0xffffffff }, 13082 { MAC_HASH_REG_2, 0x0000, 13083 0x00000000, 0xffffffff }, 13084 { MAC_HASH_REG_3, 0x0000, 13085 0x00000000, 0xffffffff }, 13086 13087 /* Receive Data and Receive BD 
Initiator Control Registers. */ 13088 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705, 13089 0x00000000, 0xffffffff }, 13090 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705, 13091 0x00000000, 0xffffffff }, 13092 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705, 13093 0x00000000, 0x00000003 }, 13094 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705, 13095 0x00000000, 0xffffffff }, 13096 { RCVDBDI_STD_BD+0, 0x0000, 13097 0x00000000, 0xffffffff }, 13098 { RCVDBDI_STD_BD+4, 0x0000, 13099 0x00000000, 0xffffffff }, 13100 { RCVDBDI_STD_BD+8, 0x0000, 13101 0x00000000, 0xffff0002 }, 13102 { RCVDBDI_STD_BD+0xc, 0x0000, 13103 0x00000000, 0xffffffff }, 13104 13105 /* Receive BD Initiator Control Registers. */ 13106 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705, 13107 0x00000000, 0xffffffff }, 13108 { RCVBDI_STD_THRESH, TG3_FL_5705, 13109 0x00000000, 0x000003ff }, 13110 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, 13111 0x00000000, 0xffffffff }, 13112 13113 /* Host Coalescing Control Registers. */ 13114 { HOSTCC_MODE, TG3_FL_NOT_5705, 13115 0x00000000, 0x00000004 }, 13116 { HOSTCC_MODE, TG3_FL_5705, 13117 0x00000000, 0x000000f6 }, 13118 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705, 13119 0x00000000, 0xffffffff }, 13120 { HOSTCC_RXCOL_TICKS, TG3_FL_5705, 13121 0x00000000, 0x000003ff }, 13122 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705, 13123 0x00000000, 0xffffffff }, 13124 { HOSTCC_TXCOL_TICKS, TG3_FL_5705, 13125 0x00000000, 0x000003ff }, 13126 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705, 13127 0x00000000, 0xffffffff }, 13128 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 13129 0x00000000, 0x000000ff }, 13130 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705, 13131 0x00000000, 0xffffffff }, 13132 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 13133 0x00000000, 0x000000ff }, 13134 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, 13135 0x00000000, 0xffffffff }, 13136 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, 13137 0x00000000, 0xffffffff }, 13138 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, 13139 0x00000000, 0xffffffff }, 13140 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 13141 0x00000000, 0x000000ff }, 13142 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, 13143 0x00000000, 0xffffffff }, 13144 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 13145 0x00000000, 0x000000ff }, 13146 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, 13147 0x00000000, 0xffffffff }, 13148 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705, 13149 0x00000000, 0xffffffff }, 13150 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705, 13151 0x00000000, 0xffffffff }, 13152 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, 13153 0x00000000, 0xffffffff }, 13154 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, 13155 0x00000000, 0xffffffff }, 13156 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, 13157 0xffffffff, 0x00000000 }, 13158 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, 13159 0xffffffff, 0x00000000 }, 13160 13161 /* Buffer Manager Control Registers. 
*/ 13162 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750, 13163 0x00000000, 0x007fff80 }, 13164 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750, 13165 0x00000000, 0x007fffff }, 13166 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000, 13167 0x00000000, 0x0000003f }, 13168 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000, 13169 0x00000000, 0x000001ff }, 13170 { BUFMGR_MB_HIGH_WATER, 0x0000, 13171 0x00000000, 0x000001ff }, 13172 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705, 13173 0xffffffff, 0x00000000 }, 13174 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705, 13175 0xffffffff, 0x00000000 }, 13176 13177 /* Mailbox Registers */ 13178 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000, 13179 0x00000000, 0x000001ff }, 13180 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705, 13181 0x00000000, 0x000001ff }, 13182 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000, 13183 0x00000000, 0x000007ff }, 13184 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000, 13185 0x00000000, 0x000001ff }, 13186 13187 { 0xffff, 0x0000, 0x00000000, 0x00000000 }, 13188 }; 13189 13190 is_5705 = is_5750 = 0; 13191 if (tg3_flag(tp, 5705_PLUS)) { 13192 is_5705 = 1; 13193 if (tg3_flag(tp, 5750_PLUS)) 13194 is_5750 = 1; 13195 } 13196 13197 for (i = 0; reg_tbl[i].offset != 0xffff; i++) { 13198 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705)) 13199 continue; 13200 13201 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) 13202 continue; 13203 13204 if (tg3_flag(tp, IS_5788) && 13205 (reg_tbl[i].flags & TG3_FL_NOT_5788)) 13206 continue; 13207 13208 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750)) 13209 continue; 13210 13211 offset = (u32) reg_tbl[i].offset; 13212 read_mask = reg_tbl[i].read_mask; 13213 write_mask = reg_tbl[i].write_mask; 13214 13215 /* Save the original register content */ 13216 save_val = tr32(offset); 13217 13218 /* Determine the read-only value. */ 13219 read_val = save_val & read_mask; 13220 13221 /* Write zero to the register, then make sure the read-only bits 13222 * are not changed and the read/write bits are all zeros. 13223 */ 13224 tw32(offset, 0); 13225 13226 val = tr32(offset); 13227 13228 /* Test the read-only and read/write bits. */ 13229 if (((val & read_mask) != read_val) || (val & write_mask)) 13230 goto out; 13231 13232 /* Write ones to all the bits defined by RdMask and WrMask, then 13233 * make sure the read-only bits are not changed and the 13234 * read/write bits are all ones. 13235 */ 13236 tw32(offset, read_mask | write_mask); 13237 13238 val = tr32(offset); 13239 13240 /* Test the read-only bits. */ 13241 if ((val & read_mask) != read_val) 13242 goto out; 13243 13244 /* Test the read/write bits. 
*/ 13245 if ((val & write_mask) != write_mask) 13246 goto out; 13247 13248 tw32(offset, save_val); 13249 } 13250 13251 return 0; 13252 13253 out: 13254 if (netif_msg_hw(tp)) 13255 netdev_err(tp->dev, 13256 "Register test failed at offset %x\n", offset); 13257 tw32(offset, save_val); 13258 return -EIO; 13259 } 13260 13261 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) 13262 { 13263 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; 13264 int i; 13265 u32 j; 13266 13267 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) { 13268 for (j = 0; j < len; j += 4) { 13269 u32 val; 13270 13271 tg3_write_mem(tp, offset + j, test_pattern[i]); 13272 tg3_read_mem(tp, offset + j, &val); 13273 if (val != test_pattern[i]) 13274 return -EIO; 13275 } 13276 } 13277 return 0; 13278 } 13279 13280 static int tg3_test_memory(struct tg3 *tp) 13281 { 13282 static struct mem_entry { 13283 u32 offset; 13284 u32 len; 13285 } mem_tbl_570x[] = { 13286 { 0x00000000, 0x00b50}, 13287 { 0x00002000, 0x1c000}, 13288 { 0xffffffff, 0x00000} 13289 }, mem_tbl_5705[] = { 13290 { 0x00000100, 0x0000c}, 13291 { 0x00000200, 0x00008}, 13292 { 0x00004000, 0x00800}, 13293 { 0x00006000, 0x01000}, 13294 { 0x00008000, 0x02000}, 13295 { 0x00010000, 0x0e000}, 13296 { 0xffffffff, 0x00000} 13297 }, mem_tbl_5755[] = { 13298 { 0x00000200, 0x00008}, 13299 { 0x00004000, 0x00800}, 13300 { 0x00006000, 0x00800}, 13301 { 0x00008000, 0x02000}, 13302 { 0x00010000, 0x0c000}, 13303 { 0xffffffff, 0x00000} 13304 }, mem_tbl_5906[] = { 13305 { 0x00000200, 0x00008}, 13306 { 0x00004000, 0x00400}, 13307 { 0x00006000, 0x00400}, 13308 { 0x00008000, 0x01000}, 13309 { 0x00010000, 0x01000}, 13310 { 0xffffffff, 0x00000} 13311 }, mem_tbl_5717[] = { 13312 { 0x00000200, 0x00008}, 13313 { 0x00010000, 0x0a000}, 13314 { 0x00020000, 0x13c00}, 13315 { 0xffffffff, 0x00000} 13316 }, mem_tbl_57765[] = { 13317 { 0x00000200, 0x00008}, 13318 { 0x00004000, 0x00800}, 13319 { 0x00006000, 0x09800}, 13320 { 0x00010000, 0x0a000}, 13321 { 0xffffffff, 0x00000} 13322 }; 13323 struct mem_entry *mem_tbl; 13324 int err = 0; 13325 int i; 13326 13327 if (tg3_flag(tp, 5717_PLUS)) 13328 mem_tbl = mem_tbl_5717; 13329 else if (tg3_flag(tp, 57765_CLASS) || 13330 tg3_asic_rev(tp) == ASIC_REV_5762) 13331 mem_tbl = mem_tbl_57765; 13332 else if (tg3_flag(tp, 5755_PLUS)) 13333 mem_tbl = mem_tbl_5755; 13334 else if (tg3_asic_rev(tp) == ASIC_REV_5906) 13335 mem_tbl = mem_tbl_5906; 13336 else if (tg3_flag(tp, 5705_PLUS)) 13337 mem_tbl = mem_tbl_5705; 13338 else 13339 mem_tbl = mem_tbl_570x; 13340 13341 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { 13342 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len); 13343 if (err) 13344 break; 13345 } 13346 13347 return err; 13348 } 13349 13350 #define TG3_TSO_MSS 500 13351 13352 #define TG3_TSO_IP_HDR_LEN 20 13353 #define TG3_TSO_TCP_HDR_LEN 20 13354 #define TG3_TSO_TCP_OPT_LEN 12 13355 13356 static const u8 tg3_tso_header[] = { 13357 0x08, 0x00, 13358 0x45, 0x00, 0x00, 0x00, 13359 0x00, 0x00, 0x40, 0x00, 13360 0x40, 0x06, 0x00, 0x00, 13361 0x0a, 0x00, 0x00, 0x01, 13362 0x0a, 0x00, 0x00, 0x02, 13363 0x0d, 0x00, 0xe0, 0x00, 13364 0x00, 0x00, 0x01, 0x00, 13365 0x00, 0x00, 0x02, 0x00, 13366 0x80, 0x10, 0x10, 0x00, 13367 0x14, 0x09, 0x00, 0x00, 13368 0x01, 0x01, 0x08, 0x0a, 13369 0x11, 0x11, 0x11, 0x11, 13370 0x11, 0x11, 0x11, 0x11, 13371 }; 13372 13373 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) 13374 { 13375 u32 rx_start_idx, rx_idx, tx_idx, opaque_key; 13376 u32 base_flags = 0, mss = 0, 
desc_idx, coal_now, data_off, val; 13377 u32 budget; 13378 struct sk_buff *skb; 13379 u8 *tx_data, *rx_data; 13380 dma_addr_t map; 13381 int num_pkts, tx_len, rx_len, i, err; 13382 struct tg3_rx_buffer_desc *desc; 13383 struct tg3_napi *tnapi, *rnapi; 13384 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; 13385 13386 tnapi = &tp->napi[0]; 13387 rnapi = &tp->napi[0]; 13388 if (tp->irq_cnt > 1) { 13389 if (tg3_flag(tp, ENABLE_RSS)) 13390 rnapi = &tp->napi[1]; 13391 if (tg3_flag(tp, ENABLE_TSS)) 13392 tnapi = &tp->napi[1]; 13393 } 13394 coal_now = tnapi->coal_now | rnapi->coal_now; 13395 13396 err = -EIO; 13397 13398 tx_len = pktsz; 13399 skb = netdev_alloc_skb(tp->dev, tx_len); 13400 if (!skb) 13401 return -ENOMEM; 13402 13403 tx_data = skb_put(skb, tx_len); 13404 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN); 13405 memset(tx_data + ETH_ALEN, 0x0, 8); 13406 13407 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN); 13408 13409 if (tso_loopback) { 13410 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN]; 13411 13412 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN + 13413 TG3_TSO_TCP_OPT_LEN; 13414 13415 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header, 13416 sizeof(tg3_tso_header)); 13417 mss = TG3_TSO_MSS; 13418 13419 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header); 13420 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS); 13421 13422 /* Set the total length field in the IP header */ 13423 iph->tot_len = htons((u16)(mss + hdr_len)); 13424 13425 base_flags = (TXD_FLAG_CPU_PRE_DMA | 13426 TXD_FLAG_CPU_POST_DMA); 13427 13428 if (tg3_flag(tp, HW_TSO_1) || 13429 tg3_flag(tp, HW_TSO_2) || 13430 tg3_flag(tp, HW_TSO_3)) { 13431 struct tcphdr *th; 13432 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN; 13433 th = (struct tcphdr *)&tx_data[val]; 13434 th->check = 0; 13435 } else 13436 base_flags |= TXD_FLAG_TCPUDP_CSUM; 13437 13438 if (tg3_flag(tp, HW_TSO_3)) { 13439 mss |= (hdr_len & 0xc) << 12; 13440 if (hdr_len & 0x10) 13441 base_flags |= 0x00000010; 13442 base_flags |= (hdr_len & 0x3e0) << 5; 13443 } else if (tg3_flag(tp, HW_TSO_2)) 13444 mss |= hdr_len << 9; 13445 else if (tg3_flag(tp, HW_TSO_1) || 13446 tg3_asic_rev(tp) == ASIC_REV_5705) { 13447 mss |= (TG3_TSO_TCP_OPT_LEN << 9); 13448 } else { 13449 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10); 13450 } 13451 13452 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header); 13453 } else { 13454 num_pkts = 1; 13455 data_off = ETH_HLEN; 13456 13457 if (tg3_flag(tp, USE_JUMBO_BDFLAG) && 13458 tx_len > VLAN_ETH_FRAME_LEN) 13459 base_flags |= TXD_FLAG_JMB_PKT; 13460 } 13461 13462 for (i = data_off; i < tx_len; i++) 13463 tx_data[i] = (u8) (i & 0xff); 13464 13465 map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE); 13466 if (dma_mapping_error(&tp->pdev->dev, map)) { 13467 dev_kfree_skb(skb); 13468 return -EIO; 13469 } 13470 13471 val = tnapi->tx_prod; 13472 tnapi->tx_buffers[val].skb = skb; 13473 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map); 13474 13475 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 13476 rnapi->coal_now); 13477 13478 udelay(10); 13479 13480 rx_start_idx = rnapi->hw_status->idx[0].rx_producer; 13481 13482 budget = tg3_tx_avail(tnapi); 13483 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len, 13484 base_flags | TXD_FLAG_END, mss, 0)) { 13485 tnapi->tx_buffers[val].skb = NULL; 13486 dev_kfree_skb(skb); 13487 return -EIO; 13488 } 13489 13490 tnapi->tx_prod++; 13491 13492 /* Sync BD data before updating mailbox */ 13493 wmb(); 13494 13495 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); 13496 
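/* The mailbox read-back below presumably serves as the usual PCI posted-write flush, making sure the chip sees the new producer index before the polling loop starts. */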
tr32_mailbox(tnapi->prodmbox); 13497 13498 udelay(10); 13499 13500 /* 350 usec to allow enough time on some 10/100 Mbps devices. */ 13501 for (i = 0; i < 35; i++) { 13502 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 13503 coal_now); 13504 13505 udelay(10); 13506 13507 tx_idx = tnapi->hw_status->idx[0].tx_consumer; 13508 rx_idx = rnapi->hw_status->idx[0].rx_producer; 13509 if ((tx_idx == tnapi->tx_prod) && 13510 (rx_idx == (rx_start_idx + num_pkts))) 13511 break; 13512 } 13513 13514 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1); 13515 dev_kfree_skb(skb); 13516 13517 if (tx_idx != tnapi->tx_prod) 13518 goto out; 13519 13520 if (rx_idx != rx_start_idx + num_pkts) 13521 goto out; 13522 13523 val = data_off; 13524 while (rx_idx != rx_start_idx) { 13525 desc = &rnapi->rx_rcb[rx_start_idx++]; 13526 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 13527 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 13528 13529 if ((desc->err_vlan & RXD_ERR_MASK) != 0 && 13530 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) 13531 goto out; 13532 13533 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) 13534 - ETH_FCS_LEN; 13535 13536 if (!tso_loopback) { 13537 if (rx_len != tx_len) 13538 goto out; 13539 13540 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) { 13541 if (opaque_key != RXD_OPAQUE_RING_STD) 13542 goto out; 13543 } else { 13544 if (opaque_key != RXD_OPAQUE_RING_JUMBO) 13545 goto out; 13546 } 13547 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && 13548 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK) 13549 >> RXD_TCPCSUM_SHIFT != 0xffff) { 13550 goto out; 13551 } 13552 13553 if (opaque_key == RXD_OPAQUE_RING_STD) { 13554 rx_data = tpr->rx_std_buffers[desc_idx].data; 13555 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], 13556 mapping); 13557 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 13558 rx_data = tpr->rx_jmb_buffers[desc_idx].data; 13559 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx], 13560 mapping); 13561 } else 13562 goto out; 13563 13564 dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len, 13565 DMA_FROM_DEVICE); 13566 13567 rx_data += TG3_RX_OFFSET(tp); 13568 for (i = data_off; i < rx_len; i++, val++) { 13569 if (*(rx_data + i) != (u8) (val & 0xff)) 13570 goto out; 13571 } 13572 } 13573 13574 err = 0; 13575 13576 /* tg3_free_rings will unmap and free the rx_data */ 13577 out: 13578 return err; 13579 } 13580 13581 #define TG3_STD_LOOPBACK_FAILED 1 13582 #define TG3_JMB_LOOPBACK_FAILED 2 13583 #define TG3_TSO_LOOPBACK_FAILED 4 13584 #define TG3_LOOPBACK_FAILED \ 13585 (TG3_STD_LOOPBACK_FAILED | \ 13586 TG3_JMB_LOOPBACK_FAILED | \ 13587 TG3_TSO_LOOPBACK_FAILED) 13588 13589 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk) 13590 { 13591 int err = -EIO; 13592 u32 eee_cap; 13593 u32 jmb_pkt_sz = 9000; 13594 13595 if (tp->dma_limit) 13596 jmb_pkt_sz = tp->dma_limit - ETH_HLEN; 13597 13598 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP; 13599 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; 13600 13601 if (!netif_running(tp->dev)) { 13602 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13603 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13604 if (do_extlpbk) 13605 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13606 goto done; 13607 } 13608 13609 err = tg3_reset_hw(tp, true); 13610 if (err) { 13611 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13612 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13613 if (do_extlpbk) 13614 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13615 goto done; 13616 } 13617 13618 if (tg3_flag(tp, ENABLE_RSS)) { 13619 int 
i; 13620 13621 /* Reroute all rx packets to the 1st queue */ 13622 for (i = MAC_RSS_INDIR_TBL_0; 13623 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4) 13624 tw32(i, 0x0); 13625 } 13626 13627 /* HW errata - mac loopback fails in some cases on 5780. 13628 * Normal traffic and PHY loopback are not affected by 13629 * errata. Also, the MAC loopback test is deprecated for 13630 * all newer ASIC revisions. 13631 */ 13632 if (tg3_asic_rev(tp) != ASIC_REV_5780 && 13633 !tg3_flag(tp, CPMU_PRESENT)) { 13634 tg3_mac_loopback(tp, true); 13635 13636 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13637 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED; 13638 13639 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13640 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13641 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED; 13642 13643 tg3_mac_loopback(tp, false); 13644 } 13645 13646 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 13647 !tg3_flag(tp, USE_PHYLIB)) { 13648 int i; 13649 13650 tg3_phy_lpbk_set(tp, 0, false); 13651 13652 /* Wait for link */ 13653 for (i = 0; i < 100; i++) { 13654 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 13655 break; 13656 mdelay(1); 13657 } 13658 13659 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13660 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED; 13661 if (tg3_flag(tp, TSO_CAPABLE) && 13662 tg3_run_loopback(tp, ETH_FRAME_LEN, true)) 13663 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED; 13664 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13665 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13666 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED; 13667 13668 if (do_extlpbk) { 13669 tg3_phy_lpbk_set(tp, 0, true); 13670 13671 /* All link indications report up, but the hardware 13672 * isn't really ready for about 20 msec. Double it 13673 * to be sure. 13674 */ 13675 mdelay(40); 13676 13677 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13678 data[TG3_EXT_LOOPB_TEST] |= 13679 TG3_STD_LOOPBACK_FAILED; 13680 if (tg3_flag(tp, TSO_CAPABLE) && 13681 tg3_run_loopback(tp, ETH_FRAME_LEN, true)) 13682 data[TG3_EXT_LOOPB_TEST] |= 13683 TG3_TSO_LOOPBACK_FAILED; 13684 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13685 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13686 data[TG3_EXT_LOOPB_TEST] |= 13687 TG3_JMB_LOOPBACK_FAILED; 13688 } 13689 13690 /* Re-enable gphy autopowerdown. */ 13691 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 13692 tg3_phy_toggle_apd(tp, true); 13693 } 13694 13695 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] | 13696 data[TG3_EXT_LOOPB_TEST]) ? 
-EIO : 0; 13697 13698 done: 13699 tp->phy_flags |= eee_cap; 13700 13701 return err; 13702 } 13703 13704 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, 13705 u64 *data) 13706 { 13707 struct tg3 *tp = netdev_priv(dev); 13708 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB; 13709 13710 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { 13711 if (tg3_power_up(tp)) { 13712 etest->flags |= ETH_TEST_FL_FAILED; 13713 memset(data, 1, sizeof(u64) * TG3_NUM_TEST); 13714 return; 13715 } 13716 tg3_ape_driver_state_change(tp, RESET_KIND_INIT); 13717 } 13718 13719 memset(data, 0, sizeof(u64) * TG3_NUM_TEST); 13720 13721 if (tg3_test_nvram(tp) != 0) { 13722 etest->flags |= ETH_TEST_FL_FAILED; 13723 data[TG3_NVRAM_TEST] = 1; 13724 } 13725 if (!doextlpbk && tg3_test_link(tp)) { 13726 etest->flags |= ETH_TEST_FL_FAILED; 13727 data[TG3_LINK_TEST] = 1; 13728 } 13729 if (etest->flags & ETH_TEST_FL_OFFLINE) { 13730 int err, err2 = 0, irq_sync = 0; 13731 13732 if (netif_running(dev)) { 13733 tg3_phy_stop(tp); 13734 tg3_netif_stop(tp); 13735 irq_sync = 1; 13736 } 13737 13738 tg3_full_lock(tp, irq_sync); 13739 tg3_halt(tp, RESET_KIND_SUSPEND, 1); 13740 err = tg3_nvram_lock(tp); 13741 tg3_halt_cpu(tp, RX_CPU_BASE); 13742 if (!tg3_flag(tp, 5705_PLUS)) 13743 tg3_halt_cpu(tp, TX_CPU_BASE); 13744 if (!err) 13745 tg3_nvram_unlock(tp); 13746 13747 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 13748 tg3_phy_reset(tp); 13749 13750 if (tg3_test_registers(tp) != 0) { 13751 etest->flags |= ETH_TEST_FL_FAILED; 13752 data[TG3_REGISTER_TEST] = 1; 13753 } 13754 13755 if (tg3_test_memory(tp) != 0) { 13756 etest->flags |= ETH_TEST_FL_FAILED; 13757 data[TG3_MEMORY_TEST] = 1; 13758 } 13759 13760 if (doextlpbk) 13761 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; 13762 13763 if (tg3_test_loopback(tp, data, doextlpbk)) 13764 etest->flags |= ETH_TEST_FL_FAILED; 13765 13766 tg3_full_unlock(tp); 13767 13768 if (tg3_test_interrupt(tp) != 0) { 13769 etest->flags |= ETH_TEST_FL_FAILED; 13770 data[TG3_INTERRUPT_TEST] = 1; 13771 } 13772 13773 tg3_full_lock(tp, 0); 13774 13775 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 13776 if (netif_running(dev)) { 13777 tg3_flag_set(tp, INIT_COMPLETE); 13778 err2 = tg3_restart_hw(tp, true); 13779 if (!err2) 13780 tg3_netif_start(tp); 13781 } 13782 13783 tg3_full_unlock(tp); 13784 13785 if (irq_sync && !err2) 13786 tg3_phy_start(tp); 13787 } 13788 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 13789 tg3_power_down_prepare(tp); 13790 13791 } 13792 13793 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) 13794 { 13795 struct tg3 *tp = netdev_priv(dev); 13796 struct hwtstamp_config stmpconf; 13797 13798 if (!tg3_flag(tp, PTP_CAPABLE)) 13799 return -EOPNOTSUPP; 13800 13801 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf))) 13802 return -EFAULT; 13803 13804 if (stmpconf.flags) 13805 return -EINVAL; 13806 13807 if (stmpconf.tx_type != HWTSTAMP_TX_ON && 13808 stmpconf.tx_type != HWTSTAMP_TX_OFF) 13809 return -ERANGE; 13810 13811 switch (stmpconf.rx_filter) { 13812 case HWTSTAMP_FILTER_NONE: 13813 tp->rxptpctl = 0; 13814 break; 13815 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 13816 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13817 TG3_RX_PTP_CTL_ALL_V1_EVENTS; 13818 break; 13819 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 13820 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13821 TG3_RX_PTP_CTL_SYNC_EVNT; 13822 break; 13823 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 13824 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13825 TG3_RX_PTP_CTL_DELAY_REQ; 13826 break; 13827 case 
HWTSTAMP_FILTER_PTP_V2_EVENT: 13828 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13829 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13830 break; 13831 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 13832 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13833 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13834 break; 13835 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 13836 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13837 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13838 break; 13839 case HWTSTAMP_FILTER_PTP_V2_SYNC: 13840 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13841 TG3_RX_PTP_CTL_SYNC_EVNT; 13842 break; 13843 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 13844 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13845 TG3_RX_PTP_CTL_SYNC_EVNT; 13846 break; 13847 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 13848 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13849 TG3_RX_PTP_CTL_SYNC_EVNT; 13850 break; 13851 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 13852 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13853 TG3_RX_PTP_CTL_DELAY_REQ; 13854 break; 13855 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 13856 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13857 TG3_RX_PTP_CTL_DELAY_REQ; 13858 break; 13859 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 13860 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13861 TG3_RX_PTP_CTL_DELAY_REQ; 13862 break; 13863 default: 13864 return -ERANGE; 13865 } 13866 13867 if (netif_running(dev) && tp->rxptpctl) 13868 tw32(TG3_RX_PTP_CTL, 13869 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); 13870 13871 if (stmpconf.tx_type == HWTSTAMP_TX_ON) 13872 tg3_flag_set(tp, TX_TSTAMP_EN); 13873 else 13874 tg3_flag_clear(tp, TX_TSTAMP_EN); 13875 13876 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 13877 -EFAULT : 0; 13878 } 13879 13880 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) 13881 { 13882 struct tg3 *tp = netdev_priv(dev); 13883 struct hwtstamp_config stmpconf; 13884 13885 if (!tg3_flag(tp, PTP_CAPABLE)) 13886 return -EOPNOTSUPP; 13887 13888 stmpconf.flags = 0; 13889 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ? 
13890 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF); 13891 13892 switch (tp->rxptpctl) { 13893 case 0: 13894 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE; 13895 break; 13896 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS: 13897 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 13898 break; 13899 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13900 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; 13901 break; 13902 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13903 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; 13904 break; 13905 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 13906 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 13907 break; 13908 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 13909 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; 13910 break; 13911 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 13912 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; 13913 break; 13914 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13915 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; 13916 break; 13917 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13918 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC; 13919 break; 13920 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 13921 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; 13922 break; 13923 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13924 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; 13925 break; 13926 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13927 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ; 13928 break; 13929 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ: 13930 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; 13931 break; 13932 default: 13933 WARN_ON_ONCE(1); 13934 return -ERANGE; 13935 } 13936 13937 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 
13938 -EFAULT : 0; 13939 } 13940 13941 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 13942 { 13943 struct mii_ioctl_data *data = if_mii(ifr); 13944 struct tg3 *tp = netdev_priv(dev); 13945 int err; 13946 13947 if (tg3_flag(tp, USE_PHYLIB)) { 13948 struct phy_device *phydev; 13949 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 13950 return -EAGAIN; 13951 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 13952 return phy_mii_ioctl(phydev, ifr, cmd); 13953 } 13954 13955 switch (cmd) { 13956 case SIOCGMIIPHY: 13957 data->phy_id = tp->phy_addr; 13958 13959 fallthrough; 13960 case SIOCGMIIREG: { 13961 u32 mii_regval; 13962 13963 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 13964 break; /* We have no PHY */ 13965 13966 if (!netif_running(dev)) 13967 return -EAGAIN; 13968 13969 spin_lock_bh(&tp->lock); 13970 err = __tg3_readphy(tp, data->phy_id & 0x1f, 13971 data->reg_num & 0x1f, &mii_regval); 13972 spin_unlock_bh(&tp->lock); 13973 13974 data->val_out = mii_regval; 13975 13976 return err; 13977 } 13978 13979 case SIOCSMIIREG: 13980 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 13981 break; /* We have no PHY */ 13982 13983 if (!netif_running(dev)) 13984 return -EAGAIN; 13985 13986 spin_lock_bh(&tp->lock); 13987 err = __tg3_writephy(tp, data->phy_id & 0x1f, 13988 data->reg_num & 0x1f, data->val_in); 13989 spin_unlock_bh(&tp->lock); 13990 13991 return err; 13992 13993 case SIOCSHWTSTAMP: 13994 return tg3_hwtstamp_set(dev, ifr); 13995 13996 case SIOCGHWTSTAMP: 13997 return tg3_hwtstamp_get(dev, ifr); 13998 13999 default: 14000 /* do nothing */ 14001 break; 14002 } 14003 return -EOPNOTSUPP; 14004 } 14005 14006 static int tg3_get_coalesce(struct net_device *dev, 14007 struct ethtool_coalesce *ec, 14008 struct kernel_ethtool_coalesce *kernel_coal, 14009 struct netlink_ext_ack *extack) 14010 { 14011 struct tg3 *tp = netdev_priv(dev); 14012 14013 memcpy(ec, &tp->coal, sizeof(*ec)); 14014 return 0; 14015 } 14016 14017 static int tg3_set_coalesce(struct net_device *dev, 14018 struct ethtool_coalesce *ec, 14019 struct kernel_ethtool_coalesce *kernel_coal, 14020 struct netlink_ext_ack *extack) 14021 { 14022 struct tg3 *tp = netdev_priv(dev); 14023 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; 14024 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0; 14025 14026 if (!tg3_flag(tp, 5705_PLUS)) { 14027 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT; 14028 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT; 14029 max_stat_coal_ticks = MAX_STAT_COAL_TICKS; 14030 min_stat_coal_ticks = MIN_STAT_COAL_TICKS; 14031 } 14032 14033 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || 14034 (!ec->rx_coalesce_usecs) || 14035 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || 14036 (!ec->tx_coalesce_usecs) || 14037 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || 14038 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || 14039 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) || 14040 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) || 14041 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) || 14042 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) || 14043 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) || 14044 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks)) 14045 return -EINVAL; 14046 14047 /* Only copy relevant parameters, ignore all others. 
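* Parameters outside supported_coalesce_params were already rejected by the ethtool core, so only the fields range-checked above can carry new values here.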
*/ 14048 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs; 14049 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs; 14050 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames; 14051 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames; 14052 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq; 14053 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq; 14054 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq; 14055 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq; 14056 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs; 14057 14058 if (netif_running(dev)) { 14059 tg3_full_lock(tp, 0); 14060 __tg3_set_coalesce(tp, &tp->coal); 14061 tg3_full_unlock(tp); 14062 } 14063 return 0; 14064 } 14065 14066 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata) 14067 { 14068 struct tg3 *tp = netdev_priv(dev); 14069 14070 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { 14071 netdev_warn(tp->dev, "Board does not support EEE!\n"); 14072 return -EOPNOTSUPP; 14073 } 14074 14075 if (edata->advertised != tp->eee.advertised) { 14076 netdev_warn(tp->dev, 14077 "Direct manipulation of EEE advertisement is not supported\n"); 14078 return -EINVAL; 14079 } 14080 14081 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) { 14082 netdev_warn(tp->dev, 14083 "Maximal Tx Lpi timer supported is %#x(u)\n", 14084 TG3_CPMU_DBTMR1_LNKIDLE_MAX); 14085 return -EINVAL; 14086 } 14087 14088 tp->eee = *edata; 14089 14090 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 14091 tg3_warn_mgmt_link_flap(tp); 14092 14093 if (netif_running(tp->dev)) { 14094 tg3_full_lock(tp, 0); 14095 tg3_setup_eee(tp); 14096 tg3_phy_reset(tp); 14097 tg3_full_unlock(tp); 14098 } 14099 14100 return 0; 14101 } 14102 14103 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata) 14104 { 14105 struct tg3 *tp = netdev_priv(dev); 14106 14107 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { 14108 netdev_warn(tp->dev, 14109 "Board does not support EEE!\n"); 14110 return -EOPNOTSUPP; 14111 } 14112 14113 *edata = tp->eee; 14114 return 0; 14115 } 14116 14117 static const struct ethtool_ops tg3_ethtool_ops = { 14118 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 14119 ETHTOOL_COALESCE_MAX_FRAMES | 14120 ETHTOOL_COALESCE_USECS_IRQ | 14121 ETHTOOL_COALESCE_MAX_FRAMES_IRQ | 14122 ETHTOOL_COALESCE_STATS_BLOCK_USECS, 14123 .get_drvinfo = tg3_get_drvinfo, 14124 .get_regs_len = tg3_get_regs_len, 14125 .get_regs = tg3_get_regs, 14126 .get_wol = tg3_get_wol, 14127 .set_wol = tg3_set_wol, 14128 .get_msglevel = tg3_get_msglevel, 14129 .set_msglevel = tg3_set_msglevel, 14130 .nway_reset = tg3_nway_reset, 14131 .get_link = ethtool_op_get_link, 14132 .get_eeprom_len = tg3_get_eeprom_len, 14133 .get_eeprom = tg3_get_eeprom, 14134 .set_eeprom = tg3_set_eeprom, 14135 .get_ringparam = tg3_get_ringparam, 14136 .set_ringparam = tg3_set_ringparam, 14137 .get_pauseparam = tg3_get_pauseparam, 14138 .set_pauseparam = tg3_set_pauseparam, 14139 .self_test = tg3_self_test, 14140 .get_strings = tg3_get_strings, 14141 .set_phys_id = tg3_set_phys_id, 14142 .get_ethtool_stats = tg3_get_ethtool_stats, 14143 .get_coalesce = tg3_get_coalesce, 14144 .set_coalesce = tg3_set_coalesce, 14145 .get_sset_count = tg3_get_sset_count, 14146 .get_rxnfc = tg3_get_rxnfc, 14147 .get_rxfh_indir_size = tg3_get_rxfh_indir_size, 14148 .get_rxfh = tg3_get_rxfh, 14149 .set_rxfh = tg3_set_rxfh, 14150 .get_channels = tg3_get_channels, 14151 .set_channels = tg3_set_channels, 14152 .get_ts_info 
= tg3_get_ts_info, 14153 .get_eee = tg3_get_eee, 14154 .set_eee = tg3_set_eee, 14155 .get_link_ksettings = tg3_get_link_ksettings, 14156 .set_link_ksettings = tg3_set_link_ksettings, 14157 }; 14158 14159 static void tg3_get_stats64(struct net_device *dev, 14160 struct rtnl_link_stats64 *stats) 14161 { 14162 struct tg3 *tp = netdev_priv(dev); 14163 14164 spin_lock_bh(&tp->lock); 14165 if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) { 14166 *stats = tp->net_stats_prev; 14167 spin_unlock_bh(&tp->lock); 14168 return; 14169 } 14170 14171 tg3_get_nstats(tp, stats); 14172 spin_unlock_bh(&tp->lock); 14173 } 14174 14175 static void tg3_set_rx_mode(struct net_device *dev) 14176 { 14177 struct tg3 *tp = netdev_priv(dev); 14178 14179 if (!netif_running(dev)) 14180 return; 14181 14182 tg3_full_lock(tp, 0); 14183 __tg3_set_rx_mode(dev); 14184 tg3_full_unlock(tp); 14185 } 14186 14187 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, 14188 int new_mtu) 14189 { 14190 dev->mtu = new_mtu; 14191 14192 if (new_mtu > ETH_DATA_LEN) { 14193 if (tg3_flag(tp, 5780_CLASS)) { 14194 netdev_update_features(dev); 14195 tg3_flag_clear(tp, TSO_CAPABLE); 14196 } else { 14197 tg3_flag_set(tp, JUMBO_RING_ENABLE); 14198 } 14199 } else { 14200 if (tg3_flag(tp, 5780_CLASS)) { 14201 tg3_flag_set(tp, TSO_CAPABLE); 14202 netdev_update_features(dev); 14203 } 14204 tg3_flag_clear(tp, JUMBO_RING_ENABLE); 14205 } 14206 } 14207 14208 static int tg3_change_mtu(struct net_device *dev, int new_mtu) 14209 { 14210 struct tg3 *tp = netdev_priv(dev); 14211 int err; 14212 bool reset_phy = false; 14213 14214 if (!netif_running(dev)) { 14215 /* We'll just catch it later when the 14216 * device is up'd. 14217 */ 14218 tg3_set_mtu(dev, tp, new_mtu); 14219 return 0; 14220 } 14221 14222 tg3_phy_stop(tp); 14223 14224 tg3_netif_stop(tp); 14225 14226 tg3_set_mtu(dev, tp, new_mtu); 14227 14228 tg3_full_lock(tp, 1); 14229 14230 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 14231 14232 /* Reset PHY, otherwise the read DMA engine will be in a mode that 14233 * breaks all requests to 256 bytes. 
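	 * Only the 57766/5717/5719/5720 revisions checked just below appear
	 * to need this, so reset_phy stays false for all other chips.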
14234 */ 14235 if (tg3_asic_rev(tp) == ASIC_REV_57766 || 14236 tg3_asic_rev(tp) == ASIC_REV_5717 || 14237 tg3_asic_rev(tp) == ASIC_REV_5719 || 14238 tg3_asic_rev(tp) == ASIC_REV_5720) 14239 reset_phy = true; 14240 14241 err = tg3_restart_hw(tp, reset_phy); 14242 14243 if (!err) 14244 tg3_netif_start(tp); 14245 14246 tg3_full_unlock(tp); 14247 14248 if (!err) 14249 tg3_phy_start(tp); 14250 14251 return err; 14252 } 14253 14254 static const struct net_device_ops tg3_netdev_ops = { 14255 .ndo_open = tg3_open, 14256 .ndo_stop = tg3_close, 14257 .ndo_start_xmit = tg3_start_xmit, 14258 .ndo_get_stats64 = tg3_get_stats64, 14259 .ndo_validate_addr = eth_validate_addr, 14260 .ndo_set_rx_mode = tg3_set_rx_mode, 14261 .ndo_set_mac_address = tg3_set_mac_addr, 14262 .ndo_eth_ioctl = tg3_ioctl, 14263 .ndo_tx_timeout = tg3_tx_timeout, 14264 .ndo_change_mtu = tg3_change_mtu, 14265 .ndo_fix_features = tg3_fix_features, 14266 .ndo_set_features = tg3_set_features, 14267 #ifdef CONFIG_NET_POLL_CONTROLLER 14268 .ndo_poll_controller = tg3_poll_controller, 14269 #endif 14270 }; 14271 14272 static void tg3_get_eeprom_size(struct tg3 *tp) 14273 { 14274 u32 cursize, val, magic; 14275 14276 tp->nvram_size = EEPROM_CHIP_SIZE; 14277 14278 if (tg3_nvram_read(tp, 0, &magic) != 0) 14279 return; 14280 14281 if ((magic != TG3_EEPROM_MAGIC) && 14282 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) && 14283 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW)) 14284 return; 14285 14286 /* 14287 * Size the chip by reading offsets at increasing powers of two. 14288 * When we encounter our validation signature, we know the addressing 14289 * has wrapped around, and thus have our chip size. 14290 */ 14291 cursize = 0x10; 14292 14293 while (cursize < tp->nvram_size) { 14294 if (tg3_nvram_read(tp, cursize, &val) != 0) 14295 return; 14296 14297 if (val == magic) 14298 break; 14299 14300 cursize <<= 1; 14301 } 14302 14303 tp->nvram_size = cursize; 14304 } 14305 14306 static void tg3_get_nvram_size(struct tg3 *tp) 14307 { 14308 u32 val; 14309 14310 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0) 14311 return; 14312 14313 /* Selfboot format */ 14314 if (val != TG3_EEPROM_MAGIC) { 14315 tg3_get_eeprom_size(tp); 14316 return; 14317 } 14318 14319 if (tg3_nvram_read(tp, 0xf0, &val) == 0) { 14320 if (val != 0) { 14321 /* This is confusing. We want to operate on the 14322 * 16-bit value at offset 0xf2. The tg3_nvram_read() 14323 * call will read from NVRAM and byteswap the data 14324 * according to the byteswapping settings for all 14325 * other register accesses. This ensures the data we 14326 * want will always reside in the lower 16-bits. 14327 * However, the data in NVRAM is in LE format, which 14328 * means the data from the NVRAM read will always be 14329 * opposite the endianness of the CPU. The 16-bit 14330 * byteswap then brings the data to CPU endianness. 
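			 * For example, if the stored 16-bit size word is 512
			 * (0x0200, i.e. 512 KB in 1 KB units), it arrives here
			 * as 0x0002; swab16() restores 0x0200 and the multiply
			 * by 1024 below converts it to a byte count.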
14331 */ 14332 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024; 14333 return; 14334 } 14335 } 14336 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14337 } 14338 14339 static void tg3_get_nvram_info(struct tg3 *tp) 14340 { 14341 u32 nvcfg1; 14342 14343 nvcfg1 = tr32(NVRAM_CFG1); 14344 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) { 14345 tg3_flag_set(tp, FLASH); 14346 } else { 14347 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14348 tw32(NVRAM_CFG1, nvcfg1); 14349 } 14350 14351 if (tg3_asic_rev(tp) == ASIC_REV_5750 || 14352 tg3_flag(tp, 5780_CLASS)) { 14353 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { 14354 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: 14355 tp->nvram_jedecnum = JEDEC_ATMEL; 14356 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; 14357 tg3_flag_set(tp, NVRAM_BUFFERED); 14358 break; 14359 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: 14360 tp->nvram_jedecnum = JEDEC_ATMEL; 14361 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE; 14362 break; 14363 case FLASH_VENDOR_ATMEL_EEPROM: 14364 tp->nvram_jedecnum = JEDEC_ATMEL; 14365 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14366 tg3_flag_set(tp, NVRAM_BUFFERED); 14367 break; 14368 case FLASH_VENDOR_ST: 14369 tp->nvram_jedecnum = JEDEC_ST; 14370 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; 14371 tg3_flag_set(tp, NVRAM_BUFFERED); 14372 break; 14373 case FLASH_VENDOR_SAIFUN: 14374 tp->nvram_jedecnum = JEDEC_SAIFUN; 14375 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE; 14376 break; 14377 case FLASH_VENDOR_SST_SMALL: 14378 case FLASH_VENDOR_SST_LARGE: 14379 tp->nvram_jedecnum = JEDEC_SST; 14380 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE; 14381 break; 14382 } 14383 } else { 14384 tp->nvram_jedecnum = JEDEC_ATMEL; 14385 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; 14386 tg3_flag_set(tp, NVRAM_BUFFERED); 14387 } 14388 } 14389 14390 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1) 14391 { 14392 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) { 14393 case FLASH_5752PAGE_SIZE_256: 14394 tp->nvram_pagesize = 256; 14395 break; 14396 case FLASH_5752PAGE_SIZE_512: 14397 tp->nvram_pagesize = 512; 14398 break; 14399 case FLASH_5752PAGE_SIZE_1K: 14400 tp->nvram_pagesize = 1024; 14401 break; 14402 case FLASH_5752PAGE_SIZE_2K: 14403 tp->nvram_pagesize = 2048; 14404 break; 14405 case FLASH_5752PAGE_SIZE_4K: 14406 tp->nvram_pagesize = 4096; 14407 break; 14408 case FLASH_5752PAGE_SIZE_264: 14409 tp->nvram_pagesize = 264; 14410 break; 14411 case FLASH_5752PAGE_SIZE_528: 14412 tp->nvram_pagesize = 528; 14413 break; 14414 } 14415 } 14416 14417 static void tg3_get_5752_nvram_info(struct tg3 *tp) 14418 { 14419 u32 nvcfg1; 14420 14421 nvcfg1 = tr32(NVRAM_CFG1); 14422 14423 /* NVRAM protection for TPM */ 14424 if (nvcfg1 & (1 << 27)) 14425 tg3_flag_set(tp, PROTECTED_NVRAM); 14426 14427 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14428 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: 14429 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: 14430 tp->nvram_jedecnum = JEDEC_ATMEL; 14431 tg3_flag_set(tp, NVRAM_BUFFERED); 14432 break; 14433 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 14434 tp->nvram_jedecnum = JEDEC_ATMEL; 14435 tg3_flag_set(tp, NVRAM_BUFFERED); 14436 tg3_flag_set(tp, FLASH); 14437 break; 14438 case FLASH_5752VENDOR_ST_M45PE10: 14439 case FLASH_5752VENDOR_ST_M45PE20: 14440 case FLASH_5752VENDOR_ST_M45PE40: 14441 tp->nvram_jedecnum = JEDEC_ST; 14442 tg3_flag_set(tp, NVRAM_BUFFERED); 14443 tg3_flag_set(tp, FLASH); 14444 break; 14445 } 14446 14447 if (tg3_flag(tp, FLASH)) { 14448 tg3_nvram_get_pagesize(tp, nvcfg1); 14449 } else { 14450 /* For eeprom, set pagesize to maximum 
eeprom size */ 14451 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14452 14453 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14454 tw32(NVRAM_CFG1, nvcfg1); 14455 } 14456 } 14457 14458 static void tg3_get_5755_nvram_info(struct tg3 *tp) 14459 { 14460 u32 nvcfg1, protect = 0; 14461 14462 nvcfg1 = tr32(NVRAM_CFG1); 14463 14464 /* NVRAM protection for TPM */ 14465 if (nvcfg1 & (1 << 27)) { 14466 tg3_flag_set(tp, PROTECTED_NVRAM); 14467 protect = 1; 14468 } 14469 14470 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; 14471 switch (nvcfg1) { 14472 case FLASH_5755VENDOR_ATMEL_FLASH_1: 14473 case FLASH_5755VENDOR_ATMEL_FLASH_2: 14474 case FLASH_5755VENDOR_ATMEL_FLASH_3: 14475 case FLASH_5755VENDOR_ATMEL_FLASH_5: 14476 tp->nvram_jedecnum = JEDEC_ATMEL; 14477 tg3_flag_set(tp, NVRAM_BUFFERED); 14478 tg3_flag_set(tp, FLASH); 14479 tp->nvram_pagesize = 264; 14480 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || 14481 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) 14482 tp->nvram_size = (protect ? 0x3e200 : 14483 TG3_NVRAM_SIZE_512KB); 14484 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) 14485 tp->nvram_size = (protect ? 0x1f200 : 14486 TG3_NVRAM_SIZE_256KB); 14487 else 14488 tp->nvram_size = (protect ? 0x1f200 : 14489 TG3_NVRAM_SIZE_128KB); 14490 break; 14491 case FLASH_5752VENDOR_ST_M45PE10: 14492 case FLASH_5752VENDOR_ST_M45PE20: 14493 case FLASH_5752VENDOR_ST_M45PE40: 14494 tp->nvram_jedecnum = JEDEC_ST; 14495 tg3_flag_set(tp, NVRAM_BUFFERED); 14496 tg3_flag_set(tp, FLASH); 14497 tp->nvram_pagesize = 256; 14498 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) 14499 tp->nvram_size = (protect ? 14500 TG3_NVRAM_SIZE_64KB : 14501 TG3_NVRAM_SIZE_128KB); 14502 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20) 14503 tp->nvram_size = (protect ? 14504 TG3_NVRAM_SIZE_64KB : 14505 TG3_NVRAM_SIZE_256KB); 14506 else 14507 tp->nvram_size = (protect ? 
14508 TG3_NVRAM_SIZE_128KB : 14509 TG3_NVRAM_SIZE_512KB); 14510 break; 14511 } 14512 } 14513 14514 static void tg3_get_5787_nvram_info(struct tg3 *tp) 14515 { 14516 u32 nvcfg1; 14517 14518 nvcfg1 = tr32(NVRAM_CFG1); 14519 14520 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14521 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ: 14522 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: 14523 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: 14524 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: 14525 tp->nvram_jedecnum = JEDEC_ATMEL; 14526 tg3_flag_set(tp, NVRAM_BUFFERED); 14527 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14528 14529 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14530 tw32(NVRAM_CFG1, nvcfg1); 14531 break; 14532 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 14533 case FLASH_5755VENDOR_ATMEL_FLASH_1: 14534 case FLASH_5755VENDOR_ATMEL_FLASH_2: 14535 case FLASH_5755VENDOR_ATMEL_FLASH_3: 14536 tp->nvram_jedecnum = JEDEC_ATMEL; 14537 tg3_flag_set(tp, NVRAM_BUFFERED); 14538 tg3_flag_set(tp, FLASH); 14539 tp->nvram_pagesize = 264; 14540 break; 14541 case FLASH_5752VENDOR_ST_M45PE10: 14542 case FLASH_5752VENDOR_ST_M45PE20: 14543 case FLASH_5752VENDOR_ST_M45PE40: 14544 tp->nvram_jedecnum = JEDEC_ST; 14545 tg3_flag_set(tp, NVRAM_BUFFERED); 14546 tg3_flag_set(tp, FLASH); 14547 tp->nvram_pagesize = 256; 14548 break; 14549 } 14550 } 14551 14552 static void tg3_get_5761_nvram_info(struct tg3 *tp) 14553 { 14554 u32 nvcfg1, protect = 0; 14555 14556 nvcfg1 = tr32(NVRAM_CFG1); 14557 14558 /* NVRAM protection for TPM */ 14559 if (nvcfg1 & (1 << 27)) { 14560 tg3_flag_set(tp, PROTECTED_NVRAM); 14561 protect = 1; 14562 } 14563 14564 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; 14565 switch (nvcfg1) { 14566 case FLASH_5761VENDOR_ATMEL_ADB021D: 14567 case FLASH_5761VENDOR_ATMEL_ADB041D: 14568 case FLASH_5761VENDOR_ATMEL_ADB081D: 14569 case FLASH_5761VENDOR_ATMEL_ADB161D: 14570 case FLASH_5761VENDOR_ATMEL_MDB021D: 14571 case FLASH_5761VENDOR_ATMEL_MDB041D: 14572 case FLASH_5761VENDOR_ATMEL_MDB081D: 14573 case FLASH_5761VENDOR_ATMEL_MDB161D: 14574 tp->nvram_jedecnum = JEDEC_ATMEL; 14575 tg3_flag_set(tp, NVRAM_BUFFERED); 14576 tg3_flag_set(tp, FLASH); 14577 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14578 tp->nvram_pagesize = 256; 14579 break; 14580 case FLASH_5761VENDOR_ST_A_M45PE20: 14581 case FLASH_5761VENDOR_ST_A_M45PE40: 14582 case FLASH_5761VENDOR_ST_A_M45PE80: 14583 case FLASH_5761VENDOR_ST_A_M45PE16: 14584 case FLASH_5761VENDOR_ST_M_M45PE20: 14585 case FLASH_5761VENDOR_ST_M_M45PE40: 14586 case FLASH_5761VENDOR_ST_M_M45PE80: 14587 case FLASH_5761VENDOR_ST_M_M45PE16: 14588 tp->nvram_jedecnum = JEDEC_ST; 14589 tg3_flag_set(tp, NVRAM_BUFFERED); 14590 tg3_flag_set(tp, FLASH); 14591 tp->nvram_pagesize = 256; 14592 break; 14593 } 14594 14595 if (protect) { 14596 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT); 14597 } else { 14598 switch (nvcfg1) { 14599 case FLASH_5761VENDOR_ATMEL_ADB161D: 14600 case FLASH_5761VENDOR_ATMEL_MDB161D: 14601 case FLASH_5761VENDOR_ST_A_M45PE16: 14602 case FLASH_5761VENDOR_ST_M_M45PE16: 14603 tp->nvram_size = TG3_NVRAM_SIZE_2MB; 14604 break; 14605 case FLASH_5761VENDOR_ATMEL_ADB081D: 14606 case FLASH_5761VENDOR_ATMEL_MDB081D: 14607 case FLASH_5761VENDOR_ST_A_M45PE80: 14608 case FLASH_5761VENDOR_ST_M_M45PE80: 14609 tp->nvram_size = TG3_NVRAM_SIZE_1MB; 14610 break; 14611 case FLASH_5761VENDOR_ATMEL_ADB041D: 14612 case FLASH_5761VENDOR_ATMEL_MDB041D: 14613 case FLASH_5761VENDOR_ST_A_M45PE40: 14614 case FLASH_5761VENDOR_ST_M_M45PE40: 14615 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14616 break; 14617 case 
FLASH_5761VENDOR_ATMEL_ADB021D: 14618 case FLASH_5761VENDOR_ATMEL_MDB021D: 14619 case FLASH_5761VENDOR_ST_A_M45PE20: 14620 case FLASH_5761VENDOR_ST_M_M45PE20: 14621 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14622 break; 14623 } 14624 } 14625 } 14626 14627 static void tg3_get_5906_nvram_info(struct tg3 *tp) 14628 { 14629 tp->nvram_jedecnum = JEDEC_ATMEL; 14630 tg3_flag_set(tp, NVRAM_BUFFERED); 14631 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14632 } 14633 14634 static void tg3_get_57780_nvram_info(struct tg3 *tp) 14635 { 14636 u32 nvcfg1; 14637 14638 nvcfg1 = tr32(NVRAM_CFG1); 14639 14640 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14641 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: 14642 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: 14643 tp->nvram_jedecnum = JEDEC_ATMEL; 14644 tg3_flag_set(tp, NVRAM_BUFFERED); 14645 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14646 14647 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14648 tw32(NVRAM_CFG1, nvcfg1); 14649 return; 14650 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 14651 case FLASH_57780VENDOR_ATMEL_AT45DB011D: 14652 case FLASH_57780VENDOR_ATMEL_AT45DB011B: 14653 case FLASH_57780VENDOR_ATMEL_AT45DB021D: 14654 case FLASH_57780VENDOR_ATMEL_AT45DB021B: 14655 case FLASH_57780VENDOR_ATMEL_AT45DB041D: 14656 case FLASH_57780VENDOR_ATMEL_AT45DB041B: 14657 tp->nvram_jedecnum = JEDEC_ATMEL; 14658 tg3_flag_set(tp, NVRAM_BUFFERED); 14659 tg3_flag_set(tp, FLASH); 14660 14661 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14662 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 14663 case FLASH_57780VENDOR_ATMEL_AT45DB011D: 14664 case FLASH_57780VENDOR_ATMEL_AT45DB011B: 14665 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14666 break; 14667 case FLASH_57780VENDOR_ATMEL_AT45DB021D: 14668 case FLASH_57780VENDOR_ATMEL_AT45DB021B: 14669 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14670 break; 14671 case FLASH_57780VENDOR_ATMEL_AT45DB041D: 14672 case FLASH_57780VENDOR_ATMEL_AT45DB041B: 14673 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14674 break; 14675 } 14676 break; 14677 case FLASH_5752VENDOR_ST_M45PE10: 14678 case FLASH_5752VENDOR_ST_M45PE20: 14679 case FLASH_5752VENDOR_ST_M45PE40: 14680 tp->nvram_jedecnum = JEDEC_ST; 14681 tg3_flag_set(tp, NVRAM_BUFFERED); 14682 tg3_flag_set(tp, FLASH); 14683 14684 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14685 case FLASH_5752VENDOR_ST_M45PE10: 14686 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14687 break; 14688 case FLASH_5752VENDOR_ST_M45PE20: 14689 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14690 break; 14691 case FLASH_5752VENDOR_ST_M45PE40: 14692 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14693 break; 14694 } 14695 break; 14696 default: 14697 tg3_flag_set(tp, NO_NVRAM); 14698 return; 14699 } 14700 14701 tg3_nvram_get_pagesize(tp, nvcfg1); 14702 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) 14703 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14704 } 14705 14706 14707 static void tg3_get_5717_nvram_info(struct tg3 *tp) 14708 { 14709 u32 nvcfg1; 14710 14711 nvcfg1 = tr32(NVRAM_CFG1); 14712 14713 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14714 case FLASH_5717VENDOR_ATMEL_EEPROM: 14715 case FLASH_5717VENDOR_MICRO_EEPROM: 14716 tp->nvram_jedecnum = JEDEC_ATMEL; 14717 tg3_flag_set(tp, NVRAM_BUFFERED); 14718 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14719 14720 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14721 tw32(NVRAM_CFG1, nvcfg1); 14722 return; 14723 case FLASH_5717VENDOR_ATMEL_MDB011D: 14724 case FLASH_5717VENDOR_ATMEL_ADB011B: 14725 case FLASH_5717VENDOR_ATMEL_ADB011D: 14726 case FLASH_5717VENDOR_ATMEL_MDB021D: 14727 case 
FLASH_5717VENDOR_ATMEL_ADB021B: 14728 case FLASH_5717VENDOR_ATMEL_ADB021D: 14729 case FLASH_5717VENDOR_ATMEL_45USPT: 14730 tp->nvram_jedecnum = JEDEC_ATMEL; 14731 tg3_flag_set(tp, NVRAM_BUFFERED); 14732 tg3_flag_set(tp, FLASH); 14733 14734 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14735 case FLASH_5717VENDOR_ATMEL_MDB021D: 14736 /* Detect size with tg3_nvram_get_size() */ 14737 break; 14738 case FLASH_5717VENDOR_ATMEL_ADB021B: 14739 case FLASH_5717VENDOR_ATMEL_ADB021D: 14740 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14741 break; 14742 default: 14743 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14744 break; 14745 } 14746 break; 14747 case FLASH_5717VENDOR_ST_M_M25PE10: 14748 case FLASH_5717VENDOR_ST_A_M25PE10: 14749 case FLASH_5717VENDOR_ST_M_M45PE10: 14750 case FLASH_5717VENDOR_ST_A_M45PE10: 14751 case FLASH_5717VENDOR_ST_M_M25PE20: 14752 case FLASH_5717VENDOR_ST_A_M25PE20: 14753 case FLASH_5717VENDOR_ST_M_M45PE20: 14754 case FLASH_5717VENDOR_ST_A_M45PE20: 14755 case FLASH_5717VENDOR_ST_25USPT: 14756 case FLASH_5717VENDOR_ST_45USPT: 14757 tp->nvram_jedecnum = JEDEC_ST; 14758 tg3_flag_set(tp, NVRAM_BUFFERED); 14759 tg3_flag_set(tp, FLASH); 14760 14761 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14762 case FLASH_5717VENDOR_ST_M_M25PE20: 14763 case FLASH_5717VENDOR_ST_M_M45PE20: 14764 /* Detect size with tg3_nvram_get_size() */ 14765 break; 14766 case FLASH_5717VENDOR_ST_A_M25PE20: 14767 case FLASH_5717VENDOR_ST_A_M45PE20: 14768 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14769 break; 14770 default: 14771 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14772 break; 14773 } 14774 break; 14775 default: 14776 tg3_flag_set(tp, NO_NVRAM); 14777 return; 14778 } 14779 14780 tg3_nvram_get_pagesize(tp, nvcfg1); 14781 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) 14782 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14783 } 14784 14785 static void tg3_get_5720_nvram_info(struct tg3 *tp) 14786 { 14787 u32 nvcfg1, nvmpinstrp, nv_status; 14788 14789 nvcfg1 = tr32(NVRAM_CFG1); 14790 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK; 14791 14792 if (tg3_asic_rev(tp) == ASIC_REV_5762) { 14793 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) { 14794 tg3_flag_set(tp, NO_NVRAM); 14795 return; 14796 } 14797 14798 switch (nvmpinstrp) { 14799 case FLASH_5762_MX25L_100: 14800 case FLASH_5762_MX25L_200: 14801 case FLASH_5762_MX25L_400: 14802 case FLASH_5762_MX25L_800: 14803 case FLASH_5762_MX25L_160_320: 14804 tp->nvram_pagesize = 4096; 14805 tp->nvram_jedecnum = JEDEC_MACRONIX; 14806 tg3_flag_set(tp, NVRAM_BUFFERED); 14807 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14808 tg3_flag_set(tp, FLASH); 14809 nv_status = tr32(NVRAM_AUTOSENSE_STATUS); 14810 tp->nvram_size = 14811 (1 << (nv_status >> AUTOSENSE_DEVID & 14812 AUTOSENSE_DEVID_MASK) 14813 << AUTOSENSE_SIZE_IN_MB); 14814 return; 14815 14816 case FLASH_5762_EEPROM_HD: 14817 nvmpinstrp = FLASH_5720_EEPROM_HD; 14818 break; 14819 case FLASH_5762_EEPROM_LD: 14820 nvmpinstrp = FLASH_5720_EEPROM_LD; 14821 break; 14822 case FLASH_5720VENDOR_M_ST_M45PE20: 14823 /* This pinstrap supports multiple sizes, so force it 14824 * to read the actual size from location 0xf0. 
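			 * (Leaving tp->nvram_size unset here means
			 * tg3_nvram_init() falls through to
			 * tg3_get_nvram_size(), which reads the 1 KB-unit
			 * size word at offset 0xf0.)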
14825 */ 14826 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT; 14827 break; 14828 } 14829 } 14830 14831 switch (nvmpinstrp) { 14832 case FLASH_5720_EEPROM_HD: 14833 case FLASH_5720_EEPROM_LD: 14834 tp->nvram_jedecnum = JEDEC_ATMEL; 14835 tg3_flag_set(tp, NVRAM_BUFFERED); 14836 14837 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14838 tw32(NVRAM_CFG1, nvcfg1); 14839 if (nvmpinstrp == FLASH_5720_EEPROM_HD) 14840 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14841 else 14842 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE; 14843 return; 14844 case FLASH_5720VENDOR_M_ATMEL_DB011D: 14845 case FLASH_5720VENDOR_A_ATMEL_DB011B: 14846 case FLASH_5720VENDOR_A_ATMEL_DB011D: 14847 case FLASH_5720VENDOR_M_ATMEL_DB021D: 14848 case FLASH_5720VENDOR_A_ATMEL_DB021B: 14849 case FLASH_5720VENDOR_A_ATMEL_DB021D: 14850 case FLASH_5720VENDOR_M_ATMEL_DB041D: 14851 case FLASH_5720VENDOR_A_ATMEL_DB041B: 14852 case FLASH_5720VENDOR_A_ATMEL_DB041D: 14853 case FLASH_5720VENDOR_M_ATMEL_DB081D: 14854 case FLASH_5720VENDOR_A_ATMEL_DB081D: 14855 case FLASH_5720VENDOR_ATMEL_45USPT: 14856 tp->nvram_jedecnum = JEDEC_ATMEL; 14857 tg3_flag_set(tp, NVRAM_BUFFERED); 14858 tg3_flag_set(tp, FLASH); 14859 14860 switch (nvmpinstrp) { 14861 case FLASH_5720VENDOR_M_ATMEL_DB021D: 14862 case FLASH_5720VENDOR_A_ATMEL_DB021B: 14863 case FLASH_5720VENDOR_A_ATMEL_DB021D: 14864 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14865 break; 14866 case FLASH_5720VENDOR_M_ATMEL_DB041D: 14867 case FLASH_5720VENDOR_A_ATMEL_DB041B: 14868 case FLASH_5720VENDOR_A_ATMEL_DB041D: 14869 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14870 break; 14871 case FLASH_5720VENDOR_M_ATMEL_DB081D: 14872 case FLASH_5720VENDOR_A_ATMEL_DB081D: 14873 tp->nvram_size = TG3_NVRAM_SIZE_1MB; 14874 break; 14875 default: 14876 if (tg3_asic_rev(tp) != ASIC_REV_5762) 14877 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14878 break; 14879 } 14880 break; 14881 case FLASH_5720VENDOR_M_ST_M25PE10: 14882 case FLASH_5720VENDOR_M_ST_M45PE10: 14883 case FLASH_5720VENDOR_A_ST_M25PE10: 14884 case FLASH_5720VENDOR_A_ST_M45PE10: 14885 case FLASH_5720VENDOR_M_ST_M25PE20: 14886 case FLASH_5720VENDOR_M_ST_M45PE20: 14887 case FLASH_5720VENDOR_A_ST_M25PE20: 14888 case FLASH_5720VENDOR_A_ST_M45PE20: 14889 case FLASH_5720VENDOR_M_ST_M25PE40: 14890 case FLASH_5720VENDOR_M_ST_M45PE40: 14891 case FLASH_5720VENDOR_A_ST_M25PE40: 14892 case FLASH_5720VENDOR_A_ST_M45PE40: 14893 case FLASH_5720VENDOR_M_ST_M25PE80: 14894 case FLASH_5720VENDOR_M_ST_M45PE80: 14895 case FLASH_5720VENDOR_A_ST_M25PE80: 14896 case FLASH_5720VENDOR_A_ST_M45PE80: 14897 case FLASH_5720VENDOR_ST_25USPT: 14898 case FLASH_5720VENDOR_ST_45USPT: 14899 tp->nvram_jedecnum = JEDEC_ST; 14900 tg3_flag_set(tp, NVRAM_BUFFERED); 14901 tg3_flag_set(tp, FLASH); 14902 14903 switch (nvmpinstrp) { 14904 case FLASH_5720VENDOR_M_ST_M25PE20: 14905 case FLASH_5720VENDOR_M_ST_M45PE20: 14906 case FLASH_5720VENDOR_A_ST_M25PE20: 14907 case FLASH_5720VENDOR_A_ST_M45PE20: 14908 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14909 break; 14910 case FLASH_5720VENDOR_M_ST_M25PE40: 14911 case FLASH_5720VENDOR_M_ST_M45PE40: 14912 case FLASH_5720VENDOR_A_ST_M25PE40: 14913 case FLASH_5720VENDOR_A_ST_M45PE40: 14914 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14915 break; 14916 case FLASH_5720VENDOR_M_ST_M25PE80: 14917 case FLASH_5720VENDOR_M_ST_M45PE80: 14918 case FLASH_5720VENDOR_A_ST_M25PE80: 14919 case FLASH_5720VENDOR_A_ST_M45PE80: 14920 tp->nvram_size = TG3_NVRAM_SIZE_1MB; 14921 break; 14922 default: 14923 if (tg3_asic_rev(tp) != ASIC_REV_5762) 14924 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14925 break; 14926 } 
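		/* The ST part numbers above encode the density:
		 * M25PE20/M45PE20 = 2 Mbit (256 KB), M25PE40/M45PE40 = 4 Mbit
		 * (512 KB), M25PE80/M45PE80 = 8 Mbit (1 MB), which is what
		 * the TG3_NVRAM_SIZE_* assignments reflect.
		 */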
14927 break; 14928 default: 14929 tg3_flag_set(tp, NO_NVRAM); 14930 return; 14931 } 14932 14933 tg3_nvram_get_pagesize(tp, nvcfg1); 14934 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) 14935 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14936 14937 if (tg3_asic_rev(tp) == ASIC_REV_5762) { 14938 u32 val; 14939 14940 if (tg3_nvram_read(tp, 0, &val)) 14941 return; 14942 14943 if (val != TG3_EEPROM_MAGIC && 14944 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) 14945 tg3_flag_set(tp, NO_NVRAM); 14946 } 14947 } 14948 14949 /* Chips other than 5700/5701 use the NVRAM for fetching info. */ 14950 static void tg3_nvram_init(struct tg3 *tp) 14951 { 14952 if (tg3_flag(tp, IS_SSB_CORE)) { 14953 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */ 14954 tg3_flag_clear(tp, NVRAM); 14955 tg3_flag_clear(tp, NVRAM_BUFFERED); 14956 tg3_flag_set(tp, NO_NVRAM); 14957 return; 14958 } 14959 14960 tw32_f(GRC_EEPROM_ADDR, 14961 (EEPROM_ADDR_FSM_RESET | 14962 (EEPROM_DEFAULT_CLOCK_PERIOD << 14963 EEPROM_ADDR_CLKPERD_SHIFT))); 14964 14965 msleep(1); 14966 14967 /* Enable seeprom accesses. */ 14968 tw32_f(GRC_LOCAL_CTRL, 14969 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM); 14970 udelay(100); 14971 14972 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 14973 tg3_asic_rev(tp) != ASIC_REV_5701) { 14974 tg3_flag_set(tp, NVRAM); 14975 14976 if (tg3_nvram_lock(tp)) { 14977 netdev_warn(tp->dev, 14978 "Cannot get nvram lock, %s failed\n", 14979 __func__); 14980 return; 14981 } 14982 tg3_enable_nvram_access(tp); 14983 14984 tp->nvram_size = 0; 14985 14986 if (tg3_asic_rev(tp) == ASIC_REV_5752) 14987 tg3_get_5752_nvram_info(tp); 14988 else if (tg3_asic_rev(tp) == ASIC_REV_5755) 14989 tg3_get_5755_nvram_info(tp); 14990 else if (tg3_asic_rev(tp) == ASIC_REV_5787 || 14991 tg3_asic_rev(tp) == ASIC_REV_5784 || 14992 tg3_asic_rev(tp) == ASIC_REV_5785) 14993 tg3_get_5787_nvram_info(tp); 14994 else if (tg3_asic_rev(tp) == ASIC_REV_5761) 14995 tg3_get_5761_nvram_info(tp); 14996 else if (tg3_asic_rev(tp) == ASIC_REV_5906) 14997 tg3_get_5906_nvram_info(tp); 14998 else if (tg3_asic_rev(tp) == ASIC_REV_57780 || 14999 tg3_flag(tp, 57765_CLASS)) 15000 tg3_get_57780_nvram_info(tp); 15001 else if (tg3_asic_rev(tp) == ASIC_REV_5717 || 15002 tg3_asic_rev(tp) == ASIC_REV_5719) 15003 tg3_get_5717_nvram_info(tp); 15004 else if (tg3_asic_rev(tp) == ASIC_REV_5720 || 15005 tg3_asic_rev(tp) == ASIC_REV_5762) 15006 tg3_get_5720_nvram_info(tp); 15007 else 15008 tg3_get_nvram_info(tp); 15009 15010 if (tp->nvram_size == 0) 15011 tg3_get_nvram_size(tp); 15012 15013 tg3_disable_nvram_access(tp); 15014 tg3_nvram_unlock(tp); 15015 15016 } else { 15017 tg3_flag_clear(tp, NVRAM); 15018 tg3_flag_clear(tp, NVRAM_BUFFERED); 15019 15020 tg3_get_eeprom_size(tp); 15021 } 15022 } 15023 15024 struct subsys_tbl_ent { 15025 u16 subsys_vendor, subsys_devid; 15026 u32 phy_id; 15027 }; 15028 15029 static struct subsys_tbl_ent subsys_id_to_phy_id[] = { 15030 /* Broadcom boards. 
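	 * Entries with a phy_id of 0 are the fiber variants; tg3_phy_probe()
	 * maps a zero ID to TG3_PHYFLG_PHY_SERDES.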
*/ 15031 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15032 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 }, 15033 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15034 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 }, 15035 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15036 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 }, 15037 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15038 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 }, 15039 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15040 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 }, 15041 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15042 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 }, 15043 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15044 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 }, 15045 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15046 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 }, 15047 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15048 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 }, 15049 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15050 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 }, 15051 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15052 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 }, 15053 15054 /* 3com boards. */ 15055 { TG3PCI_SUBVENDOR_ID_3COM, 15056 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 }, 15057 { TG3PCI_SUBVENDOR_ID_3COM, 15058 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 }, 15059 { TG3PCI_SUBVENDOR_ID_3COM, 15060 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 }, 15061 { TG3PCI_SUBVENDOR_ID_3COM, 15062 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 }, 15063 { TG3PCI_SUBVENDOR_ID_3COM, 15064 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 }, 15065 15066 /* DELL boards. */ 15067 { TG3PCI_SUBVENDOR_ID_DELL, 15068 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 }, 15069 { TG3PCI_SUBVENDOR_ID_DELL, 15070 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 }, 15071 { TG3PCI_SUBVENDOR_ID_DELL, 15072 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 }, 15073 { TG3PCI_SUBVENDOR_ID_DELL, 15074 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 }, 15075 15076 /* Compaq boards. */ 15077 { TG3PCI_SUBVENDOR_ID_COMPAQ, 15078 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 }, 15079 { TG3PCI_SUBVENDOR_ID_COMPAQ, 15080 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 }, 15081 { TG3PCI_SUBVENDOR_ID_COMPAQ, 15082 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 }, 15083 { TG3PCI_SUBVENDOR_ID_COMPAQ, 15084 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 }, 15085 { TG3PCI_SUBVENDOR_ID_COMPAQ, 15086 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 }, 15087 15088 /* IBM boards. */ 15089 { TG3PCI_SUBVENDOR_ID_IBM, 15090 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 } 15091 }; 15092 15093 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp) 15094 { 15095 int i; 15096 15097 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) { 15098 if ((subsys_id_to_phy_id[i].subsys_vendor == 15099 tp->pdev->subsystem_vendor) && 15100 (subsys_id_to_phy_id[i].subsys_devid == 15101 tp->pdev->subsystem_device)) 15102 return &subsys_id_to_phy_id[i]; 15103 } 15104 return NULL; 15105 } 15106 15107 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp) 15108 { 15109 u32 val; 15110 15111 tp->phy_id = TG3_PHY_ID_INVALID; 15112 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 15113 15114 /* Assume an onboard device and WOL capable by default. 
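	 * Both assumptions are revisited below once NIC_SRAM_DATA_CFG has
	 * been read (see the EEPROM_WRITE_PROT/IS_NIC and WOL_CAP handling).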
*/ 15115 tg3_flag_set(tp, EEPROM_WRITE_PROT); 15116 tg3_flag_set(tp, WOL_CAP); 15117 15118 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 15119 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) { 15120 tg3_flag_clear(tp, EEPROM_WRITE_PROT); 15121 tg3_flag_set(tp, IS_NIC); 15122 } 15123 val = tr32(VCPU_CFGSHDW); 15124 if (val & VCPU_CFGSHDW_ASPM_DBNC) 15125 tg3_flag_set(tp, ASPM_WORKAROUND); 15126 if ((val & VCPU_CFGSHDW_WOL_ENABLE) && 15127 (val & VCPU_CFGSHDW_WOL_MAGPKT)) { 15128 tg3_flag_set(tp, WOL_ENABLE); 15129 device_set_wakeup_enable(&tp->pdev->dev, true); 15130 } 15131 goto done; 15132 } 15133 15134 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); 15135 if (val == NIC_SRAM_DATA_SIG_MAGIC) { 15136 u32 nic_cfg, led_cfg; 15137 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0; 15138 u32 nic_phy_id, ver, eeprom_phy_id; 15139 int eeprom_phy_serdes = 0; 15140 15141 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); 15142 tp->nic_sram_data_cfg = nic_cfg; 15143 15144 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver); 15145 ver >>= NIC_SRAM_DATA_VER_SHIFT; 15146 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 15147 tg3_asic_rev(tp) != ASIC_REV_5701 && 15148 tg3_asic_rev(tp) != ASIC_REV_5703 && 15149 (ver > 0) && (ver < 0x100)) 15150 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2); 15151 15152 if (tg3_asic_rev(tp) == ASIC_REV_5785) 15153 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4); 15154 15155 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 15156 tg3_asic_rev(tp) == ASIC_REV_5719 || 15157 tg3_asic_rev(tp) == ASIC_REV_5720) 15158 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5); 15159 15160 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) == 15161 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) 15162 eeprom_phy_serdes = 1; 15163 15164 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id); 15165 if (nic_phy_id != 0) { 15166 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK; 15167 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK; 15168 15169 eeprom_phy_id = (id1 >> 16) << 10; 15170 eeprom_phy_id |= (id2 & 0xfc00) << 16; 15171 eeprom_phy_id |= (id2 & 0x03ff) << 0; 15172 } else 15173 eeprom_phy_id = 0; 15174 15175 tp->phy_id = eeprom_phy_id; 15176 if (eeprom_phy_serdes) { 15177 if (!tg3_flag(tp, 5705_PLUS)) 15178 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; 15179 else 15180 tp->phy_flags |= TG3_PHYFLG_MII_SERDES; 15181 } 15182 15183 if (tg3_flag(tp, 5750_PLUS)) 15184 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK | 15185 SHASTA_EXT_LED_MODE_MASK); 15186 else 15187 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK; 15188 15189 switch (led_cfg) { 15190 default: 15191 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1: 15192 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 15193 break; 15194 15195 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2: 15196 tp->led_ctrl = LED_CTRL_MODE_PHY_2; 15197 break; 15198 15199 case NIC_SRAM_DATA_CFG_LED_MODE_MAC: 15200 tp->led_ctrl = LED_CTRL_MODE_MAC; 15201 15202 /* Default to PHY_1_MODE if 0 (MAC_MODE) is 15203 * read on some older 5700/5701 bootcode. 
15204 */ 15205 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 15206 tg3_asic_rev(tp) == ASIC_REV_5701) 15207 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 15208 15209 break; 15210 15211 case SHASTA_EXT_LED_SHARED: 15212 tp->led_ctrl = LED_CTRL_MODE_SHARED; 15213 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 && 15214 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1) 15215 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | 15216 LED_CTRL_MODE_PHY_2); 15217 15218 if (tg3_flag(tp, 5717_PLUS) || 15219 tg3_asic_rev(tp) == ASIC_REV_5762) 15220 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE | 15221 LED_CTRL_BLINK_RATE_MASK; 15222 15223 break; 15224 15225 case SHASTA_EXT_LED_MAC: 15226 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC; 15227 break; 15228 15229 case SHASTA_EXT_LED_COMBO: 15230 tp->led_ctrl = LED_CTRL_MODE_COMBO; 15231 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) 15232 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | 15233 LED_CTRL_MODE_PHY_2); 15234 break; 15235 15236 } 15237 15238 if ((tg3_asic_rev(tp) == ASIC_REV_5700 || 15239 tg3_asic_rev(tp) == ASIC_REV_5701) && 15240 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) 15241 tp->led_ctrl = LED_CTRL_MODE_PHY_2; 15242 15243 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) 15244 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 15245 15246 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) { 15247 tg3_flag_set(tp, EEPROM_WRITE_PROT); 15248 if ((tp->pdev->subsystem_vendor == 15249 PCI_VENDOR_ID_ARIMA) && 15250 (tp->pdev->subsystem_device == 0x205a || 15251 tp->pdev->subsystem_device == 0x2063)) 15252 tg3_flag_clear(tp, EEPROM_WRITE_PROT); 15253 } else { 15254 tg3_flag_clear(tp, EEPROM_WRITE_PROT); 15255 tg3_flag_set(tp, IS_NIC); 15256 } 15257 15258 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { 15259 tg3_flag_set(tp, ENABLE_ASF); 15260 if (tg3_flag(tp, 5750_PLUS)) 15261 tg3_flag_set(tp, ASF_NEW_HANDSHAKE); 15262 } 15263 15264 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) && 15265 tg3_flag(tp, 5750_PLUS)) 15266 tg3_flag_set(tp, ENABLE_APE); 15267 15268 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES && 15269 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) 15270 tg3_flag_clear(tp, WOL_CAP); 15271 15272 if (tg3_flag(tp, WOL_CAP) && 15273 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) { 15274 tg3_flag_set(tp, WOL_ENABLE); 15275 device_set_wakeup_enable(&tp->pdev->dev, true); 15276 } 15277 15278 if (cfg2 & (1 << 17)) 15279 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING; 15280 15281 /* serdes signal pre-emphasis in register 0x590 set by */ 15282 /* bootcode if bit 18 is set */ 15283 if (cfg2 & (1 << 18)) 15284 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS; 15285 15286 if ((tg3_flag(tp, 57765_PLUS) || 15287 (tg3_asic_rev(tp) == ASIC_REV_5784 && 15288 tg3_chip_rev(tp) != CHIPREV_5784_AX)) && 15289 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) 15290 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD; 15291 15292 if (tg3_flag(tp, PCI_EXPRESS)) { 15293 u32 cfg3; 15294 15295 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3); 15296 if (tg3_asic_rev(tp) != ASIC_REV_5785 && 15297 !tg3_flag(tp, 57765_PLUS) && 15298 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)) 15299 tg3_flag_set(tp, ASPM_WORKAROUND); 15300 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID) 15301 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN; 15302 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK) 15303 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK; 15304 } 15305 15306 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE) 15307 tg3_flag_set(tp, RGMII_INBAND_DISABLE); 15308 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN) 15309 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN); 15310 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN) 15311 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN); 15312 15313 if 
(cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV) 15314 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV; 15315 } 15316 done: 15317 if (tg3_flag(tp, WOL_CAP)) 15318 device_set_wakeup_enable(&tp->pdev->dev, 15319 tg3_flag(tp, WOL_ENABLE)); 15320 else 15321 device_set_wakeup_capable(&tp->pdev->dev, false); 15322 } 15323 15324 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val) 15325 { 15326 int i, err; 15327 u32 val2, off = offset * 8; 15328 15329 err = tg3_nvram_lock(tp); 15330 if (err) 15331 return err; 15332 15333 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE); 15334 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN | 15335 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START); 15336 tg3_ape_read32(tp, TG3_APE_OTP_CTRL); 15337 udelay(10); 15338 15339 for (i = 0; i < 100; i++) { 15340 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS); 15341 if (val2 & APE_OTP_STATUS_CMD_DONE) { 15342 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA); 15343 break; 15344 } 15345 udelay(10); 15346 } 15347 15348 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0); 15349 15350 tg3_nvram_unlock(tp); 15351 if (val2 & APE_OTP_STATUS_CMD_DONE) 15352 return 0; 15353 15354 return -EBUSY; 15355 } 15356 15357 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd) 15358 { 15359 int i; 15360 u32 val; 15361 15362 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START); 15363 tw32(OTP_CTRL, cmd); 15364 15365 /* Wait for up to 1 ms for command to execute. */ 15366 for (i = 0; i < 100; i++) { 15367 val = tr32(OTP_STATUS); 15368 if (val & OTP_STATUS_CMD_DONE) 15369 break; 15370 udelay(10); 15371 } 15372 15373 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY; 15374 } 15375 15376 /* Read the gphy configuration from the OTP region of the chip. The gphy 15377 * configuration is a 32-bit value that straddles the alignment boundary. 15378 * We do two 32-bit reads and then shift and merge the results. 
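 * For example, with thalf_otp = 0xaaaabbbb and bhalf_otp = 0xccccdddd the
 * merge below returns (0xbbbb << 16) | 0xcccc = 0xbbbbcccc.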
15379 */ 15380 static u32 tg3_read_otp_phycfg(struct tg3 *tp) 15381 { 15382 u32 bhalf_otp, thalf_otp; 15383 15384 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC); 15385 15386 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT)) 15387 return 0; 15388 15389 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1); 15390 15391 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ)) 15392 return 0; 15393 15394 thalf_otp = tr32(OTP_READ_DATA); 15395 15396 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2); 15397 15398 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ)) 15399 return 0; 15400 15401 bhalf_otp = tr32(OTP_READ_DATA); 15402 15403 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16); 15404 } 15405 15406 static void tg3_phy_init_link_config(struct tg3 *tp) 15407 { 15408 u32 adv = ADVERTISED_Autoneg; 15409 15410 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 15411 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV)) 15412 adv |= ADVERTISED_1000baseT_Half; 15413 adv |= ADVERTISED_1000baseT_Full; 15414 } 15415 15416 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 15417 adv |= ADVERTISED_100baseT_Half | 15418 ADVERTISED_100baseT_Full | 15419 ADVERTISED_10baseT_Half | 15420 ADVERTISED_10baseT_Full | 15421 ADVERTISED_TP; 15422 else 15423 adv |= ADVERTISED_FIBRE; 15424 15425 tp->link_config.advertising = adv; 15426 tp->link_config.speed = SPEED_UNKNOWN; 15427 tp->link_config.duplex = DUPLEX_UNKNOWN; 15428 tp->link_config.autoneg = AUTONEG_ENABLE; 15429 tp->link_config.active_speed = SPEED_UNKNOWN; 15430 tp->link_config.active_duplex = DUPLEX_UNKNOWN; 15431 15432 tp->old_link = -1; 15433 } 15434 15435 static int tg3_phy_probe(struct tg3 *tp) 15436 { 15437 u32 hw_phy_id_1, hw_phy_id_2; 15438 u32 hw_phy_id, hw_phy_id_masked; 15439 int err; 15440 15441 /* flow control autonegotiation is default behavior */ 15442 tg3_flag_set(tp, PAUSE_AUTONEG); 15443 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; 15444 15445 if (tg3_flag(tp, ENABLE_APE)) { 15446 switch (tp->pci_fn) { 15447 case 0: 15448 tp->phy_ape_lock = TG3_APE_LOCK_PHY0; 15449 break; 15450 case 1: 15451 tp->phy_ape_lock = TG3_APE_LOCK_PHY1; 15452 break; 15453 case 2: 15454 tp->phy_ape_lock = TG3_APE_LOCK_PHY2; 15455 break; 15456 case 3: 15457 tp->phy_ape_lock = TG3_APE_LOCK_PHY3; 15458 break; 15459 } 15460 } 15461 15462 if (!tg3_flag(tp, ENABLE_ASF) && 15463 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && 15464 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) 15465 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | 15466 TG3_PHYFLG_KEEP_LINK_ON_PWRDN); 15467 15468 if (tg3_flag(tp, USE_PHYLIB)) 15469 return tg3_phy_init(tp); 15470 15471 /* Reading the PHY ID register can conflict with ASF 15472 * firmware access to the PHY hardware. 15473 */ 15474 err = 0; 15475 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) { 15476 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID; 15477 } else { 15478 /* Now read the physical PHY_ID from the chip and verify 15479 * that it is sane. If it doesn't look good, we fall back 15480 * to either the hard-coded table based PHY_ID and failing 15481 * that the value found in the eeprom area. 
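		 * (Strictly, the code below tries the EEPROM-derived ID from
		 * tg3_get_eeprom_hw_cfg() first, and consults the
		 * subsystem-ID table only if that is unset.)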
15482 */ 15483 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1); 15484 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2); 15485 15486 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10; 15487 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16; 15488 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0; 15489 15490 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK; 15491 } 15492 15493 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) { 15494 tp->phy_id = hw_phy_id; 15495 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002) 15496 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; 15497 else 15498 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES; 15499 } else { 15500 if (tp->phy_id != TG3_PHY_ID_INVALID) { 15501 /* Do nothing, phy ID already set up in 15502 * tg3_get_eeprom_hw_cfg(). 15503 */ 15504 } else { 15505 struct subsys_tbl_ent *p; 15506 15507 /* No eeprom signature? Try the hardcoded 15508 * subsys device table. 15509 */ 15510 p = tg3_lookup_by_subsys(tp); 15511 if (p) { 15512 tp->phy_id = p->phy_id; 15513 } else if (!tg3_flag(tp, IS_SSB_CORE)) { 15514 /* For now we saw the IDs 0xbc050cd0, 15515 * 0xbc050f80 and 0xbc050c30 on devices 15516 * connected to an BCM4785 and there are 15517 * probably more. Just assume that the phy is 15518 * supported when it is connected to a SSB core 15519 * for now. 15520 */ 15521 return -ENODEV; 15522 } 15523 15524 if (!tp->phy_id || 15525 tp->phy_id == TG3_PHY_ID_BCM8002) 15526 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; 15527 } 15528 } 15529 15530 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && 15531 (tg3_asic_rev(tp) == ASIC_REV_5719 || 15532 tg3_asic_rev(tp) == ASIC_REV_5720 || 15533 tg3_asic_rev(tp) == ASIC_REV_57766 || 15534 tg3_asic_rev(tp) == ASIC_REV_5762 || 15535 (tg3_asic_rev(tp) == ASIC_REV_5717 && 15536 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) || 15537 (tg3_asic_rev(tp) == ASIC_REV_57765 && 15538 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) { 15539 tp->phy_flags |= TG3_PHYFLG_EEE_CAP; 15540 15541 tp->eee.supported = SUPPORTED_100baseT_Full | 15542 SUPPORTED_1000baseT_Full; 15543 tp->eee.advertised = ADVERTISED_100baseT_Full | 15544 ADVERTISED_1000baseT_Full; 15545 tp->eee.eee_enabled = 1; 15546 tp->eee.tx_lpi_enabled = 1; 15547 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US; 15548 } 15549 15550 tg3_phy_init_link_config(tp); 15551 15552 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 15553 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && 15554 !tg3_flag(tp, ENABLE_APE) && 15555 !tg3_flag(tp, ENABLE_ASF)) { 15556 u32 bmsr, dummy; 15557 15558 tg3_readphy(tp, MII_BMSR, &bmsr); 15559 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 15560 (bmsr & BMSR_LSTATUS)) 15561 goto skip_phy_reset; 15562 15563 err = tg3_phy_reset(tp); 15564 if (err) 15565 return err; 15566 15567 tg3_phy_set_wirespeed(tp); 15568 15569 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) { 15570 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising, 15571 tp->link_config.flowctrl); 15572 15573 tg3_writephy(tp, MII_BMCR, 15574 BMCR_ANENABLE | BMCR_ANRESTART); 15575 } 15576 } 15577 15578 skip_phy_reset: 15579 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 15580 err = tg3_init_5401phy_dsp(tp); 15581 if (err) 15582 return err; 15583 15584 err = tg3_init_5401phy_dsp(tp); 15585 } 15586 15587 return err; 15588 } 15589 15590 static void tg3_read_vpd(struct tg3 *tp) 15591 { 15592 u8 *vpd_data; 15593 unsigned int len, vpdlen; 15594 int i; 15595 15596 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen); 15597 if (!vpd_data) 15598 goto out_no_vpd; 15599 15600 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen, 15601 PCI_VPD_RO_KEYWORD_MFR_ID, &len); 
15602 if (i < 0) 15603 goto partno; 15604 15605 if (len != 4 || memcmp(vpd_data + i, "1028", 4)) 15606 goto partno; 15607 15608 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen, 15609 PCI_VPD_RO_KEYWORD_VENDOR0, &len); 15610 if (i < 0) 15611 goto partno; 15612 15613 memset(tp->fw_ver, 0, sizeof(tp->fw_ver)); 15614 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i); 15615 15616 partno: 15617 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen, 15618 PCI_VPD_RO_KEYWORD_PARTNO, &len); 15619 if (i < 0) 15620 goto out_not_found; 15621 15622 if (len > TG3_BPN_SIZE) 15623 goto out_not_found; 15624 15625 memcpy(tp->board_part_number, &vpd_data[i], len); 15626 15627 out_not_found: 15628 kfree(vpd_data); 15629 if (tp->board_part_number[0]) 15630 return; 15631 15632 out_no_vpd: 15633 if (tg3_asic_rev(tp) == ASIC_REV_5717) { 15634 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 15635 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C) 15636 strcpy(tp->board_part_number, "BCM5717"); 15637 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718) 15638 strcpy(tp->board_part_number, "BCM5718"); 15639 else 15640 goto nomatch; 15641 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) { 15642 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780) 15643 strcpy(tp->board_part_number, "BCM57780"); 15644 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760) 15645 strcpy(tp->board_part_number, "BCM57760"); 15646 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790) 15647 strcpy(tp->board_part_number, "BCM57790"); 15648 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) 15649 strcpy(tp->board_part_number, "BCM57788"); 15650 else 15651 goto nomatch; 15652 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) { 15653 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761) 15654 strcpy(tp->board_part_number, "BCM57761"); 15655 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765) 15656 strcpy(tp->board_part_number, "BCM57765"); 15657 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781) 15658 strcpy(tp->board_part_number, "BCM57781"); 15659 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785) 15660 strcpy(tp->board_part_number, "BCM57785"); 15661 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791) 15662 strcpy(tp->board_part_number, "BCM57791"); 15663 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) 15664 strcpy(tp->board_part_number, "BCM57795"); 15665 else 15666 goto nomatch; 15667 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) { 15668 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762) 15669 strcpy(tp->board_part_number, "BCM57762"); 15670 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766) 15671 strcpy(tp->board_part_number, "BCM57766"); 15672 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782) 15673 strcpy(tp->board_part_number, "BCM57782"); 15674 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786) 15675 strcpy(tp->board_part_number, "BCM57786"); 15676 else 15677 goto nomatch; 15678 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) { 15679 strcpy(tp->board_part_number, "BCM95906"); 15680 } else { 15681 nomatch: 15682 strcpy(tp->board_part_number, "none"); 15683 } 15684 } 15685 15686 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset) 15687 { 15688 u32 val; 15689 15690 if (tg3_nvram_read(tp, offset, &val) || 15691 (val & 0xfc000000) != 0x0c000000 || 15692 tg3_nvram_read(tp, offset + 4, &val) || 15693 val != 0) 15694 return 0; 15695 15696 return 1; 15697 } 15698 15699 static void tg3_read_bc_ver(struct tg3 *tp) 15700 { 15701 u32 val, offset, start, ver_offset; 
15702 int i, dst_off; 15703 bool newver = false; 15704 15705 if (tg3_nvram_read(tp, 0xc, &offset) || 15706 tg3_nvram_read(tp, 0x4, &start)) 15707 return; 15708 15709 offset = tg3_nvram_logical_addr(tp, offset); 15710 15711 if (tg3_nvram_read(tp, offset, &val)) 15712 return; 15713 15714 if ((val & 0xfc000000) == 0x0c000000) { 15715 if (tg3_nvram_read(tp, offset + 4, &val)) 15716 return; 15717 15718 if (val == 0) 15719 newver = true; 15720 } 15721 15722 dst_off = strlen(tp->fw_ver); 15723 15724 if (newver) { 15725 if (TG3_VER_SIZE - dst_off < 16 || 15726 tg3_nvram_read(tp, offset + 8, &ver_offset)) 15727 return; 15728 15729 offset = offset + ver_offset - start; 15730 for (i = 0; i < 16; i += 4) { 15731 __be32 v; 15732 if (tg3_nvram_read_be32(tp, offset + i, &v)) 15733 return; 15734 15735 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v)); 15736 } 15737 } else { 15738 u32 major, minor; 15739 15740 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset)) 15741 return; 15742 15743 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >> 15744 TG3_NVM_BCVER_MAJSFT; 15745 minor = ver_offset & TG3_NVM_BCVER_MINMSK; 15746 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off, 15747 "v%d.%02d", major, minor); 15748 } 15749 } 15750 15751 static void tg3_read_hwsb_ver(struct tg3 *tp) 15752 { 15753 u32 val, major, minor; 15754 15755 /* Use native endian representation */ 15756 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val)) 15757 return; 15758 15759 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >> 15760 TG3_NVM_HWSB_CFG1_MAJSFT; 15761 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >> 15762 TG3_NVM_HWSB_CFG1_MINSFT; 15763 15764 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor); 15765 } 15766 15767 static void tg3_read_sb_ver(struct tg3 *tp, u32 val) 15768 { 15769 u32 offset, major, minor, build; 15770 15771 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1); 15772 15773 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1) 15774 return; 15775 15776 switch (val & TG3_EEPROM_SB_REVISION_MASK) { 15777 case TG3_EEPROM_SB_REVISION_0: 15778 offset = TG3_EEPROM_SB_F1R0_EDH_OFF; 15779 break; 15780 case TG3_EEPROM_SB_REVISION_2: 15781 offset = TG3_EEPROM_SB_F1R2_EDH_OFF; 15782 break; 15783 case TG3_EEPROM_SB_REVISION_3: 15784 offset = TG3_EEPROM_SB_F1R3_EDH_OFF; 15785 break; 15786 case TG3_EEPROM_SB_REVISION_4: 15787 offset = TG3_EEPROM_SB_F1R4_EDH_OFF; 15788 break; 15789 case TG3_EEPROM_SB_REVISION_5: 15790 offset = TG3_EEPROM_SB_F1R5_EDH_OFF; 15791 break; 15792 case TG3_EEPROM_SB_REVISION_6: 15793 offset = TG3_EEPROM_SB_F1R6_EDH_OFF; 15794 break; 15795 default: 15796 return; 15797 } 15798 15799 if (tg3_nvram_read(tp, offset, &val)) 15800 return; 15801 15802 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >> 15803 TG3_EEPROM_SB_EDH_BLD_SHFT; 15804 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >> 15805 TG3_EEPROM_SB_EDH_MAJ_SHFT; 15806 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK; 15807 15808 if (minor > 99 || build > 26) 15809 return; 15810 15811 offset = strlen(tp->fw_ver); 15812 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset, 15813 " v%d.%02d", major, minor); 15814 15815 if (build > 0) { 15816 offset = strlen(tp->fw_ver); 15817 if (offset < TG3_VER_SIZE - 1) 15818 tp->fw_ver[offset] = 'a' + build - 1; 15819 } 15820 } 15821 15822 static void tg3_read_mgmtfw_ver(struct tg3 *tp) 15823 { 15824 u32 val, offset, start; 15825 int i, vlen; 15826 15827 for (offset = TG3_NVM_DIR_START; 15828 offset < TG3_NVM_DIR_END; 15829 offset += TG3_NVM_DIRENT_SIZE) { 15830 if (tg3_nvram_read(tp, offset, &val)) 15831 return; 
15832 15833 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI) 15834 break; 15835 } 15836 15837 if (offset == TG3_NVM_DIR_END) 15838 return; 15839 15840 if (!tg3_flag(tp, 5705_PLUS)) 15841 start = 0x08000000; 15842 else if (tg3_nvram_read(tp, offset - 4, &start)) 15843 return; 15844 15845 if (tg3_nvram_read(tp, offset + 4, &offset) || 15846 !tg3_fw_img_is_valid(tp, offset) || 15847 tg3_nvram_read(tp, offset + 8, &val)) 15848 return; 15849 15850 offset += val - start; 15851 15852 vlen = strlen(tp->fw_ver); 15853 15854 tp->fw_ver[vlen++] = ','; 15855 tp->fw_ver[vlen++] = ' '; 15856 15857 for (i = 0; i < 4; i++) { 15858 __be32 v; 15859 if (tg3_nvram_read_be32(tp, offset, &v)) 15860 return; 15861 15862 offset += sizeof(v); 15863 15864 if (vlen > TG3_VER_SIZE - sizeof(v)) { 15865 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen); 15866 break; 15867 } 15868 15869 memcpy(&tp->fw_ver[vlen], &v, sizeof(v)); 15870 vlen += sizeof(v); 15871 } 15872 } 15873 15874 static void tg3_probe_ncsi(struct tg3 *tp) 15875 { 15876 u32 apedata; 15877 15878 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); 15879 if (apedata != APE_SEG_SIG_MAGIC) 15880 return; 15881 15882 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); 15883 if (!(apedata & APE_FW_STATUS_READY)) 15884 return; 15885 15886 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) 15887 tg3_flag_set(tp, APE_HAS_NCSI); 15888 } 15889 15890 static void tg3_read_dash_ver(struct tg3 *tp) 15891 { 15892 int vlen; 15893 u32 apedata; 15894 char *fwtype; 15895 15896 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION); 15897 15898 if (tg3_flag(tp, APE_HAS_NCSI)) 15899 fwtype = "NCSI"; 15900 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725) 15901 fwtype = "SMASH"; 15902 else 15903 fwtype = "DASH"; 15904 15905 vlen = strlen(tp->fw_ver); 15906 15907 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d", 15908 fwtype, 15909 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT, 15910 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT, 15911 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT, 15912 (apedata & APE_FW_VERSION_BLDMSK)); 15913 } 15914 15915 static void tg3_read_otp_ver(struct tg3 *tp) 15916 { 15917 u32 val, val2; 15918 15919 if (tg3_asic_rev(tp) != ASIC_REV_5762) 15920 return; 15921 15922 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) && 15923 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) && 15924 TG3_OTP_MAGIC0_VALID(val)) { 15925 u64 val64 = (u64) val << 32 | val2; 15926 u32 ver = 0; 15927 int i, vlen; 15928 15929 for (i = 0; i < 7; i++) { 15930 if ((val64 & 0xff) == 0) 15931 break; 15932 ver = val64 & 0xff; 15933 val64 >>= 8; 15934 } 15935 vlen = strlen(tp->fw_ver); 15936 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver); 15937 } 15938 } 15939 15940 static void tg3_read_fw_ver(struct tg3 *tp) 15941 { 15942 u32 val; 15943 bool vpd_vers = false; 15944 15945 if (tp->fw_ver[0] != 0) 15946 vpd_vers = true; 15947 15948 if (tg3_flag(tp, NO_NVRAM)) { 15949 strcat(tp->fw_ver, "sb"); 15950 tg3_read_otp_ver(tp); 15951 return; 15952 } 15953 15954 if (tg3_nvram_read(tp, 0, &val)) 15955 return; 15956 15957 if (val == TG3_EEPROM_MAGIC) 15958 tg3_read_bc_ver(tp); 15959 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) 15960 tg3_read_sb_ver(tp, val); 15961 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) 15962 tg3_read_hwsb_ver(tp); 15963 15964 if (tg3_flag(tp, ENABLE_ASF)) { 15965 if (tg3_flag(tp, ENABLE_APE)) { 15966 tg3_probe_ncsi(tp); 15967 if 
(!vpd_vers) 15968 tg3_read_dash_ver(tp); 15969 } else if (!vpd_vers) { 15970 tg3_read_mgmtfw_ver(tp); 15971 } 15972 } 15973 15974 tp->fw_ver[TG3_VER_SIZE - 1] = 0; 15975 } 15976 15977 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) 15978 { 15979 if (tg3_flag(tp, LRG_PROD_RING_CAP)) 15980 return TG3_RX_RET_MAX_SIZE_5717; 15981 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) 15982 return TG3_RX_RET_MAX_SIZE_5700; 15983 else 15984 return TG3_RX_RET_MAX_SIZE_5705; 15985 } 15986 15987 static const struct pci_device_id tg3_write_reorder_chipsets[] = { 15988 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) }, 15989 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) }, 15990 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) }, 15991 { }, 15992 }; 15993 15994 static struct pci_dev *tg3_find_peer(struct tg3 *tp) 15995 { 15996 struct pci_dev *peer; 15997 unsigned int func, devnr = tp->pdev->devfn & ~7; 15998 15999 for (func = 0; func < 8; func++) { 16000 peer = pci_get_slot(tp->pdev->bus, devnr | func); 16001 if (peer && peer != tp->pdev) 16002 break; 16003 pci_dev_put(peer); 16004 } 16005 /* 5704 can be configured in single-port mode, set peer to 16006 * tp->pdev in that case. 16007 */ 16008 if (!peer) { 16009 peer = tp->pdev; 16010 return peer; 16011 } 16012 16013 /* 16014 * We don't need to keep the refcount elevated; there's no way 16015 * to remove one half of this device without removing the other 16016 */ 16017 pci_dev_put(peer); 16018 16019 return peer; 16020 } 16021 16022 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg) 16023 { 16024 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT; 16025 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) { 16026 u32 reg; 16027 16028 /* All devices that use the alternate 16029 * ASIC REV location have a CPMU. 16030 */ 16031 tg3_flag_set(tp, CPMU_PRESENT); 16032 16033 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 16034 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C || 16035 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 16036 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || 16037 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || 16038 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || 16039 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 || 16040 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || 16041 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || 16042 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || 16043 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) 16044 reg = TG3PCI_GEN2_PRODID_ASICREV; 16045 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 || 16046 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 || 16047 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 || 16048 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 || 16049 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || 16050 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 || 16051 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 || 16052 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 || 16053 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 || 16054 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786) 16055 reg = TG3PCI_GEN15_PRODID_ASICREV; 16056 else 16057 reg = TG3PCI_PRODID_ASICREV; 16058 16059 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id); 16060 } 16061 16062 /* Wrong chip ID in 5752 A0. This code can be removed later 16063 * as A0 is not in production. 
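	 * (The hardware strap value CHIPREV_ID_5752_A0_HW is simply remapped
	 * to the canonical CHIPREV_ID_5752_A0 below.)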
16064 */ 16065 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW) 16066 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0; 16067 16068 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0) 16069 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0; 16070 16071 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16072 tg3_asic_rev(tp) == ASIC_REV_5719 || 16073 tg3_asic_rev(tp) == ASIC_REV_5720) 16074 tg3_flag_set(tp, 5717_PLUS); 16075 16076 if (tg3_asic_rev(tp) == ASIC_REV_57765 || 16077 tg3_asic_rev(tp) == ASIC_REV_57766) 16078 tg3_flag_set(tp, 57765_CLASS); 16079 16080 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) || 16081 tg3_asic_rev(tp) == ASIC_REV_5762) 16082 tg3_flag_set(tp, 57765_PLUS); 16083 16084 /* Intentionally exclude ASIC_REV_5906 */ 16085 if (tg3_asic_rev(tp) == ASIC_REV_5755 || 16086 tg3_asic_rev(tp) == ASIC_REV_5787 || 16087 tg3_asic_rev(tp) == ASIC_REV_5784 || 16088 tg3_asic_rev(tp) == ASIC_REV_5761 || 16089 tg3_asic_rev(tp) == ASIC_REV_5785 || 16090 tg3_asic_rev(tp) == ASIC_REV_57780 || 16091 tg3_flag(tp, 57765_PLUS)) 16092 tg3_flag_set(tp, 5755_PLUS); 16093 16094 if (tg3_asic_rev(tp) == ASIC_REV_5780 || 16095 tg3_asic_rev(tp) == ASIC_REV_5714) 16096 tg3_flag_set(tp, 5780_CLASS); 16097 16098 if (tg3_asic_rev(tp) == ASIC_REV_5750 || 16099 tg3_asic_rev(tp) == ASIC_REV_5752 || 16100 tg3_asic_rev(tp) == ASIC_REV_5906 || 16101 tg3_flag(tp, 5755_PLUS) || 16102 tg3_flag(tp, 5780_CLASS)) 16103 tg3_flag_set(tp, 5750_PLUS); 16104 16105 if (tg3_asic_rev(tp) == ASIC_REV_5705 || 16106 tg3_flag(tp, 5750_PLUS)) 16107 tg3_flag_set(tp, 5705_PLUS); 16108 } 16109 16110 static bool tg3_10_100_only_device(struct tg3 *tp, 16111 const struct pci_device_id *ent) 16112 { 16113 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK; 16114 16115 if ((tg3_asic_rev(tp) == ASIC_REV_5703 && 16116 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || 16117 (tp->phy_flags & TG3_PHYFLG_IS_FET)) 16118 return true; 16119 16120 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) { 16121 if (tg3_asic_rev(tp) == ASIC_REV_5705) { 16122 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100) 16123 return true; 16124 } else { 16125 return true; 16126 } 16127 } 16128 16129 return false; 16130 } 16131 16132 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) 16133 { 16134 u32 misc_ctrl_reg; 16135 u32 pci_state_reg, grc_misc_cfg; 16136 u32 val; 16137 u16 pci_cmd; 16138 int err; 16139 16140 /* Force memory write invalidate off. If we leave it on, 16141 * then on 5700_BX chips we have to enable a workaround. 16142 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary 16143 * to match the cacheline size. The Broadcom driver has this 16144 * workaround but turns MWI off all the time so never uses 16145 * it. This seems to suggest that the workaround is insufficient. 16146 */ 16147 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 16148 pci_cmd &= ~PCI_COMMAND_INVALIDATE; 16149 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 16150 16151 /* Important! -- Make sure register accesses are byteswapped 16152 * correctly. Also, for those chips that require it, make 16153 * sure that indirect register accesses are enabled before 16154 * the first operation. 
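 *
 * For the curious: the indirect path goes through a register
 * window in PCI config space rather than through BAR 0. A minimal
 * sketch of the scheme (tg3_write_indirect_reg32() earlier in this
 * file is the locked version actually used):
 *
 *	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
 *	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
 *
 * This only works because tg3_init_one() has already put
 * MISC_HOST_CTRL_INDIR_ACCESS into tp->misc_host_ctrl, which is
 * written back to the chip just below.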
16155 */ 16156 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 16157 &misc_ctrl_reg); 16158 tp->misc_host_ctrl |= (misc_ctrl_reg & 16159 MISC_HOST_CTRL_CHIPREV); 16160 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 16161 tp->misc_host_ctrl); 16162 16163 tg3_detect_asic_rev(tp, misc_ctrl_reg); 16164 16165 /* If we have 5702/03 A1 or A2 on certain ICH chipsets, 16166 * we need to disable memory and use config. cycles 16167 * only to access all registers. The 5702/03 chips 16168 * can mistakenly decode the special cycles from the 16169 * ICH chipsets as memory write cycles, causing corruption 16170 * of register and memory space. Only certain ICH bridges 16171 * will drive special cycles with non-zero data during the 16172 * address phase which can fall within the 5703's address 16173 * range. This is not an ICH bug as the PCI spec allows 16174 * non-zero address during special cycles. However, only 16175 * these ICH bridges are known to drive non-zero addresses 16176 * during special cycles. 16177 * 16178 * Since special cycles do not cross PCI bridges, we only 16179 * enable this workaround if the 5703 is on the secondary 16180 * bus of these ICH bridges. 16181 */ 16182 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) || 16183 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) { 16184 static struct tg3_dev_id { 16185 u32 vendor; 16186 u32 device; 16187 u32 rev; 16188 } ich_chipsets[] = { 16189 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8, 16190 PCI_ANY_ID }, 16191 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8, 16192 PCI_ANY_ID }, 16193 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11, 16194 0xa }, 16195 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6, 16196 PCI_ANY_ID }, 16197 { }, 16198 }; 16199 struct tg3_dev_id *pci_id = &ich_chipsets[0]; 16200 struct pci_dev *bridge = NULL; 16201 16202 while (pci_id->vendor != 0) { 16203 bridge = pci_get_device(pci_id->vendor, pci_id->device, 16204 bridge); 16205 if (!bridge) { 16206 pci_id++; 16207 continue; 16208 } 16209 if (pci_id->rev != PCI_ANY_ID) { 16210 if (bridge->revision > pci_id->rev) 16211 continue; 16212 } 16213 if (bridge->subordinate && 16214 (bridge->subordinate->number == 16215 tp->pdev->bus->number)) { 16216 tg3_flag_set(tp, ICH_WORKAROUND); 16217 pci_dev_put(bridge); 16218 break; 16219 } 16220 } 16221 } 16222 16223 if (tg3_asic_rev(tp) == ASIC_REV_5701) { 16224 static struct tg3_dev_id { 16225 u32 vendor; 16226 u32 device; 16227 } bridge_chipsets[] = { 16228 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 }, 16229 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 }, 16230 { }, 16231 }; 16232 struct tg3_dev_id *pci_id = &bridge_chipsets[0]; 16233 struct pci_dev *bridge = NULL; 16234 16235 while (pci_id->vendor != 0) { 16236 bridge = pci_get_device(pci_id->vendor, 16237 pci_id->device, 16238 bridge); 16239 if (!bridge) { 16240 pci_id++; 16241 continue; 16242 } 16243 if (bridge->subordinate && 16244 (bridge->subordinate->number <= 16245 tp->pdev->bus->number) && 16246 (bridge->subordinate->busn_res.end >= 16247 tp->pdev->bus->number)) { 16248 tg3_flag_set(tp, 5701_DMA_BUG); 16249 pci_dev_put(bridge); 16250 break; 16251 } 16252 } 16253 } 16254 16255 /* The EPB bridge inside 5714, 5715, and 5780 cannot support 16256 * DMA addresses > 40-bit. This bridge may have other additional 16257 * 57xx devices behind it in some 4-port NIC designs for example. 16258 * Any tg3 device found behind the bridge will also need the 40-bit 16259 * DMA workaround. 
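 *
 * The flag set below is consumed in tg3_init_one(), which picks
 * the DMA mask roughly as follows (a sketch; the real code also
 * special-cases 5788 and CONFIG_HIGHMEM):
 *
 *	if (tg3_flag(tp, 40BIT_DMA_BUG))
 *		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
 *	else
 *		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);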
16260 */ 16261 if (tg3_flag(tp, 5780_CLASS)) { 16262 tg3_flag_set(tp, 40BIT_DMA_BUG); 16263 tp->msi_cap = tp->pdev->msi_cap; 16264 } else { 16265 struct pci_dev *bridge = NULL; 16266 16267 do { 16268 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS, 16269 PCI_DEVICE_ID_SERVERWORKS_EPB, 16270 bridge); 16271 if (bridge && bridge->subordinate && 16272 (bridge->subordinate->number <= 16273 tp->pdev->bus->number) && 16274 (bridge->subordinate->busn_res.end >= 16275 tp->pdev->bus->number)) { 16276 tg3_flag_set(tp, 40BIT_DMA_BUG); 16277 pci_dev_put(bridge); 16278 break; 16279 } 16280 } while (bridge); 16281 } 16282 16283 if (tg3_asic_rev(tp) == ASIC_REV_5704 || 16284 tg3_asic_rev(tp) == ASIC_REV_5714) 16285 tp->pdev_peer = tg3_find_peer(tp); 16286 16287 /* Determine TSO capabilities */ 16288 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0) 16289 ; /* Do nothing. HW bug. */ 16290 else if (tg3_flag(tp, 57765_PLUS)) 16291 tg3_flag_set(tp, HW_TSO_3); 16292 else if (tg3_flag(tp, 5755_PLUS) || 16293 tg3_asic_rev(tp) == ASIC_REV_5906) 16294 tg3_flag_set(tp, HW_TSO_2); 16295 else if (tg3_flag(tp, 5750_PLUS)) { 16296 tg3_flag_set(tp, HW_TSO_1); 16297 tg3_flag_set(tp, TSO_BUG); 16298 if (tg3_asic_rev(tp) == ASIC_REV_5750 && 16299 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2) 16300 tg3_flag_clear(tp, TSO_BUG); 16301 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 && 16302 tg3_asic_rev(tp) != ASIC_REV_5701 && 16303 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 16304 tg3_flag_set(tp, FW_TSO); 16305 tg3_flag_set(tp, TSO_BUG); 16306 if (tg3_asic_rev(tp) == ASIC_REV_5705) 16307 tp->fw_needed = FIRMWARE_TG3TSO5; 16308 else 16309 tp->fw_needed = FIRMWARE_TG3TSO; 16310 } 16311 16312 /* Selectively allow TSO based on operating conditions */ 16313 if (tg3_flag(tp, HW_TSO_1) || 16314 tg3_flag(tp, HW_TSO_2) || 16315 tg3_flag(tp, HW_TSO_3) || 16316 tg3_flag(tp, FW_TSO)) { 16317 /* For firmware TSO, assume ASF is disabled. 16318 * We'll disable TSO later if we discover ASF 16319 * is enabled in tg3_get_eeprom_hw_cfg(). 
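 *
 * That later check amounts to (a sketch; the real code sits just
 * after the tg3_get_eeprom_hw_cfg() call further down in this
 * function):
 *
 *	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
 *		tg3_flag_clear(tp, TSO_CAPABLE);
 *		tg3_flag_clear(tp, TSO_BUG);
 *		tp->fw_needed = NULL;
 *	}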
16320 */ 16321 tg3_flag_set(tp, TSO_CAPABLE); 16322 } else { 16323 tg3_flag_clear(tp, TSO_CAPABLE); 16324 tg3_flag_clear(tp, TSO_BUG); 16325 tp->fw_needed = NULL; 16326 } 16327 16328 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) 16329 tp->fw_needed = FIRMWARE_TG3; 16330 16331 if (tg3_asic_rev(tp) == ASIC_REV_57766) 16332 tp->fw_needed = FIRMWARE_TG357766; 16333 16334 tp->irq_max = 1; 16335 16336 if (tg3_flag(tp, 5750_PLUS)) { 16337 tg3_flag_set(tp, SUPPORT_MSI); 16338 if (tg3_chip_rev(tp) == CHIPREV_5750_AX || 16339 tg3_chip_rev(tp) == CHIPREV_5750_BX || 16340 (tg3_asic_rev(tp) == ASIC_REV_5714 && 16341 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 && 16342 tp->pdev_peer == tp->pdev)) 16343 tg3_flag_clear(tp, SUPPORT_MSI); 16344 16345 if (tg3_flag(tp, 5755_PLUS) || 16346 tg3_asic_rev(tp) == ASIC_REV_5906) { 16347 tg3_flag_set(tp, 1SHOT_MSI); 16348 } 16349 16350 if (tg3_flag(tp, 57765_PLUS)) { 16351 tg3_flag_set(tp, SUPPORT_MSIX); 16352 tp->irq_max = TG3_IRQ_MAX_VECS; 16353 } 16354 } 16355 16356 tp->txq_max = 1; 16357 tp->rxq_max = 1; 16358 if (tp->irq_max > 1) { 16359 tp->rxq_max = TG3_RSS_MAX_NUM_QS; 16360 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS); 16361 16362 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 16363 tg3_asic_rev(tp) == ASIC_REV_5720) 16364 tp->txq_max = tp->irq_max - 1; 16365 } 16366 16367 if (tg3_flag(tp, 5755_PLUS) || 16368 tg3_asic_rev(tp) == ASIC_REV_5906) 16369 tg3_flag_set(tp, SHORT_DMA_BUG); 16370 16371 if (tg3_asic_rev(tp) == ASIC_REV_5719) 16372 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K; 16373 16374 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16375 tg3_asic_rev(tp) == ASIC_REV_5719 || 16376 tg3_asic_rev(tp) == ASIC_REV_5720 || 16377 tg3_asic_rev(tp) == ASIC_REV_5762) 16378 tg3_flag_set(tp, LRG_PROD_RING_CAP); 16379 16380 if (tg3_flag(tp, 57765_PLUS) && 16381 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0) 16382 tg3_flag_set(tp, USE_JUMBO_BDFLAG); 16383 16384 if (!tg3_flag(tp, 5705_PLUS) || 16385 tg3_flag(tp, 5780_CLASS) || 16386 tg3_flag(tp, USE_JUMBO_BDFLAG)) 16387 tg3_flag_set(tp, JUMBO_CAPABLE); 16388 16389 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 16390 &pci_state_reg); 16391 16392 if (pci_is_pcie(tp->pdev)) { 16393 u16 lnkctl; 16394 16395 tg3_flag_set(tp, PCI_EXPRESS); 16396 16397 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl); 16398 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) { 16399 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 16400 tg3_flag_clear(tp, HW_TSO_2); 16401 tg3_flag_clear(tp, TSO_CAPABLE); 16402 } 16403 if (tg3_asic_rev(tp) == ASIC_REV_5784 || 16404 tg3_asic_rev(tp) == ASIC_REV_5761 || 16405 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 || 16406 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1) 16407 tg3_flag_set(tp, CLKREQ_BUG); 16408 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) { 16409 tg3_flag_set(tp, L1PLLPD_EN); 16410 } 16411 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) { 16412 /* BCM5785 devices are effectively PCIe devices, and should 16413 * follow PCIe codepaths, but do not have a PCIe capabilities 16414 * section. 
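 *
 * pci_is_pcie() keys off the PCI Express capability that the PCI
 * core caches at enumeration time, i.e. effectively:
 *
 *	pci_find_capability(tp->pdev, PCI_CAP_ID_EXP)	-> 0 on 5785
 *
 * so it reports false for these parts and the PCI_EXPRESS flag has
 * to be forced by hand below.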
16415 */ 16416 tg3_flag_set(tp, PCI_EXPRESS); 16417 } else if (!tg3_flag(tp, 5705_PLUS) || 16418 tg3_flag(tp, 5780_CLASS)) { 16419 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX); 16420 if (!tp->pcix_cap) { 16421 dev_err(&tp->pdev->dev, 16422 "Cannot find PCI-X capability, aborting\n"); 16423 return -EIO; 16424 } 16425 16426 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE)) 16427 tg3_flag_set(tp, PCIX_MODE); 16428 } 16429 16430 /* If we have an AMD 762 or VIA K8T800 chipset, write 16431 * reordering to the mailbox registers done by the host 16432 * controller can cause major trouble. We read back from 16433 * every mailbox register write to force the writes to be 16434 * posted to the chip in order. 16435 */ 16436 if (pci_dev_present(tg3_write_reorder_chipsets) && 16437 !tg3_flag(tp, PCI_EXPRESS)) 16438 tg3_flag_set(tp, MBOX_WRITE_REORDER); 16439 16440 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, 16441 &tp->pci_cacheline_sz); 16442 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER, 16443 &tp->pci_lat_timer); 16444 if (tg3_asic_rev(tp) == ASIC_REV_5703 && 16445 tp->pci_lat_timer < 64) { 16446 tp->pci_lat_timer = 64; 16447 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, 16448 tp->pci_lat_timer); 16449 } 16450 16451 /* Important! -- It is critical that the PCI-X hw workaround 16452 * situation is decided before the first MMIO register access. 16453 */ 16454 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) { 16455 /* 5700 BX chips need to have their TX producer index 16456 * mailboxes written twice to work around a bug. 16457 */ 16458 tg3_flag_set(tp, TXD_MBOX_HWBUG); 16459 16460 /* If we are in PCI-X mode, enable register write workaround. 16461 * 16462 * The workaround is to use indirect register accesses 16463 * for all chip writes not to mailbox registers. 16464 */ 16465 if (tg3_flag(tp, PCIX_MODE)) { 16466 u32 pm_reg; 16467 16468 tg3_flag_set(tp, PCIX_TARGET_HWBUG); 16469 16470 /* The chip can have its power management PCI config 16471 * space registers clobbered due to this bug. 16472 * So explicitly force the chip into D0 here. 16473 */ 16474 pci_read_config_dword(tp->pdev, 16475 tp->pdev->pm_cap + PCI_PM_CTRL, 16476 &pm_reg); 16477 pm_reg &= ~PCI_PM_CTRL_STATE_MASK; 16478 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */; 16479 pci_write_config_dword(tp->pdev, 16480 tp->pdev->pm_cap + PCI_PM_CTRL, 16481 pm_reg); 16482 16483 /* Also, force SERR#/PERR# in PCI command. 
*/ 16484 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 16485 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; 16486 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 16487 } 16488 } 16489 16490 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0) 16491 tg3_flag_set(tp, PCI_HIGH_SPEED); 16492 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0) 16493 tg3_flag_set(tp, PCI_32BIT); 16494 16495 /* Chip-specific fixup from Broadcom driver */ 16496 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) && 16497 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) { 16498 pci_state_reg |= PCISTATE_RETRY_SAME_DMA; 16499 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg); 16500 } 16501 16502 /* Default fast path register access methods */ 16503 tp->read32 = tg3_read32; 16504 tp->write32 = tg3_write32; 16505 tp->read32_mbox = tg3_read32; 16506 tp->write32_mbox = tg3_write32; 16507 tp->write32_tx_mbox = tg3_write32; 16508 tp->write32_rx_mbox = tg3_write32; 16509 16510 /* Various workaround register access methods */ 16511 if (tg3_flag(tp, PCIX_TARGET_HWBUG)) 16512 tp->write32 = tg3_write_indirect_reg32; 16513 else if (tg3_asic_rev(tp) == ASIC_REV_5701 || 16514 (tg3_flag(tp, PCI_EXPRESS) && 16515 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) { 16516 /* 16517 * Back-to-back register writes can cause problems on these 16518 * chips; the workaround is to read back all reg writes 16519 * except those to mailbox regs. 16520 * 16521 * See tg3_write_indirect_reg32(). 16522 */ 16523 tp->write32 = tg3_write_flush_reg32; 16524 } 16525 16526 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) { 16527 tp->write32_tx_mbox = tg3_write32_tx_mbox; 16528 if (tg3_flag(tp, MBOX_WRITE_REORDER)) 16529 tp->write32_rx_mbox = tg3_write_flush_reg32; 16530 } 16531 16532 if (tg3_flag(tp, ICH_WORKAROUND)) { 16533 tp->read32 = tg3_read_indirect_reg32; 16534 tp->write32 = tg3_write_indirect_reg32; 16535 tp->read32_mbox = tg3_read_indirect_mbox; 16536 tp->write32_mbox = tg3_write_indirect_mbox; 16537 tp->write32_tx_mbox = tg3_write_indirect_mbox; 16538 tp->write32_rx_mbox = tg3_write_indirect_mbox; 16539 16540 iounmap(tp->regs); 16541 tp->regs = NULL; 16542 16543 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 16544 pci_cmd &= ~PCI_COMMAND_MEMORY; 16545 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 16546 } 16547 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 16548 tp->read32_mbox = tg3_read32_mbox_5906; 16549 tp->write32_mbox = tg3_write32_mbox_5906; 16550 tp->write32_tx_mbox = tg3_write32_mbox_5906; 16551 tp->write32_rx_mbox = tg3_write32_mbox_5906; 16552 } 16553 16554 if (tp->write32 == tg3_write_indirect_reg32 || 16555 (tg3_flag(tp, PCIX_MODE) && 16556 (tg3_asic_rev(tp) == ASIC_REV_5700 || 16557 tg3_asic_rev(tp) == ASIC_REV_5701))) 16558 tg3_flag_set(tp, SRAM_USE_CONFIG); 16559 16560 /* The memory arbiter has to be enabled in order for SRAM accesses 16561 * to succeed. Normally on powerup the tg3 chip firmware will make 16562 * sure it is enabled, but other entities such as system netboot 16563 * code might disable it. 
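 *
 * In particular the tg3_read_mem()/tg3_write_mem() helpers used
 * below (for NIC_SRAM_CPMU_STATUS, the MAC address mailbox, etc.)
 * go through NIC SRAM and can return garbage if the arbiter is
 * left disabled, hence the unconditional re-enable below.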
16564 */ 16565 val = tr32(MEMARB_MODE); 16566 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); 16567 16568 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3; 16569 if (tg3_asic_rev(tp) == ASIC_REV_5704 || 16570 tg3_flag(tp, 5780_CLASS)) { 16571 if (tg3_flag(tp, PCIX_MODE)) { 16572 pci_read_config_dword(tp->pdev, 16573 tp->pcix_cap + PCI_X_STATUS, 16574 &val); 16575 tp->pci_fn = val & 0x7; 16576 } 16577 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16578 tg3_asic_rev(tp) == ASIC_REV_5719 || 16579 tg3_asic_rev(tp) == ASIC_REV_5720) { 16580 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val); 16581 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG) 16582 val = tr32(TG3_CPMU_STATUS); 16583 16584 if (tg3_asic_rev(tp) == ASIC_REV_5717) 16585 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0; 16586 else 16587 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >> 16588 TG3_CPMU_STATUS_FSHFT_5719; 16589 } 16590 16591 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) { 16592 tp->write32_tx_mbox = tg3_write_flush_reg32; 16593 tp->write32_rx_mbox = tg3_write_flush_reg32; 16594 } 16595 16596 /* Get eeprom hw config before calling tg3_set_power_state(). 16597 * In particular, the TG3_FLAG_IS_NIC flag must be 16598 * determined before calling tg3_set_power_state() so that 16599 * we know whether or not to switch out of Vaux power. 16600 * When the flag is set, it means that GPIO1 is used for eeprom 16601 * write protect and also implies that it is a LOM where GPIOs 16602 * are not used to switch power. 16603 */ 16604 tg3_get_eeprom_hw_cfg(tp); 16605 16606 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) { 16607 tg3_flag_clear(tp, TSO_CAPABLE); 16608 tg3_flag_clear(tp, TSO_BUG); 16609 tp->fw_needed = NULL; 16610 } 16611 16612 if (tg3_flag(tp, ENABLE_APE)) { 16613 /* Allow reads and writes to the 16614 * APE register and memory space. 16615 */ 16616 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR | 16617 PCISTATE_ALLOW_APE_SHMEM_WR | 16618 PCISTATE_ALLOW_APE_PSPACE_WR; 16619 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, 16620 pci_state_reg); 16621 16622 tg3_ape_lock_init(tp); 16623 tp->ape_hb_interval = 16624 msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC); 16625 } 16626 16627 /* Set up tp->grc_local_ctrl before calling 16628 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high 16629 * will bring 5700's external PHY out of reset. 16630 * It is also used as eeprom write protect on LOMs. 16631 */ 16632 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM; 16633 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 16634 tg3_flag(tp, EEPROM_WRITE_PROT)) 16635 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | 16636 GRC_LCLCTRL_GPIO_OUTPUT1); 16637 /* Unused GPIO3 must be driven as output on 5752 because there 16638 * are no pull-up resistors on unused GPIO pins. 16639 */ 16640 else if (tg3_asic_rev(tp) == ASIC_REV_5752) 16641 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; 16642 16643 if (tg3_asic_rev(tp) == ASIC_REV_5755 || 16644 tg3_asic_rev(tp) == ASIC_REV_57780 || 16645 tg3_flag(tp, 57765_CLASS)) 16646 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; 16647 16648 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 16649 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { 16650 /* Turn off the debug UART. */ 16651 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; 16652 if (tg3_flag(tp, IS_NIC)) 16653 /* Keep VMain power. 
*/ 16654 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | 16655 GRC_LCLCTRL_GPIO_OUTPUT0; 16656 } 16657 16658 if (tg3_asic_rev(tp) == ASIC_REV_5762) 16659 tp->grc_local_ctrl |= 16660 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL; 16661 16662 /* Switch out of Vaux if it is a NIC */ 16663 tg3_pwrsrc_switch_to_vmain(tp); 16664 16665 /* Derive initial jumbo mode from MTU assigned in 16666 * ether_setup() via the alloc_etherdev() call 16667 */ 16668 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS)) 16669 tg3_flag_set(tp, JUMBO_RING_ENABLE); 16670 16671 /* Determine WakeOnLan speed to use. */ 16672 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 16673 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 16674 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 || 16675 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) { 16676 tg3_flag_clear(tp, WOL_SPEED_100MB); 16677 } else { 16678 tg3_flag_set(tp, WOL_SPEED_100MB); 16679 } 16680 16681 if (tg3_asic_rev(tp) == ASIC_REV_5906) 16682 tp->phy_flags |= TG3_PHYFLG_IS_FET; 16683 16684 /* A few boards don't want Ethernet@WireSpeed phy feature */ 16685 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 16686 (tg3_asic_rev(tp) == ASIC_REV_5705 && 16687 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) && 16688 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) || 16689 (tp->phy_flags & TG3_PHYFLG_IS_FET) || 16690 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 16691 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED; 16692 16693 if (tg3_chip_rev(tp) == CHIPREV_5703_AX || 16694 tg3_chip_rev(tp) == CHIPREV_5704_AX) 16695 tp->phy_flags |= TG3_PHYFLG_ADC_BUG; 16696 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) 16697 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG; 16698 16699 if (tg3_flag(tp, 5705_PLUS) && 16700 !(tp->phy_flags & TG3_PHYFLG_IS_FET) && 16701 tg3_asic_rev(tp) != ASIC_REV_5785 && 16702 tg3_asic_rev(tp) != ASIC_REV_57780 && 16703 !tg3_flag(tp, 57765_PLUS)) { 16704 if (tg3_asic_rev(tp) == ASIC_REV_5755 || 16705 tg3_asic_rev(tp) == ASIC_REV_5787 || 16706 tg3_asic_rev(tp) == ASIC_REV_5784 || 16707 tg3_asic_rev(tp) == ASIC_REV_5761) { 16708 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 && 16709 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722) 16710 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG; 16711 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) 16712 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM; 16713 } else 16714 tp->phy_flags |= TG3_PHYFLG_BER_BUG; 16715 } 16716 16717 if (tg3_asic_rev(tp) == ASIC_REV_5784 && 16718 tg3_chip_rev(tp) != CHIPREV_5784_AX) { 16719 tp->phy_otp = tg3_read_otp_phycfg(tp); 16720 if (tp->phy_otp == 0) 16721 tp->phy_otp = TG3_OTP_DEFAULT; 16722 } 16723 16724 if (tg3_flag(tp, CPMU_PRESENT)) 16725 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; 16726 else 16727 tp->mi_mode = MAC_MI_MODE_BASE; 16728 16729 tp->coalesce_mode = 0; 16730 if (tg3_chip_rev(tp) != CHIPREV_5700_AX && 16731 tg3_chip_rev(tp) != CHIPREV_5700_BX) 16732 tp->coalesce_mode |= HOSTCC_MODE_32BYTE; 16733 16734 /* Set these bits to enable statistics workaround. */ 16735 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16736 tg3_asic_rev(tp) == ASIC_REV_5762 || 16737 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 16738 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) { 16739 tp->coalesce_mode |= HOSTCC_MODE_ATTN; 16740 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN; 16741 } 16742 16743 if (tg3_asic_rev(tp) == ASIC_REV_5785 || 16744 tg3_asic_rev(tp) == ASIC_REV_57780) 16745 tg3_flag_set(tp, USE_PHYLIB); 16746 16747 err = tg3_mdio_init(tp); 16748 if (err) 16749 return err; 16750 16751 /* Initialize data/descriptor byte/word swapping. 
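 *
 * tp->grc_mode already carries the swap policy chosen in
 * tg3_init_one() (BSWAP/WSWAP of frame and non-frame data, plus
 * GRC_MODE_BSWAP_NONFRM_DATA on big-endian hosts), so the code
 * below is a read-modify-write that keeps only the few live bits
 * of the current GRC_MODE value, roughly:
 *
 *	tw32(GRC_MODE, (tr32(GRC_MODE) & keep) | tp->grc_mode);
 *
 * where "keep" is GRC_MODE_HOST_STACKUP, widened on 5720/5762 to
 * preserve the B2HRX/HTX2B enables as well.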
*/ 16752 val = tr32(GRC_MODE); 16753 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 16754 tg3_asic_rev(tp) == ASIC_REV_5762) 16755 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA | 16756 GRC_MODE_WORD_SWAP_B2HRX_DATA | 16757 GRC_MODE_B2HRX_ENABLE | 16758 GRC_MODE_HTX2B_ENABLE | 16759 GRC_MODE_HOST_STACKUP); 16760 else 16761 val &= GRC_MODE_HOST_STACKUP; 16762 16763 tw32(GRC_MODE, val | tp->grc_mode); 16764 16765 tg3_switch_clocks(tp); 16766 16767 /* Clear this out for sanity. */ 16768 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 16769 16770 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */ 16771 tw32(TG3PCI_REG_BASE_ADDR, 0); 16772 16773 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 16774 &pci_state_reg); 16775 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && 16776 !tg3_flag(tp, PCIX_TARGET_HWBUG)) { 16777 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 16778 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 || 16779 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 || 16780 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) { 16781 void __iomem *sram_base; 16782 16783 /* Write some dummy words into the SRAM status block 16784 * area, see if it reads back correctly. If the return 16785 * value is bad, force enable the PCIX workaround. 16786 */ 16787 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK; 16788 16789 writel(0x00000000, sram_base); 16790 writel(0x00000000, sram_base + 4); 16791 writel(0xffffffff, sram_base + 4); 16792 if (readl(sram_base) != 0x00000000) 16793 tg3_flag_set(tp, PCIX_TARGET_HWBUG); 16794 } 16795 } 16796 16797 udelay(50); 16798 tg3_nvram_init(tp); 16799 16800 /* If the device has an NVRAM, no need to load patch firmware */ 16801 if (tg3_asic_rev(tp) == ASIC_REV_57766 && 16802 !tg3_flag(tp, NO_NVRAM)) 16803 tp->fw_needed = NULL; 16804 16805 grc_misc_cfg = tr32(GRC_MISC_CFG); 16806 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK; 16807 16808 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 16809 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 || 16810 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) 16811 tg3_flag_set(tp, IS_5788); 16812 16813 if (!tg3_flag(tp, IS_5788) && 16814 tg3_asic_rev(tp) != ASIC_REV_5700) 16815 tg3_flag_set(tp, TAGGED_STATUS); 16816 if (tg3_flag(tp, TAGGED_STATUS)) { 16817 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | 16818 HOSTCC_MODE_CLRTICK_TXBD); 16819 16820 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS; 16821 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 16822 tp->misc_host_ctrl); 16823 } 16824 16825 /* Preserve the APE MAC_MODE bits */ 16826 if (tg3_flag(tp, ENABLE_APE)) 16827 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 16828 else 16829 tp->mac_mode = 0; 16830 16831 if (tg3_10_100_only_device(tp, ent)) 16832 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY; 16833 16834 err = tg3_phy_probe(tp); 16835 if (err) { 16836 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err); 16837 /* ... but do not return immediately ... */ 16838 tg3_mdio_fini(tp); 16839 } 16840 16841 tg3_read_vpd(tp); 16842 tg3_read_fw_ver(tp); 16843 16844 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 16845 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; 16846 } else { 16847 if (tg3_asic_rev(tp) == ASIC_REV_5700) 16848 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; 16849 else 16850 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; 16851 } 16852 16853 /* 5700 {AX,BX} chips have a broken status block link 16854 * change bit implementation, so we must use the 16855 * status register in those cases. 
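 *
 * With USE_LINKCHG_REG set, the timer path ignores the status
 * block and polls the MAC status register instead, along the
 * lines of (a sketch; see tg3_timer() earlier in this file for
 * the real polling logic):
 *
 *	if (tr32(MAC_STATUS) & MAC_STATUS_LNKSTATE_CHANGED)
 *		<redo link setup>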
16856 */ 16857 if (tg3_asic_rev(tp) == ASIC_REV_5700) 16858 tg3_flag_set(tp, USE_LINKCHG_REG); 16859 else 16860 tg3_flag_clear(tp, USE_LINKCHG_REG); 16861 16862 /* The led_ctrl is set during tg3_phy_probe, here we might 16863 * have to force the link status polling mechanism based 16864 * upon subsystem IDs. 16865 */ 16866 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && 16867 tg3_asic_rev(tp) == ASIC_REV_5701 && 16868 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 16869 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; 16870 tg3_flag_set(tp, USE_LINKCHG_REG); 16871 } 16872 16873 /* For all SERDES we poll the MAC status register. */ 16874 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 16875 tg3_flag_set(tp, POLL_SERDES); 16876 else 16877 tg3_flag_clear(tp, POLL_SERDES); 16878 16879 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF)) 16880 tg3_flag_set(tp, POLL_CPMU_LINK); 16881 16882 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN; 16883 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; 16884 if (tg3_asic_rev(tp) == ASIC_REV_5701 && 16885 tg3_flag(tp, PCIX_MODE)) { 16886 tp->rx_offset = NET_SKB_PAD; 16887 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 16888 tp->rx_copy_thresh = ~(u16)0; 16889 #endif 16890 } 16891 16892 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1; 16893 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1; 16894 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1; 16895 16896 tp->rx_std_max_post = tp->rx_std_ring_mask + 1; 16897 16898 /* Increment the rx prod index on the rx std ring by at most 16899 * 8 for these chips to work around hw errata. 16900 */ 16901 if (tg3_asic_rev(tp) == ASIC_REV_5750 || 16902 tg3_asic_rev(tp) == ASIC_REV_5752 || 16903 tg3_asic_rev(tp) == ASIC_REV_5755) 16904 tp->rx_std_max_post = 8; 16905 16906 if (tg3_flag(tp, ASPM_WORKAROUND)) 16907 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) & 16908 PCIE_PWR_MGMT_L1_THRESH_MSK; 16909 16910 return err; 16911 } 16912 16913 static int tg3_get_device_address(struct tg3 *tp, u8 *addr) 16914 { 16915 u32 hi, lo, mac_offset; 16916 int addr_ok = 0; 16917 int err; 16918 16919 if (!eth_platform_get_mac_address(&tp->pdev->dev, addr)) 16920 return 0; 16921 16922 if (tg3_flag(tp, IS_SSB_CORE)) { 16923 err = ssb_gige_get_macaddr(tp->pdev, addr); 16924 if (!err && is_valid_ether_addr(addr)) 16925 return 0; 16926 } 16927 16928 mac_offset = 0x7c; 16929 if (tg3_asic_rev(tp) == ASIC_REV_5704 || 16930 tg3_flag(tp, 5780_CLASS)) { 16931 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) 16932 mac_offset = 0xcc; 16933 if (tg3_nvram_lock(tp)) 16934 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); 16935 else 16936 tg3_nvram_unlock(tp); 16937 } else if (tg3_flag(tp, 5717_PLUS)) { 16938 if (tp->pci_fn & 1) 16939 mac_offset = 0xcc; 16940 if (tp->pci_fn > 1) 16941 mac_offset += 0x18c; 16942 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) 16943 mac_offset = 0x10; 16944 16945 /* First try to get it from MAC address mailbox. */ 16946 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi); 16947 if ((hi >> 16) == 0x484b) { 16948 addr[0] = (hi >> 8) & 0xff; 16949 addr[1] = (hi >> 0) & 0xff; 16950 16951 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo); 16952 addr[2] = (lo >> 24) & 0xff; 16953 addr[3] = (lo >> 16) & 0xff; 16954 addr[4] = (lo >> 8) & 0xff; 16955 addr[5] = (lo >> 0) & 0xff; 16956 16957 /* Some old bootcode may report a 0 MAC address in SRAM */ 16958 addr_ok = is_valid_ether_addr(addr); 16959 } 16960 if (!addr_ok) { 16961 /* Next, try NVRAM. 
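 *
 * (The mailbox path above is gated on the 0x484b signature, which
 * is ASCII "HK", presumably left there by bootcode.) In NVRAM the
 * address sits at mac_offset as two big-endian words, matching the
 * memcpy()s below:
 *
 *	be32 hi: ?? ?? m0 m1	-> addr[0..1]
 *	be32 lo: m2 m3 m4 m5	-> addr[2..5]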
*/ 16962 if (!tg3_flag(tp, NO_NVRAM) && 16963 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && 16964 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { 16965 memcpy(&addr[0], ((char *)&hi) + 2, 2); 16966 memcpy(&addr[2], (char *)&lo, sizeof(lo)); 16967 } 16968 /* Finally just fetch it out of the MAC control regs. */ 16969 else { 16970 hi = tr32(MAC_ADDR_0_HIGH); 16971 lo = tr32(MAC_ADDR_0_LOW); 16972 16973 addr[5] = lo & 0xff; 16974 addr[4] = (lo >> 8) & 0xff; 16975 addr[3] = (lo >> 16) & 0xff; 16976 addr[2] = (lo >> 24) & 0xff; 16977 addr[1] = hi & 0xff; 16978 addr[0] = (hi >> 8) & 0xff; 16979 } 16980 } 16981 16982 if (!is_valid_ether_addr(addr)) 16983 return -EINVAL; 16984 return 0; 16985 } 16986 16987 #define BOUNDARY_SINGLE_CACHELINE 1 16988 #define BOUNDARY_MULTI_CACHELINE 2 16989 16990 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val) 16991 { 16992 int cacheline_size; 16993 u8 byte; 16994 int goal; 16995 16996 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte); 16997 if (byte == 0) 16998 cacheline_size = 1024; 16999 else 17000 cacheline_size = (int) byte * 4; 17001 17002 /* On 5703 and later chips, the boundary bits have no 17003 * effect. 17004 */ 17005 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 17006 tg3_asic_rev(tp) != ASIC_REV_5701 && 17007 !tg3_flag(tp, PCI_EXPRESS)) 17008 goto out; 17009 17010 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) 17011 goal = BOUNDARY_MULTI_CACHELINE; 17012 #else 17013 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA) 17014 goal = BOUNDARY_SINGLE_CACHELINE; 17015 #else 17016 goal = 0; 17017 #endif 17018 #endif 17019 17020 if (tg3_flag(tp, 57765_PLUS)) { 17021 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; 17022 goto out; 17023 } 17024 17025 if (!goal) 17026 goto out; 17027 17028 /* PCI controllers on most RISC systems tend to disconnect 17029 * when a device tries to burst across a cache-line boundary. 17030 * Therefore, letting tg3 do so just wastes PCI bandwidth. 17031 * 17032 * Unfortunately, for PCI-E there are only limited 17033 * write-side controls for this, and thus for reads 17034 * we will still get the disconnects. We'll also waste 17035 * these PCI cycles for both read and write for chips 17036 * other than 5700 and 5701 which do not implement the 17037 * boundary bits. 
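 *
 * Note that cacheline_size above comes from the PCI_CACHE_LINE_SIZE
 * config byte, which counts 32-bit words: e.g. a raw value of 0x10
 * means 16 words = 64 bytes, and a raw 0 is treated as a maximal
 * 1024-byte line to stay conservative.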
17038 */ 17039 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) { 17040 switch (cacheline_size) { 17041 case 16: 17042 case 32: 17043 case 64: 17044 case 128: 17045 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17046 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX | 17047 DMA_RWCTRL_WRITE_BNDRY_128_PCIX); 17048 } else { 17049 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | 17050 DMA_RWCTRL_WRITE_BNDRY_384_PCIX); 17051 } 17052 break; 17053 17054 case 256: 17055 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX | 17056 DMA_RWCTRL_WRITE_BNDRY_256_PCIX); 17057 break; 17058 17059 default: 17060 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | 17061 DMA_RWCTRL_WRITE_BNDRY_384_PCIX); 17062 break; 17063 } 17064 } else if (tg3_flag(tp, PCI_EXPRESS)) { 17065 switch (cacheline_size) { 17066 case 16: 17067 case 32: 17068 case 64: 17069 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17070 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; 17071 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE; 17072 break; 17073 } 17074 fallthrough; 17075 case 128: 17076 default: 17077 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; 17078 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE; 17079 break; 17080 } 17081 } else { 17082 switch (cacheline_size) { 17083 case 16: 17084 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17085 val |= (DMA_RWCTRL_READ_BNDRY_16 | 17086 DMA_RWCTRL_WRITE_BNDRY_16); 17087 break; 17088 } 17089 fallthrough; 17090 case 32: 17091 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17092 val |= (DMA_RWCTRL_READ_BNDRY_32 | 17093 DMA_RWCTRL_WRITE_BNDRY_32); 17094 break; 17095 } 17096 fallthrough; 17097 case 64: 17098 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17099 val |= (DMA_RWCTRL_READ_BNDRY_64 | 17100 DMA_RWCTRL_WRITE_BNDRY_64); 17101 break; 17102 } 17103 fallthrough; 17104 case 128: 17105 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17106 val |= (DMA_RWCTRL_READ_BNDRY_128 | 17107 DMA_RWCTRL_WRITE_BNDRY_128); 17108 break; 17109 } 17110 fallthrough; 17111 case 256: 17112 val |= (DMA_RWCTRL_READ_BNDRY_256 | 17113 DMA_RWCTRL_WRITE_BNDRY_256); 17114 break; 17115 case 512: 17116 val |= (DMA_RWCTRL_READ_BNDRY_512 | 17117 DMA_RWCTRL_WRITE_BNDRY_512); 17118 break; 17119 case 1024: 17120 default: 17121 val |= (DMA_RWCTRL_READ_BNDRY_1024 | 17122 DMA_RWCTRL_WRITE_BNDRY_1024); 17123 break; 17124 } 17125 } 17126 17127 out: 17128 return val; 17129 } 17130 17131 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, 17132 int size, bool to_device) 17133 { 17134 struct tg3_internal_buffer_desc test_desc; 17135 u32 sram_dma_descs; 17136 int i, ret; 17137 17138 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE; 17139 17140 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0); 17141 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0); 17142 tw32(RDMAC_STATUS, 0); 17143 tw32(WDMAC_STATUS, 0); 17144 17145 tw32(BUFMGR_MODE, 0); 17146 tw32(FTQ_RESET, 0); 17147 17148 test_desc.addr_hi = ((u64) buf_dma) >> 32; 17149 test_desc.addr_lo = buf_dma & 0xffffffff; 17150 test_desc.nic_mbuf = 0x00002100; 17151 test_desc.len = size; 17152 17153 /* 17154 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz 17155 * the *second* time the tg3 driver was getting loaded after an 17156 * initial scan. 17157 * 17158 * Broadcom tells me: 17159 * ...the DMA engine is connected to the GRC block and a DMA 17160 * reset may affect the GRC block in some unpredictable way... 17161 * The behavior of resets to individual blocks has not been tested. 17162 * 17163 * Broadcom noted the GRC reset will also reset all sub-components. 
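 *
 * So the test below avoids block resets altogether: it hand-builds
 * one internal buffer descriptor, pokes it into NIC SRAM through
 * the TG3PCI_MEM_WIN_BASE_ADDR/TG3PCI_MEM_WIN_DATA config window,
 * kicks the read- or write-side FTQ, and then polls the matching
 * completion FIFO for the descriptor address, e.g. for the
 * to-device direction:
 *
 *	tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
 *	...
 *	(tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ) & 0xffff) == sram_dma_descs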
17164 */ 17165 if (to_device) { 17166 test_desc.cqid_sqid = (13 << 8) | 2; 17167 17168 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE); 17169 udelay(40); 17170 } else { 17171 test_desc.cqid_sqid = (16 << 8) | 7; 17172 17173 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE); 17174 udelay(40); 17175 } 17176 test_desc.flags = 0x00000005; 17177 17178 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) { 17179 u32 val; 17180 17181 val = *(((u32 *)&test_desc) + i); 17182 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 17183 sram_dma_descs + (i * sizeof(u32))); 17184 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); 17185 } 17186 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); 17187 17188 if (to_device) 17189 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs); 17190 else 17191 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs); 17192 17193 ret = -ENODEV; 17194 for (i = 0; i < 40; i++) { 17195 u32 val; 17196 17197 if (to_device) 17198 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ); 17199 else 17200 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ); 17201 if ((val & 0xffff) == sram_dma_descs) { 17202 ret = 0; 17203 break; 17204 } 17205 17206 udelay(100); 17207 } 17208 17209 return ret; 17210 } 17211 17212 #define TEST_BUFFER_SIZE 0x2000 17213 17214 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = { 17215 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, 17216 { }, 17217 }; 17218 17219 static int tg3_test_dma(struct tg3 *tp) 17220 { 17221 dma_addr_t buf_dma; 17222 u32 *buf, saved_dma_rwctrl; 17223 int ret = 0; 17224 17225 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, 17226 &buf_dma, GFP_KERNEL); 17227 if (!buf) { 17228 ret = -ENOMEM; 17229 goto out_nofree; 17230 } 17231 17232 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) | 17233 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT)); 17234 17235 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); 17236 17237 if (tg3_flag(tp, 57765_PLUS)) 17238 goto out; 17239 17240 if (tg3_flag(tp, PCI_EXPRESS)) { 17241 /* DMA read watermark not used on PCIE */ 17242 tp->dma_rwctrl |= 0x00180000; 17243 } else if (!tg3_flag(tp, PCIX_MODE)) { 17244 if (tg3_asic_rev(tp) == ASIC_REV_5705 || 17245 tg3_asic_rev(tp) == ASIC_REV_5750) 17246 tp->dma_rwctrl |= 0x003f0000; 17247 else 17248 tp->dma_rwctrl |= 0x003f000f; 17249 } else { 17250 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 17251 tg3_asic_rev(tp) == ASIC_REV_5704) { 17252 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f); 17253 u32 read_water = 0x7; 17254 17255 /* If the 5704 is behind the EPB bridge, we can 17256 * do the less restrictive ONE_DMA workaround for 17257 * better performance. 
17258 */ 17259 if (tg3_flag(tp, 40BIT_DMA_BUG) && 17260 tg3_asic_rev(tp) == ASIC_REV_5704) 17261 tp->dma_rwctrl |= 0x8000; 17262 else if (ccval == 0x6 || ccval == 0x7) 17263 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; 17264 17265 if (tg3_asic_rev(tp) == ASIC_REV_5703) 17266 read_water = 4; 17267 /* Set bit 23 to enable PCIX hw bug fix */ 17268 tp->dma_rwctrl |= 17269 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) | 17270 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) | 17271 (1 << 23); 17272 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) { 17273 /* 5780 always in PCIX mode */ 17274 tp->dma_rwctrl |= 0x00144000; 17275 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) { 17276 /* 5714 always in PCIX mode */ 17277 tp->dma_rwctrl |= 0x00148000; 17278 } else { 17279 tp->dma_rwctrl |= 0x001b000f; 17280 } 17281 } 17282 if (tg3_flag(tp, ONE_DMA_AT_ONCE)) 17283 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; 17284 17285 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 17286 tg3_asic_rev(tp) == ASIC_REV_5704) 17287 tp->dma_rwctrl &= 0xfffffff0; 17288 17289 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 17290 tg3_asic_rev(tp) == ASIC_REV_5701) { 17291 /* Remove this if it causes problems for some boards. */ 17292 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT; 17293 17294 /* On 5700/5701 chips, we need to set this bit. 17295 * Otherwise the chip will issue cacheline transactions 17296 * to streamable DMA memory with not all the byte 17297 * enables turned on. This is an error on several 17298 * RISC PCI controllers, in particular sparc64. 17299 * 17300 * On 5703/5704 chips, this bit has been reassigned 17301 * a different meaning. In particular, it is used 17302 * on those chips to enable a PCI-X workaround. 17303 */ 17304 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE; 17305 } 17306 17307 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17308 17309 17310 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 17311 tg3_asic_rev(tp) != ASIC_REV_5701) 17312 goto out; 17313 17314 /* It is best to perform DMA test with maximum write burst size 17315 * to expose the 5700/5701 write DMA bug. 17316 */ 17317 saved_dma_rwctrl = tp->dma_rwctrl; 17318 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 17319 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17320 17321 while (1) { 17322 u32 *p = buf, i; 17323 17324 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) 17325 p[i] = i; 17326 17327 /* Send the buffer to the chip. */ 17328 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true); 17329 if (ret) { 17330 dev_err(&tp->pdev->dev, 17331 "%s: Buffer write failed. err = %d\n", 17332 __func__, ret); 17333 break; 17334 } 17335 17336 /* Now read it back. */ 17337 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false); 17338 if (ret) { 17339 dev_err(&tp->pdev->dev, "%s: Buffer read failed. " 17340 "err = %d\n", __func__, ret); 17341 break; 17342 } 17343 17344 /* Verify it. */ 17345 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { 17346 if (p[i] == i) 17347 continue; 17348 17349 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != 17350 DMA_RWCTRL_WRITE_BNDRY_16) { 17351 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 17352 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 17353 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17354 break; 17355 } else { 17356 dev_err(&tp->pdev->dev, 17357 "%s: Buffer corrupted on read back! " 17358 "(%d != %d)\n", __func__, p[i], i); 17359 ret = -ENODEV; 17360 goto out; 17361 } 17362 } 17363 17364 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) { 17365 /* Success. 
*/ 17366 ret = 0; 17367 break; 17368 } 17369 } 17370 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != 17371 DMA_RWCTRL_WRITE_BNDRY_16) { 17372 /* DMA test passed without adjusting DMA boundary, 17373 * now look for chipsets that are known to expose the 17374 * DMA bug without failing the test. 17375 */ 17376 if (pci_dev_present(tg3_dma_wait_state_chipsets)) { 17377 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 17378 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 17379 } else { 17380 /* Safe to use the calculated DMA boundary. */ 17381 tp->dma_rwctrl = saved_dma_rwctrl; 17382 } 17383 17384 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17385 } 17386 17387 out: 17388 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma); 17389 out_nofree: 17390 return ret; 17391 } 17392 17393 static void tg3_init_bufmgr_config(struct tg3 *tp) 17394 { 17395 if (tg3_flag(tp, 57765_PLUS)) { 17396 tp->bufmgr_config.mbuf_read_dma_low_water = 17397 DEFAULT_MB_RDMA_LOW_WATER_5705; 17398 tp->bufmgr_config.mbuf_mac_rx_low_water = 17399 DEFAULT_MB_MACRX_LOW_WATER_57765; 17400 tp->bufmgr_config.mbuf_high_water = 17401 DEFAULT_MB_HIGH_WATER_57765; 17402 17403 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 17404 DEFAULT_MB_RDMA_LOW_WATER_5705; 17405 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 17406 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765; 17407 tp->bufmgr_config.mbuf_high_water_jumbo = 17408 DEFAULT_MB_HIGH_WATER_JUMBO_57765; 17409 } else if (tg3_flag(tp, 5705_PLUS)) { 17410 tp->bufmgr_config.mbuf_read_dma_low_water = 17411 DEFAULT_MB_RDMA_LOW_WATER_5705; 17412 tp->bufmgr_config.mbuf_mac_rx_low_water = 17413 DEFAULT_MB_MACRX_LOW_WATER_5705; 17414 tp->bufmgr_config.mbuf_high_water = 17415 DEFAULT_MB_HIGH_WATER_5705; 17416 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 17417 tp->bufmgr_config.mbuf_mac_rx_low_water = 17418 DEFAULT_MB_MACRX_LOW_WATER_5906; 17419 tp->bufmgr_config.mbuf_high_water = 17420 DEFAULT_MB_HIGH_WATER_5906; 17421 } 17422 17423 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 17424 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780; 17425 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 17426 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780; 17427 tp->bufmgr_config.mbuf_high_water_jumbo = 17428 DEFAULT_MB_HIGH_WATER_JUMBO_5780; 17429 } else { 17430 tp->bufmgr_config.mbuf_read_dma_low_water = 17431 DEFAULT_MB_RDMA_LOW_WATER; 17432 tp->bufmgr_config.mbuf_mac_rx_low_water = 17433 DEFAULT_MB_MACRX_LOW_WATER; 17434 tp->bufmgr_config.mbuf_high_water = 17435 DEFAULT_MB_HIGH_WATER; 17436 17437 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 17438 DEFAULT_MB_RDMA_LOW_WATER_JUMBO; 17439 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 17440 DEFAULT_MB_MACRX_LOW_WATER_JUMBO; 17441 tp->bufmgr_config.mbuf_high_water_jumbo = 17442 DEFAULT_MB_HIGH_WATER_JUMBO; 17443 } 17444 17445 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER; 17446 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER; 17447 } 17448 17449 static char *tg3_phy_string(struct tg3 *tp) 17450 { 17451 switch (tp->phy_id & TG3_PHY_ID_MASK) { 17452 case TG3_PHY_ID_BCM5400: return "5400"; 17453 case TG3_PHY_ID_BCM5401: return "5401"; 17454 case TG3_PHY_ID_BCM5411: return "5411"; 17455 case TG3_PHY_ID_BCM5701: return "5701"; 17456 case TG3_PHY_ID_BCM5703: return "5703"; 17457 case TG3_PHY_ID_BCM5704: return "5704"; 17458 case TG3_PHY_ID_BCM5705: return "5705"; 17459 case TG3_PHY_ID_BCM5750: return "5750"; 17460 case TG3_PHY_ID_BCM5752: return "5752"; 17461 case TG3_PHY_ID_BCM5714: return "5714"; 17462 case TG3_PHY_ID_BCM5780: return "5780"; 17463 case 
TG3_PHY_ID_BCM5755: return "5755"; 17464 case TG3_PHY_ID_BCM5787: return "5787"; 17465 case TG3_PHY_ID_BCM5784: return "5784"; 17466 case TG3_PHY_ID_BCM5756: return "5722/5756"; 17467 case TG3_PHY_ID_BCM5906: return "5906"; 17468 case TG3_PHY_ID_BCM5761: return "5761"; 17469 case TG3_PHY_ID_BCM5718C: return "5718C"; 17470 case TG3_PHY_ID_BCM5718S: return "5718S"; 17471 case TG3_PHY_ID_BCM57765: return "57765"; 17472 case TG3_PHY_ID_BCM5719C: return "5719C"; 17473 case TG3_PHY_ID_BCM5720C: return "5720C"; 17474 case TG3_PHY_ID_BCM5762: return "5762C"; 17475 case TG3_PHY_ID_BCM8002: return "8002/serdes"; 17476 case 0: return "serdes"; 17477 default: return "unknown"; 17478 } 17479 } 17480 17481 static char *tg3_bus_string(struct tg3 *tp, char *str) 17482 { 17483 if (tg3_flag(tp, PCI_EXPRESS)) { 17484 strcpy(str, "PCI Express"); 17485 return str; 17486 } else if (tg3_flag(tp, PCIX_MODE)) { 17487 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f; 17488 17489 strcpy(str, "PCIX:"); 17490 17491 if ((clock_ctrl == 7) || 17492 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) == 17493 GRC_MISC_CFG_BOARD_ID_5704CIOBE)) 17494 strcat(str, "133MHz"); 17495 else if (clock_ctrl == 0) 17496 strcat(str, "33MHz"); 17497 else if (clock_ctrl == 2) 17498 strcat(str, "50MHz"); 17499 else if (clock_ctrl == 4) 17500 strcat(str, "66MHz"); 17501 else if (clock_ctrl == 6) 17502 strcat(str, "100MHz"); 17503 } else { 17504 strcpy(str, "PCI:"); 17505 if (tg3_flag(tp, PCI_HIGH_SPEED)) 17506 strcat(str, "66MHz"); 17507 else 17508 strcat(str, "33MHz"); 17509 } 17510 if (tg3_flag(tp, PCI_32BIT)) 17511 strcat(str, ":32-bit"); 17512 else 17513 strcat(str, ":64-bit"); 17514 return str; 17515 } 17516 17517 static void tg3_init_coal(struct tg3 *tp) 17518 { 17519 struct ethtool_coalesce *ec = &tp->coal; 17520 17521 memset(ec, 0, sizeof(*ec)); 17522 ec->cmd = ETHTOOL_GCOALESCE; 17523 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS; 17524 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS; 17525 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES; 17526 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES; 17527 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT; 17528 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT; 17529 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT; 17530 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT; 17531 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS; 17532 17533 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD | 17534 HOSTCC_MODE_CLRTICK_TXBD)) { 17535 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS; 17536 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS; 17537 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS; 17538 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; 17539 } 17540 17541 if (tg3_flag(tp, 5705_PLUS)) { 17542 ec->rx_coalesce_usecs_irq = 0; 17543 ec->tx_coalesce_usecs_irq = 0; 17544 ec->stats_block_coalesce_usecs = 0; 17545 } 17546 } 17547 17548 static int tg3_init_one(struct pci_dev *pdev, 17549 const struct pci_device_id *ent) 17550 { 17551 struct net_device *dev; 17552 struct tg3 *tp; 17553 int i, err; 17554 u32 sndmbx, rcvmbx, intmbx; 17555 char str[40]; 17556 u64 dma_mask, persist_dma_mask; 17557 netdev_features_t features = 0; 17558 u8 addr[ETH_ALEN] __aligned(2); 17559 17560 err = pci_enable_device(pdev); 17561 if (err) { 17562 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 17563 return err; 17564 } 17565 17566 err = pci_request_regions(pdev, DRV_MODULE_NAME); 17567 if (err) { 17568 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 
17569 goto err_out_disable_pdev; 17570 } 17571 17572 pci_set_master(pdev); 17573 17574 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); 17575 if (!dev) { 17576 err = -ENOMEM; 17577 goto err_out_free_res; 17578 } 17579 17580 SET_NETDEV_DEV(dev, &pdev->dev); 17581 17582 tp = netdev_priv(dev); 17583 tp->pdev = pdev; 17584 tp->dev = dev; 17585 tp->rx_mode = TG3_DEF_RX_MODE; 17586 tp->tx_mode = TG3_DEF_TX_MODE; 17587 tp->irq_sync = 1; 17588 tp->pcierr_recovery = false; 17589 17590 if (tg3_debug > 0) 17591 tp->msg_enable = tg3_debug; 17592 else 17593 tp->msg_enable = TG3_DEF_MSG_ENABLE; 17594 17595 if (pdev_is_ssb_gige_core(pdev)) { 17596 tg3_flag_set(tp, IS_SSB_CORE); 17597 if (ssb_gige_must_flush_posted_writes(pdev)) 17598 tg3_flag_set(tp, FLUSH_POSTED_WRITES); 17599 if (ssb_gige_one_dma_at_once(pdev)) 17600 tg3_flag_set(tp, ONE_DMA_AT_ONCE); 17601 if (ssb_gige_have_roboswitch(pdev)) { 17602 tg3_flag_set(tp, USE_PHYLIB); 17603 tg3_flag_set(tp, ROBOSWITCH); 17604 } 17605 if (ssb_gige_is_rgmii(pdev)) 17606 tg3_flag_set(tp, RGMII_MODE); 17607 } 17608 17609 /* The word/byte swap controls here control register access byte 17610 * swapping. DMA data byte swapping is controlled in the GRC_MODE 17611 * setting below. 17612 */ 17613 tp->misc_host_ctrl = 17614 MISC_HOST_CTRL_MASK_PCI_INT | 17615 MISC_HOST_CTRL_WORD_SWAP | 17616 MISC_HOST_CTRL_INDIR_ACCESS | 17617 MISC_HOST_CTRL_PCISTATE_RW; 17618 17619 /* The NONFRM (non-frame) byte/word swap controls take effect 17620 * on descriptor entries, anything which isn't packet data. 17621 * 17622 * The StrongARM chips on the board (one for tx, one for rx) 17623 * are running in big-endian mode. 17624 */ 17625 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA | 17626 GRC_MODE_WSWAP_NONFRM_DATA); 17627 #ifdef __BIG_ENDIAN 17628 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; 17629 #endif 17630 spin_lock_init(&tp->lock); 17631 spin_lock_init(&tp->indirect_lock); 17632 INIT_WORK(&tp->reset_task, tg3_reset_task); 17633 17634 tp->regs = pci_ioremap_bar(pdev, BAR_0); 17635 if (!tp->regs) { 17636 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 17637 err = -ENOMEM; 17638 goto err_out_free_dev; 17639 } 17640 17641 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 17642 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E || 17643 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S || 17644 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE || 17645 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 17646 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C || 17647 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 17648 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || 17649 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || 17650 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || 17651 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 || 17652 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || 17653 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || 17654 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || 17655 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) { 17656 tg3_flag_set(tp, ENABLE_APE); 17657 tp->aperegs = pci_ioremap_bar(pdev, BAR_2); 17658 if (!tp->aperegs) { 17659 dev_err(&pdev->dev, 17660 "Cannot map APE registers, aborting\n"); 17661 err = -ENOMEM; 17662 goto err_out_iounmap; 17663 } 17664 } 17665 17666 tp->rx_pending = TG3_DEF_RX_RING_PENDING; 17667 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; 17668 17669 dev->ethtool_ops = &tg3_ethtool_ops; 17670 dev->watchdog_timeo = TG3_TX_TIMEOUT; 17671 dev->netdev_ops = &tg3_netdev_ops; 17672 dev->irq = 
pdev->irq; 17673 17674 err = tg3_get_invariants(tp, ent); 17675 if (err) { 17676 dev_err(&pdev->dev, 17677 "Problem fetching invariants of chip, aborting\n"); 17678 goto err_out_apeunmap; 17679 } 17680 17681 /* The EPB bridge inside 5714, 5715, and 5780 and any 17682 * device behind the EPB cannot support DMA addresses > 40-bit. 17683 * On 64-bit systems with IOMMU, use 40-bit dma_mask. 17684 * On 64-bit systems without IOMMU, use 64-bit dma_mask and 17685 * do DMA address check in tg3_start_xmit(). 17686 */ 17687 if (tg3_flag(tp, IS_5788)) 17688 persist_dma_mask = dma_mask = DMA_BIT_MASK(32); 17689 else if (tg3_flag(tp, 40BIT_DMA_BUG)) { 17690 persist_dma_mask = dma_mask = DMA_BIT_MASK(40); 17691 #ifdef CONFIG_HIGHMEM 17692 dma_mask = DMA_BIT_MASK(64); 17693 #endif 17694 } else 17695 persist_dma_mask = dma_mask = DMA_BIT_MASK(64); 17696 17697 /* Configure DMA attributes. */ 17698 if (dma_mask > DMA_BIT_MASK(32)) { 17699 err = dma_set_mask(&pdev->dev, dma_mask); 17700 if (!err) { 17701 features |= NETIF_F_HIGHDMA; 17702 err = dma_set_coherent_mask(&pdev->dev, 17703 persist_dma_mask); 17704 if (err < 0) { 17705 dev_err(&pdev->dev, "Unable to obtain 64 bit " 17706 "DMA for consistent allocations\n"); 17707 goto err_out_apeunmap; 17708 } 17709 } 17710 } 17711 if (err || dma_mask == DMA_BIT_MASK(32)) { 17712 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 17713 if (err) { 17714 dev_err(&pdev->dev, 17715 "No usable DMA configuration, aborting\n"); 17716 goto err_out_apeunmap; 17717 } 17718 } 17719 17720 tg3_init_bufmgr_config(tp); 17721 17722 /* 5700 B0 chips do not support checksumming correctly due 17723 * to hardware bugs. 17724 */ 17725 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) { 17726 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM; 17727 17728 if (tg3_flag(tp, 5755_PLUS)) 17729 features |= NETIF_F_IPV6_CSUM; 17730 } 17731 17732 /* TSO is on by default on chips that support hardware TSO. 17733 * Firmware TSO on older chips gives lower performance, so it 17734 * is off by default, but can be enabled using ethtool. 17735 */ 17736 if ((tg3_flag(tp, HW_TSO_1) || 17737 tg3_flag(tp, HW_TSO_2) || 17738 tg3_flag(tp, HW_TSO_3)) && 17739 (features & NETIF_F_IP_CSUM)) 17740 features |= NETIF_F_TSO; 17741 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) { 17742 if (features & NETIF_F_IPV6_CSUM) 17743 features |= NETIF_F_TSO6; 17744 if (tg3_flag(tp, HW_TSO_3) || 17745 tg3_asic_rev(tp) == ASIC_REV_5761 || 17746 (tg3_asic_rev(tp) == ASIC_REV_5784 && 17747 tg3_chip_rev(tp) != CHIPREV_5784_AX) || 17748 tg3_asic_rev(tp) == ASIC_REV_5785 || 17749 tg3_asic_rev(tp) == ASIC_REV_57780) 17750 features |= NETIF_F_TSO_ECN; 17751 } 17752 17753 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX | 17754 NETIF_F_HW_VLAN_CTAG_RX; 17755 dev->vlan_features |= features; 17756 17757 /* 17758 * Add loopback capability only for a subset of devices that support 17759 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY 17760 * loopback for the remaining devices. 
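 *
 * NETIF_F_LOOPBACK only lands in dev->hw_features below (it is not
 * enabled by default), so it is strictly user-opt-in, e.g. through
 * ethtool's feature interface (assuming the standard "loopback"
 * feature name):
 *
 *	ethtool -K ethX loopback on
 *
 * at which point the driver's feature hook (tg3_set_features())
 * switches the MAC into loopback mode.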
17761 */ 17762 if (tg3_asic_rev(tp) != ASIC_REV_5780 && 17763 !tg3_flag(tp, CPMU_PRESENT)) 17764 /* Add the loopback capability */ 17765 features |= NETIF_F_LOOPBACK; 17766 17767 dev->hw_features |= features; 17768 dev->priv_flags |= IFF_UNICAST_FLT; 17769 17770 /* MTU range: 60 - 9000 or 1500, depending on hardware */ 17771 dev->min_mtu = TG3_MIN_MTU; 17772 dev->max_mtu = TG3_MAX_MTU(tp); 17773 17774 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 && 17775 !tg3_flag(tp, TSO_CAPABLE) && 17776 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { 17777 tg3_flag_set(tp, MAX_RXPEND_64); 17778 tp->rx_pending = 63; 17779 } 17780 17781 err = tg3_get_device_address(tp, addr); 17782 if (err) { 17783 dev_err(&pdev->dev, 17784 "Could not obtain valid ethernet address, aborting\n"); 17785 goto err_out_apeunmap; 17786 } 17787 eth_hw_addr_set(dev, addr); 17788 17789 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; 17790 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; 17791 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; 17792 for (i = 0; i < tp->irq_max; i++) { 17793 struct tg3_napi *tnapi = &tp->napi[i]; 17794 17795 tnapi->tp = tp; 17796 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING; 17797 17798 tnapi->int_mbox = intmbx; 17799 if (i <= 4) 17800 intmbx += 0x8; 17801 else 17802 intmbx += 0x4; 17803 17804 tnapi->consmbox = rcvmbx; 17805 tnapi->prodmbox = sndmbx; 17806 17807 if (i) 17808 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1); 17809 else 17810 tnapi->coal_now = HOSTCC_MODE_NOW; 17811 17812 if (!tg3_flag(tp, SUPPORT_MSIX)) 17813 break; 17814 17815 /* 17816 * If we support MSIX, we'll be using RSS. If we're using 17817 * RSS, the first vector only handles link interrupts and the 17818 * remaining vectors handle rx and tx interrupts. Reuse the 17819 * mailbox values for the next iteration. The values we setup 17820 * above are still useful for the single vectored mode. 17821 */ 17822 if (!i) 17823 continue; 17824 17825 rcvmbx += 0x8; 17826 17827 if (sndmbx & 0x4) 17828 sndmbx -= 0x4; 17829 else 17830 sndmbx += 0xc; 17831 } 17832 17833 /* 17834 * Reset chip in case UNDI or EFI driver did not shutdown 17835 * DMA self test will enable WDMAC and we'll see (spurious) 17836 * pending DMA on the PCI bus at that point. 
	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut it
	 * down; the DMA self test will enable the WDMAC and we'll see
	 * (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tg3_full_lock(tp, 0);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tg3_ptp_init(tp);
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
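	/* The bracketed [%d] values in these info lines are booleans taken
	 * from the feature bits and flags tested above; dma_mask below is
	 * reported as 32, 40 or 64 bits according to the streaming mask
	 * the DMA API accepted earlier in probe.
	 */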
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	return err;
}

static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		tg3_ptp_fini(tp);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}

static int tg3_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
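/*
 * SIMPLE_DEV_PM_OPS() wires tg3_suspend/tg3_resume into the system sleep
 * transitions only (suspend/resume, freeze/thaw and poweroff/restore all
 * share the same pair); no runtime PM callbacks are installed.  It is
 * roughly equivalent to the open-coded form:
 *
 *	static const struct dev_pm_ops tg3_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(tg3_suspend, tg3_resume)
 *	};
 */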
static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);

	rtnl_unlock();
}

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	/* Could be a second call, or maybe we don't have a netdev yet */
	if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
		goto done;

	/* There is no need to recover from a permanent error */
	if (state == pci_channel_io_frozen)
		tp->pcierr_recovery = true;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}
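/*
 * Note on ordering: the PCI error recovery core calls .error_detected
 * first, then .slot_reset (when NEED_RESET was returned) after the link
 * has been reset, and finally .resume once traffic may restart.  See
 * Documentation/PCI/pci-error-recovery.rst for the full state machine.
 */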
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it is OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	tp->pcierr_recovery = false;
	rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

module_pci_driver(tg3_driver);
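/*
 * module_pci_driver() above generates the module init/exit boilerplate.
 * It is functionally equivalent to the following (function names here
 * are illustrative; the macro derives its own):
 *
 *	static int __init tg3_init(void)
 *	{
 *		return pci_register_driver(&tg3_driver);
 *	}
 *	module_init(tg3_init);
 *
 *	static void __exit tg3_exit(void)
 *	{
 *		pci_unregister_driver(&tg3_driver);
 *	}
 *	module_exit(tg3_exit);
 */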