/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>
#include <net/gso.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
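/* Illustrative note (not part of the driver): the helpers above give
 * type-checked access to the tp->tg3_flags bitmap -- the enum parameter
 * catches misuse at compile time, while test_bit()/set_bit()/clear_bit()
 * keep each update atomic.  A hypothetical caller reads naturally:
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *		...take the APE-specific path...
 *	tg3_flag_set(tp, TAGGED_STATUS);
 *	tg3_flag_clear(tp, TAGGED_STATUS);
 *
 * Because the storage is an unsigned long array, the bitmap can grow past
 * 32 flags without touching any call site.
 */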
#define DRV_MODULE_NAME		"tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			137

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
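/* Worked example (illustrative, not driver code): the ring-size constants
 * above are compile-time powers of two precisely so that index arithmetic
 * reduces to a mask.  With TG3_TX_RING_SIZE == 512:
 *
 *	NEXT_TX(510) == (510 + 1) & 511 == 511
 *	NEXT_TX(511) == (511 + 1) & 511 == 0	(wraps to ring start)
 *
 * which is the x % 2^k == x & (2^k - 1) identity.  Likewise, the double
 * copy described above is just a length check in the rx path: frames no
 * longer than the copy threshold get memcpy'd into a fresh skb (cheap for
 * small frames, and it sidesteps the 5701 alignment erratum), while larger
 * frames hand the DMA-mapped buffer straight to the stack.
 */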
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG357766);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
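/* Illustrative sketch (not part of the driver): the PCI core matches a
 * probed device against tg3_pci_tbl and passes the matching entry to the
 * probe callback, where .driver_data carries the TG3_DRV_DATA_FLAG_*
 * bits.  A hypothetical probe would consume it roughly like this (the
 * function name is made up; the driver's real probe routine is not shown
 * in this excerpt):
 *
 *	static int tg3_probe_sketch(struct pci_dev *pdev,
 *				    const struct pci_device_id *ent)
 *	{
 *		unsigned long flags = ent->driver_data;
 *
 *		if (flags & TG3_DRV_DATA_FLAG_10_100_ONLY)
 *			...mask out gigabit modes before registering...
 *		return 0;
 *	}
 */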
"tx_deferred" }, 400 { "tx_excessive_collisions" }, 401 { "tx_late_collisions" }, 402 { "tx_collide_2times" }, 403 { "tx_collide_3times" }, 404 { "tx_collide_4times" }, 405 { "tx_collide_5times" }, 406 { "tx_collide_6times" }, 407 { "tx_collide_7times" }, 408 { "tx_collide_8times" }, 409 { "tx_collide_9times" }, 410 { "tx_collide_10times" }, 411 { "tx_collide_11times" }, 412 { "tx_collide_12times" }, 413 { "tx_collide_13times" }, 414 { "tx_collide_14times" }, 415 { "tx_collide_15times" }, 416 { "tx_ucast_packets" }, 417 { "tx_mcast_packets" }, 418 { "tx_bcast_packets" }, 419 { "tx_carrier_sense_errors" }, 420 { "tx_discards" }, 421 { "tx_errors" }, 422 423 { "dma_writeq_full" }, 424 { "dma_write_prioq_full" }, 425 { "rxbds_empty" }, 426 { "rx_discards" }, 427 { "rx_errors" }, 428 { "rx_threshold_hit" }, 429 430 { "dma_readq_full" }, 431 { "dma_read_prioq_full" }, 432 { "tx_comp_queue_full" }, 433 434 { "ring_set_send_prod_index" }, 435 { "ring_status_update" }, 436 { "nic_irqs" }, 437 { "nic_avoided_irqs" }, 438 { "nic_tx_threshold_hit" }, 439 440 { "mbuf_lwm_thresh_hit" }, 441 }; 442 443 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys) 444 #define TG3_NVRAM_TEST 0 445 #define TG3_LINK_TEST 1 446 #define TG3_REGISTER_TEST 2 447 #define TG3_MEMORY_TEST 3 448 #define TG3_MAC_LOOPB_TEST 4 449 #define TG3_PHY_LOOPB_TEST 5 450 #define TG3_EXT_LOOPB_TEST 6 451 #define TG3_INTERRUPT_TEST 7 452 453 454 static const struct { 455 const char string[ETH_GSTRING_LEN]; 456 } ethtool_test_keys[] = { 457 [TG3_NVRAM_TEST] = { "nvram test (online) " }, 458 [TG3_LINK_TEST] = { "link test (online) " }, 459 [TG3_REGISTER_TEST] = { "register test (offline)" }, 460 [TG3_MEMORY_TEST] = { "memory test (offline)" }, 461 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" }, 462 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" }, 463 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" }, 464 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" }, 465 }; 466 467 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys) 468 469 470 static void tg3_write32(struct tg3 *tp, u32 off, u32 val) 471 { 472 writel(val, tp->regs + off); 473 } 474 475 static u32 tg3_read32(struct tg3 *tp, u32 off) 476 { 477 return readl(tp->regs + off); 478 } 479 480 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val) 481 { 482 writel(val, tp->aperegs + off); 483 } 484 485 static u32 tg3_ape_read32(struct tg3 *tp, u32 off) 486 { 487 return readl(tp->aperegs + off); 488 } 489 490 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) 491 { 492 unsigned long flags; 493 494 spin_lock_irqsave(&tp->indirect_lock, flags); 495 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); 496 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); 497 spin_unlock_irqrestore(&tp->indirect_lock, flags); 498 } 499 500 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val) 501 { 502 writel(val, tp->regs + off); 503 readl(tp->regs + off); 504 } 505 506 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off) 507 { 508 unsigned long flags; 509 u32 val; 510 511 spin_lock_irqsave(&tp->indirect_lock, flags); 512 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); 513 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); 514 spin_unlock_irqrestore(&tp->indirect_lock, flags); 515 return val; 516 } 517 518 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val) 519 { 520 unsigned long flags; 521 522 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) { 523 
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
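/* Illustrative note (not driver code): _tw32_flush() above is the usual
 * PCI posted-write flush idiom.  A posted write can linger in bridge
 * buffers; reading any register of the same device forces every prior
 * posted write to complete first:
 *
 *	writel(val, tp->regs + off);	<- may be buffered ("posted")
 *	udelay(usec_wait);		<- settle time for GPIO/clock bits
 *	readl(tp->regs + off);		<- read forces the write to the chip
 *	udelay(usec_wait);		<- wait again, now relative to the
 *					   write actually having landed
 *
 * The second delay matters: if the write was only flushed by the read,
 * the first delay may have elapsed before the hardware ever saw the value.
 */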
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
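/* Usage sketch (illustrative): tg3_write_mem()/tg3_read_mem() give word
 * access to NIC-internal SRAM through the MEM_WIN window, and later code
 * in this file uses them for the firmware mailboxes, e.g.:
 *
 *	u32 val;
 *
 *	tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
 *	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
 *
 * Resetting MEM_WIN_BASE_ADDR to zero on every exit is deliberate: stale
 * window state could silently redirect a later fixed-offset access.
 */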
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}

}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
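/* Usage sketch (illustrative): accesses to APE-shared resources in this
 * file bracket the critical section with the lock helpers above:
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;		<- grant never arrived within 1 ms
 *	...touch APE shared state...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *
 * Note the asymmetry in tg3_ape_lock(): on timeout it writes the bit to
 * the GRANT register, which revokes the still-pending request rather than
 * releasing a lock it never owned.
 */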
#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 milliseconds for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Check if hb interval has exceeded */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) ||
	      tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
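/* Usage sketch (illustrative): __tg3_readphy() above hand-crafts an MDIO
 * clause-22 frame in MAC_MI_COM and polls MI_COM_BUSY.  Callers only see
 * the wrapper; reading the PHY identifier, for instance:
 *
 *	u32 id1, id2;
 *
 *	if (!tg3_readphy(tp, MII_PHYSID1, &id1) &&
 *	    !tg3_readphy(tp, MII_PHYSID2, &id2))
 *		...PHY OUI/model is ((id1 << 16) | id2)...
 *
 * The helpers return 0 on success and -EBUSY if MI_COM_BUSY never cleared
 * within PHY_BUSY_LOOPS iterations.
 */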
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK  |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev));
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		fallthrough;
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}
/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");
"enabled" : "disabled"); 1893 1894 tg3_ump_link_report(tp); 1895 } 1896 1897 tp->link_up = netif_carrier_ok(tp->dev); 1898 } 1899 1900 static u32 tg3_decode_flowctrl_1000T(u32 adv) 1901 { 1902 u32 flowctrl = 0; 1903 1904 if (adv & ADVERTISE_PAUSE_CAP) { 1905 flowctrl |= FLOW_CTRL_RX; 1906 if (!(adv & ADVERTISE_PAUSE_ASYM)) 1907 flowctrl |= FLOW_CTRL_TX; 1908 } else if (adv & ADVERTISE_PAUSE_ASYM) 1909 flowctrl |= FLOW_CTRL_TX; 1910 1911 return flowctrl; 1912 } 1913 1914 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) 1915 { 1916 u16 miireg; 1917 1918 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) 1919 miireg = ADVERTISE_1000XPAUSE; 1920 else if (flow_ctrl & FLOW_CTRL_TX) 1921 miireg = ADVERTISE_1000XPSE_ASYM; 1922 else if (flow_ctrl & FLOW_CTRL_RX) 1923 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM; 1924 else 1925 miireg = 0; 1926 1927 return miireg; 1928 } 1929 1930 static u32 tg3_decode_flowctrl_1000X(u32 adv) 1931 { 1932 u32 flowctrl = 0; 1933 1934 if (adv & ADVERTISE_1000XPAUSE) { 1935 flowctrl |= FLOW_CTRL_RX; 1936 if (!(adv & ADVERTISE_1000XPSE_ASYM)) 1937 flowctrl |= FLOW_CTRL_TX; 1938 } else if (adv & ADVERTISE_1000XPSE_ASYM) 1939 flowctrl |= FLOW_CTRL_TX; 1940 1941 return flowctrl; 1942 } 1943 1944 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv) 1945 { 1946 u8 cap = 0; 1947 1948 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) { 1949 cap = FLOW_CTRL_TX | FLOW_CTRL_RX; 1950 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) { 1951 if (lcladv & ADVERTISE_1000XPAUSE) 1952 cap = FLOW_CTRL_RX; 1953 if (rmtadv & ADVERTISE_1000XPAUSE) 1954 cap = FLOW_CTRL_TX; 1955 } 1956 1957 return cap; 1958 } 1959 1960 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) 1961 { 1962 u8 autoneg; 1963 u8 flowctrl = 0; 1964 u32 old_rx_mode = tp->rx_mode; 1965 u32 old_tx_mode = tp->tx_mode; 1966 1967 if (tg3_flag(tp, USE_PHYLIB)) 1968 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg; 1969 else 1970 autoneg = tp->link_config.autoneg; 1971 1972 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) { 1973 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 1974 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv); 1975 else 1976 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv); 1977 } else 1978 flowctrl = tp->link_config.flowctrl; 1979 1980 tp->link_config.active_flowctrl = flowctrl; 1981 1982 if (flowctrl & FLOW_CTRL_RX) 1983 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE; 1984 else 1985 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE; 1986 1987 if (old_rx_mode != tp->rx_mode) 1988 tw32_f(MAC_RX_MODE, tp->rx_mode); 1989 1990 if (flowctrl & FLOW_CTRL_TX) 1991 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE; 1992 else 1993 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE; 1994 1995 if (old_tx_mode != tp->tx_mode) 1996 tw32_f(MAC_TX_MODE, tp->tx_mode); 1997 } 1998 1999 static void tg3_adjust_link(struct net_device *dev) 2000 { 2001 u8 oldflowctrl, linkmesg = 0; 2002 u32 mac_mode, lcl_adv, rmt_adv; 2003 struct tg3 *tp = netdev_priv(dev); 2004 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 2005 2006 spin_lock_bh(&tp->lock); 2007 2008 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | 2009 MAC_MODE_HALF_DUPLEX); 2010 2011 oldflowctrl = tp->link_config.active_flowctrl; 2012 2013 if (phydev->link) { 2014 lcl_adv = 0; 2015 rmt_adv = 0; 2016 2017 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) 2018 mac_mode |= MAC_MODE_PORT_MODE_MII; 2019 else if (phydev->speed == SPEED_1000 || 2020 tg3_asic_rev(tp) != ASIC_REV_5785) 2021 

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
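
/*
 * tg3_adjust_link() above is the link-change callback handed to
 * phy_connect() in tg3_phy_init() below; phylib invokes it whenever the
 * PHY state changes, so that MAC_MODE, duplex, flow control and the TX
 * slot-time/IPG settings track the negotiated link.  Flow control
 * itself resolves through the helpers further up; for example, both
 * partners advertising ADVERTISE_1000XPAUSE yields symmetric
 * FLOW_CTRL_TX | FLOW_CTRL_RX, while matching asymmetric bits alone
 * enable only one direction.
 */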

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev_name(phydev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phy_set_max_speed(phydev, SPEED_1000);
			phy_support_asym_pause(phydev);
			break;
		}
		fallthrough;
	case PHY_INTERFACE_MODE_MII:
		phy_set_max_speed(phydev, SPEED_100);
		phy_support_asym_pause(phydev);
		break;
	default:
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phy_attached_info(phydev);

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		ethtool_convert_legacy_u32_to_link_mode(
			phydev->advertising, tp->link_config.advertising);
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}

static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);

	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}
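
/*
 * Note the ordering in tg3_phy_toggle_apd() above: the SCR5 shadow
 * register is programmed first to select which low-power features may
 * engage, and only then is auto power-down armed (or disarmed) through
 * the APD shadow register with an 84ms wake timer.
 */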

static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}

static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}

static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_keee *eee)
{
	u32 val;
	struct ethtool_keee *dest = &tp->eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	if (eee)
		dest = eee;

	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
		return;

	/* Pull eee_active */
	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
		dest->eee_active = 1;
	} else
		dest->eee_active = 0;

	/* Pull lp advertised settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
		return;
	mii_eee_cap1_mod_linkmode_t(dest->lp_advertised, val);

	/* Pull advertised and eee_enabled settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
		return;
	dest->eee_enabled = !!val;
	mii_eee_cap1_mod_linkmode_t(dest->advertised, val);

	/* Pull tx_lpi_enabled */
	val = tr32(TG3_CPMU_EEE_MODE);
	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);

	/* Pull lpi timer value */
	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
}

static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_eee_pull_config(tp, NULL);
		if (tp->eee.eee_active)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up &&
		    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}

static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
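
/*
 * tg3_phy_write_and_check_testpat() below exercises the four DSP
 * channels (channel n is addressed at n * 0x2000 + 0x0200): it writes a
 * fixed test pattern, reads it back masked to the implemented bits
 * (0x7fff low word, 0x000f high word), and requests another PHY reset
 * via *resetp on any mismatch or macro timeout.
 */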

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
		if (err)
			return err;

		/* Block the PHY control access. */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	tg3_phy_toggle_auxctl_smdsp(tp, false);

	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
	if (err)
		return err;

	reg32 &= ~0x3000;
	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

	return 0;
}

static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}

static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF))
		netdev_warn(tp->dev,
			    "Management side-band traffic will be interrupted during phy settings change\n");
}

/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && tp->link_up) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					     val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, true);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
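
/*
 * Each PCI function owns a 4-bit field in the GPIO message word defined
 * below: function 0 uses bits 0-3, function 1 bits 4-7, and so on.  For
 * example, tg3_set_function_status() on function 2 shifts by
 * TG3_APE_GPIO_MSG_SHIFT + 4 * 2 and therefore updates bits 8-11 only,
 * leaving the other functions' DRVR_PRES/NEED_VAUX bits intact.
 */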

#define TG3_GPIO_MSG_DRVR_PRES		0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		0x00000002
#define TG3_GPIO_MSG_MASK		(TG3_GPIO_MSG_DRVR_PRES | \
					 TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))

static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}

static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}

static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
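
/*
 * Note that tg3_pwrsrc_die_with_vmain() above does not simply write a
 * final GPIO1 value: it sequences high -> low -> high with a
 * TG3_GRC_LCLCTL_PWRSW_DELAY wait after each step, and the vaux path
 * below likewise walks its GPIO writes one output at a time.
 */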

static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}

static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}

static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}

static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}

static bool tg3_phy_power_bug(struct tg3 *tp)
{
	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5700:
	case ASIC_REV_5704:
		return true;
	case ASIC_REV_5780:
		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			return true;
		return false;
	case ASIC_REV_5717:
		if (!tp->pci_fn)
			return true;
		return false;
	case ASIC_REV_5719:
	case ASIC_REV_5720:
		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !tp->pci_fn)
			return true;
		return false;
	}

	return false;
}

static bool tg3_phy_led_bug(struct tg3 *tp)
{
	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5719:
	case ASIC_REV_5720:
		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
		    !tp->pci_fn)
			return true;
		return false;
	}

	return false;
}
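
/*
 * tg3_phy_power_bug() and tg3_phy_led_bug() above encode per-ASIC
 * errata; tg3_power_down_phy() below consults them before writing
 * BMCR_PDOWN or forcing the LEDs off, so an affected chip keeps its PHY
 * powered (or its LED control untouched) instead.
 */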

static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
		return;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		if (!tg3_phy_led_bug(tp))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_phy_power_bug(tp))
		return;

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}

/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}

/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}

/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}

static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format. Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}

#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		usleep_range(10, 40);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}

static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}

static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}

/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM. On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}

/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}

static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
					      u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format. We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			 EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
		     (0 << EEPROM_ADDR_DEVID_SHIFT) |
		     (addr & EEPROM_ADDR_ADDR_MASK) |
		     EEPROM_ADDR_START |
		     EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
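
/*
 * Unbuffered flash (below) cannot be written in place.  Each loop
 * iteration reads the enclosing page into a bounce buffer with
 * tg3_nvram_read_be32(), merges the caller's data at the page offset,
 * issues WREN plus a page ERASE, then another WREN, and finally streams
 * the page back out dword by dword with FIRST/LAST framing on the NVRAM
 * command register.
 */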

/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
					    u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
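
/*
 * The buffered variant below needs no erase cycle; it only frames the
 * stream: NVRAM_CMD_FIRST at offset 0 or on a page boundary,
 * NVRAM_CMD_LAST at the end of a page or of the request, plus a WREN
 * before each page on the ST parts that require it.
 */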

/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
					  u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
							     buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
							       buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}

#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000

/* tp->lock is held. */
static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
{
	int i;
	const int iters = 10000;

	for (i = 0; i < iters; i++) {
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
			break;
		if (pci_channel_offline(tp->pdev))
			return -EBUSY;
	}

	return (i == iters) ? -EBUSY : 0;
}

/* tp->lock is held. */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
	udelay(10);

	return rc;
}

/* tp->lock is held. */
static int tg3_txcpu_pause(struct tg3 *tp)
{
	return tg3_pause_cpu(tp, TX_CPU_BASE);
}

/* tp->lock is held. */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE, 0x00000000);
}

/* tp->lock is held. */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	tg3_resume_cpu(tp, RX_CPU_BASE);
}

/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}

static int tg3_fw_data_len(struct tg3 *tp,
			   const struct tg3_firmware_hdr *fw_hdr)
{
	int fw_len;

	/* Non-fragmented firmware has one firmware header followed by a
	 * contiguous chunk of data to be written. The length field in that
	 * header is not the length of data to be written but the complete
	 * length of the bss. The data length is determined based on
	 * tp->fw->size minus headers.
	 *
	 * Fragmented firmware has a main header followed by multiple
	 * fragments. Each fragment is identical to non-fragmented firmware
	 * with a firmware header followed by a contiguous chunk of data. In
	 * the main header, the length field is unused and set to 0xffffffff.
	 * In each fragment header the length is the entire size of that
	 * fragment i.e. fragment data + header length. Data length is
	 * therefore length field in the header minus TG3_FW_HDR_LEN.
	 */
	if (tp->fw_len == 0xffffffff)
		fw_len = be32_to_cpu(fw_hdr->len);
	else
		fw_len = tp->fw->size;

	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
}

/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 const struct tg3_firmware_hdr *fw_hdr)
{
	int err, i;
	void (*write_op)(struct tg3 *, u32, u32);
	int total_len = tp->fw->size;

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
		/* It is possible that bootcode is still loading at this point.
		 * Get the nvram lock first before halting the cpu.
		 */
		int lock_err = tg3_nvram_lock(tp);
		err = tg3_halt_cpu(tp, cpu_base);
		if (!lock_err)
			tg3_nvram_unlock(tp);
		if (err)
			goto out;

		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
			write_op(tp, cpu_scratch_base + i, 0);
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,
		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
	} else {
		/* Subtract additional main header for fragmented firmware and
		 * advance to the first fragment
		 */
		total_len -= TG3_FW_HDR_LEN;
		fw_hdr++;
	}

	do {
		u32 *fw_data = (u32 *)(fw_hdr + 1);
		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
			write_op(tp, cpu_scratch_base +
				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
				     (i * sizeof(u32)),
				 be32_to_cpu(fw_data[i]));

		total_len -= be32_to_cpu(fw_hdr->len);

		/* Advance to next fragment */
		fw_hdr = (struct tg3_firmware_hdr *)
			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
	} while (total_len > 0);

	err = 0;

out:
	return err;
}

/* tp->lock is held. */
static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
{
	int i;
	const int iters = 5;

	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, pc);

	for (i = 0; i < iters; i++) {
		if (tr32(cpu_base + CPU_PC) == pc)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, pc);
		udelay(1000);
	}

	return (i == iters) ? -EBUSY : 0;
}

/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	int err;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and length. We are setting complete length.
	 * length = end_address_of_bss - start_address_of_text.
	 * Remainder is the blob to be loaded contiguously
	 * from start address.
	 */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_rxcpu_resume(tp);

	return 0;
}

static int tg3_validate_rxcpu_state(struct tg3 *tp)
{
	const int iters = 1000;
	int i;
	u32 val;

	/* Wait for boot code to complete initialization and enter service
	 * loop. It is then safe to download service patches
	 */
	for (i = 0; i < iters; i++) {
		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
			break;

		udelay(10);
	}

	if (i == iters) {
		netdev_err(tp->dev, "Boot code not ready for service patches\n");
		return -EBUSY;
	}

	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
	if (val & 0xff) {
		netdev_warn(tp->dev,
			    "Other patches exist. Not downloading EEE patch\n");
		return -EEXIST;
	}

	return 0;
}
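
/*
 * The 57766 EEE service patch below is only downloaded once the boot
 * code has parked the RX CPU in its service loop and no other patch has
 * been applied; tg3_validate_rxcpu_state() above performs exactly those
 * two checks.
 */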

/* tp->lock is held. */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
	struct tg3_firmware_hdr *fw_hdr;

	if (!tg3_flag(tp, NO_NVRAM))
		return;

	if (tg3_validate_rxcpu_state(tp))
		return;

	if (!tp->fw)
		return;

	/* This firmware blob has a different format than older firmware
	 * releases as given below. The main difference is we have fragmented
	 * data to be written to non-contiguous locations.
	 *
	 * In the beginning we have a firmware header identical to other
	 * firmware which consists of version, base addr and length. The length
	 * here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware. i.e. they have the
	 * firmware header and followed by data for that fragment. The version
	 * field of the individual fragment header is unused.
	 */

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
		return;

	if (tg3_rxcpu_pause(tp))
		return;

	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

	tg3_rxcpu_resume(tp);
}

/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err;

	if (!tg3_flag(tp, FW_TSO))
		return 0;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and length. We are setting complete length.
	 * length = end_address_of_bss - start_address_of_text.
	 * Remainder is the blob to be loaded contiguously
	 * from start address.
	 */

	cpu_scratch_size = tp->fw_len;

	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup the cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_resume_cpu(tp, cpu_base);
	return 0;
}

/* tp->lock is held. */
static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
				   int index)
{
	u32 addr_high, addr_low;

	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
		    (mac_addr[4] << 8) | mac_addr[5]);

	if (index < 4) {
		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
	} else {
		index -= 4;
		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
	}
}

/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
{
	u32 addr_high;
	int i;

	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704) {
		for (i = 4; i < 16; i++)
			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		    TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
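
/*
 * __tg3_set_one_mac_addr() above packs a MAC address into two
 * registers, high = bytes 0-1 and low = bytes 2-5.  As a worked
 * example, 00:10:18:aa:bb:cc is written as addr_high = 0x00000010 and
 * addr_low = 0x18aabbcc.
 */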
3998 */ 3999 pci_write_config_dword(tp->pdev, 4000 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); 4001 } 4002 4003 static int tg3_power_up(struct tg3 *tp) 4004 { 4005 int err; 4006 4007 tg3_enable_register_access(tp); 4008 4009 err = pci_set_power_state(tp->pdev, PCI_D0); 4010 if (!err) { 4011 /* Switch out of Vaux if it is a NIC */ 4012 tg3_pwrsrc_switch_to_vmain(tp); 4013 } else { 4014 netdev_err(tp->dev, "Transition to D0 failed\n"); 4015 } 4016 4017 return err; 4018 } 4019 4020 static int tg3_setup_phy(struct tg3 *, bool); 4021 4022 static int tg3_power_down_prepare(struct tg3 *tp) 4023 { 4024 u32 misc_host_ctrl; 4025 bool device_should_wake, do_low_power; 4026 4027 tg3_enable_register_access(tp); 4028 4029 /* Restore the CLKREQ setting. */ 4030 if (tg3_flag(tp, CLKREQ_BUG)) 4031 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, 4032 PCI_EXP_LNKCTL_CLKREQ_EN); 4033 4034 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 4035 tw32(TG3PCI_MISC_HOST_CTRL, 4036 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); 4037 4038 device_should_wake = device_may_wakeup(&tp->pdev->dev) && 4039 tg3_flag(tp, WOL_ENABLE); 4040 4041 if (tg3_flag(tp, USE_PHYLIB)) { 4042 do_low_power = false; 4043 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && 4044 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4045 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, }; 4046 struct phy_device *phydev; 4047 u32 phyid; 4048 4049 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 4050 4051 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; 4052 4053 tp->link_config.speed = phydev->speed; 4054 tp->link_config.duplex = phydev->duplex; 4055 tp->link_config.autoneg = phydev->autoneg; 4056 ethtool_convert_link_mode_to_legacy_u32( 4057 &tp->link_config.advertising, 4058 phydev->advertising); 4059 4060 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising); 4061 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, 4062 advertising); 4063 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, 4064 advertising); 4065 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, 4066 advertising); 4067 4068 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) { 4069 if (tg3_flag(tp, WOL_SPEED_100MB)) { 4070 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, 4071 advertising); 4072 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, 4073 advertising); 4074 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, 4075 advertising); 4076 } else { 4077 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, 4078 advertising); 4079 } 4080 } 4081 4082 linkmode_copy(phydev->advertising, advertising); 4083 phy_start_aneg(phydev); 4084 4085 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; 4086 if (phyid != PHY_ID_BCMAC131) { 4087 phyid &= PHY_BCM_OUI_MASK; 4088 if (phyid == PHY_BCM_OUI_1 || 4089 phyid == PHY_BCM_OUI_2 || 4090 phyid == PHY_BCM_OUI_3) 4091 do_low_power = true; 4092 } 4093 } 4094 } else { 4095 do_low_power = true; 4096 4097 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) 4098 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; 4099 4100 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 4101 tg3_setup_phy(tp, false); 4102 } 4103 4104 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 4105 u32 val; 4106 4107 val = tr32(GRC_VCPU_EXT_CTRL); 4108 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); 4109 } else if (!tg3_flag(tp, ENABLE_ASF)) { 4110 int i; 4111 u32 val; 4112 4113 for (i = 0; i < 200; i++) { 4114 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val); 4115 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) 4116 break; 4117 msleep(1); 4118 } 4119 } 4120 if (tg3_flag(tp, WOL_CAP)) 
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else if (tp->phy_flags &
				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
				if (tp->link_config.active_speed == SPEED_1000)
					mac_mode = MAC_MODE_PORT_MODE_GMII;
				else
					mac_mode = MAC_MODE_PORT_MODE_MII;
			} else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
		    tg3_asic_rev(tp) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}
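	/* Power the PHY down only if nothing (neither WoL nor ASF
	 * management traffic) still needs the link while suspended.
	 */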
	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);

	return 0;
}

static void tg3_power_down(struct tg3 *tp)
{
	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}

static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_UNKNOWN;
		*duplex = DUPLEX_UNKNOWN;
		break;
	}
}

static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;

		if (!tp->eee.eee_enabled) {
			val = 0;
			linkmode_zero(tp->eee.advertised);
		} else {
			mii_eee_cap1_mod_linkmode_t(tp->eee.advertised, val);
		}

		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (tg3_asic_rev(tp)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			fallthrough;
		case ASIC_REV_5720:
		case ASIC_REV_5762:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
		if (!err)
			err = err2;
	}

done:
	return err;
}

static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;
			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
				if (!(tp->phy_flags &
				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
					adv |= ADVERTISED_1000baseT_Half;
				adv |= ADVERTISED_1000baseT_Full;
			}

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Normally during power down we want to autonegotiate
			 * the lowest possible speed for WOL. However, to avoid
			 * link flap, we leave it untouched.
			 */
			return;
		}

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;
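		/* Fixed-speed path: BMCR is programmed directly. The code
		 * below briefly forces loopback so the link drops before the
		 * new speed/duplex setting takes effect.
		 */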
		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, 5715 only links up when the
			 * advertisement register has the configured speed
			 * enabled.
			 */
			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
		}

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}

static int tg3_phy_pull_config(struct tg3 *tp)
{
	int err;
	u32 val;

	err = tg3_readphy(tp, MII_BMCR, &val);
	if (err)
		goto done;

	if (!(val & BMCR_ANENABLE)) {
		tp->link_config.autoneg = AUTONEG_DISABLE;
		tp->link_config.advertising = 0;
		tg3_flag_clear(tp, PAUSE_AUTONEG);

		err = -EIO;

		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
		case 0:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_10;
			break;
		case BMCR_SPEED100:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_100;
			break;
		case BMCR_SPEED1000:
			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
				tp->link_config.speed = SPEED_1000;
				break;
			}
			fallthrough;
		default:
			goto done;
		}

		if (val & BMCR_FULLDPLX)
			tp->link_config.duplex = DUPLEX_FULL;
		else
			tp->link_config.duplex = DUPLEX_HALF;

		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

		err = 0;
		goto done;
	}

	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.advertising = ADVERTISED_Autoneg;
	tg3_flag_set(tp, PAUSE_AUTONEG);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		u32 adv;

		err = tg3_readphy(tp, MII_ADVERTISE, &val);
		if (err)
			goto done;

		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
		tp->link_config.advertising |= adv | ADVERTISED_TP;

		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
	} else {
		tp->link_config.advertising |= ADVERTISED_FIBRE;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 adv;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			err = tg3_readphy(tp, MII_CTRL1000, &val);
			if (err)
				goto done;

			adv = mii_ctrl1000_to_ethtool_adv_t(val);
		} else {
			err = tg3_readphy(tp, MII_ADVERTISE, &val);
			if (err)
				goto done;

			adv = tg3_decode_flowctrl_1000X(val);
			tp->link_config.flowctrl = adv;

			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
			adv = mii_adv_to_ethtool_adv_x(val);
		}

		tp->link_config.advertising |= adv;
	}

done:
	return err;
}
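/* The register/value pairs written below appear to be opaque BCM5401
 * DSP fixups carried over from vendor code; their individual meanings
 * are not documented here.
 */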
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}

static bool tg3_phy_eee_config_ok(struct tg3 *tp)
{
	struct ethtool_keee eee = {};

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return true;

	tg3_eee_pull_config(tp, &eee);

	if (tp->eee.eee_enabled) {
		if (!linkmode_equal(tp->eee.advertised, eee.advertised) ||
		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
			return false;
	} else {
		/* EEE is disabled but we're advertising */
		if (!linkmode_empty(eee.advertised))
			return false;
	}

	return true;
}

static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		if (tgtadv &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}

static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
{
	u32 lpeth = 0;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 val;

		if (tg3_readphy(tp, MII_STAT1000, &val))
			return false;

		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
	}

	if (tg3_readphy(tp, MII_LPA, rmtadv))
		return false;

	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
	tp->link_config.rmt_adv = lpeth;

	return true;
}

static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
{
	if (curr_link_up != tp->link_up) {
		if (curr_link_up) {
			netif_carrier_on(tp->dev);
		} else {
			netif_carrier_off(tp->dev);
			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}

		tg3_link_report(tp);
		return true;
	}

	return false;
}

static void tg3_clear_mac_status(struct tg3 *tp)
{
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	       MAC_STATUS_SYNC_CHANGED |
	       MAC_STATUS_CFG_CHANGED |
	       MAC_STATUS_MI_COMPLETION |
	       MAC_STATUS_LNKSTATE_CHANGED);
	udelay(40);
}
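/* Program the CPMU EEE machinery: which idle sources count as link
 * idle, the LPI exit timer, the EEE mode bits and the two debounce
 * timers. The timer encodings come from the TG3_CPMU_* definitions
 * in tg3.h.
 */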
static void tg3_setup_eee(struct tg3 *tp)
{
	u32 val;

	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;

	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);

	tw32_f(TG3_CPMU_EEE_CTRL,
	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
	      TG3_CPMU_EEEMD_LPI_IN_RX |
	      TG3_CPMU_EEEMD_EEE_ENABLE;

	if (tg3_asic_rev(tp) != ASIC_REV_5717)
		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

	if (tg3_flag(tp, ENABLE_APE))
		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);

	tw32_f(TG3_CPMU_EEE_DBTMR1,
	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
	       (tp->eee.tx_lpi_timer & 0xffff));

	tw32_f(TG3_CPMU_EEE_DBTMR2,
	       TG3_CPMU_DBTMR2_APE_TX_2047US |
	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
}

static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
{
	bool current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u32 current_speed;
	u8 current_duplex;
	int i, err;

	tg3_clear_mac_status(tp);

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
	    tp->link_up) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = true;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}
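	/* Note that BMSR link status is latched-low per the MII spec,
	 * which is why BMSR is read twice back to back throughout this
	 * function; the second read reflects the current state.
	 */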
	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = false;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			bool eee_config_ok = tg3_phy_eee_config_ok(tp);

			if ((bmcr & BMCR_ANENABLE) &&
			    eee_config_ok &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = true;

			/* EEE settings changes take effect only after a phy
			 * reset. If we have skipped a reset due to Link Flap
			 * Avoidance being enabled, do it now.
			 */
			if (!eee_config_ok &&
			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
			    !force_reset) {
				tg3_setup_eee(tp);
				tg3_phy_reset(tp);
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = true;
			}
		}

		if (current_link_up &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		if (tg3_flag(tp, ROBOSWITCH)) {
			current_link_up = true;
			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
			current_speed = SPEED_1000;
			current_duplex = DUPLEX_FULL;
			tp->link_config.active_speed = current_speed;
			tp->link_config.active_duplex = current_duplex;
		}

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = true;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* In order for the 5750 core in BCM4785 chip to work properly
	 * in RGMII mode, the Led Control Register must be set up.
	 */
	if (tg3_flag(tp, RGMII_MODE)) {
		u32 led_ctrl = tr32(MAC_LED_CTRL);
		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);

		if (tp->link_config.active_speed == SPEED_10)
			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
		else if (tp->link_config.active_speed == SPEED_100)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_100MBPS_ON);
		else if (tp->link_config.active_speed == SPEED_1000)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_1000MBPS_ON);

		tw32(MAC_LED_CTRL, led_ctrl);
		udelay(40);
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		if (current_link_up &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
	    current_link_up &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		       (MAC_STATUS_SYNC_CHANGED |
			MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN);
		else
			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
						 PCI_EXP_LNKCTL_CLKREQ_EN);
	}

	tg3_test_and_report_link_chg(tp, current_link_up);

	return 0;
}

struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000

static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;
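	/* Sample the incoming /C/ ordered-set configuration word. An
	 * "ability match" is only declared once the same nonzero config
	 * word has been seen on consecutive polls, filtering out words
	 * sampled mid-change.
	 */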
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		fallthrough;
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		fallthrough;
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		fallthrough;
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}

static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}

static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}

static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	bool current_link_up;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;

	serdes_cfg = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = false;

	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = true;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = true;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = true;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = true;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}

static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	bool current_link_up = false;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = true;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (!current_link_up &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = true;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = true;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}

static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
	u32 orig_pause_cfg;
	u32 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	bool current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = false;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
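/* Serdes variant with an MII-style register set. On 5719/5720 the
 * block may be strapped into SGMII mode, in which case speed and
 * duplex are taken directly from SERDES_TG3_1000X_STATUS instead of
 * being negotiated through the 1000base-X advertisement registers.
 */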
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
{
	int err = 0;
	u32 bmsr, bmcr;
	u32 current_speed = SPEED_UNKNOWN;
	u8 current_duplex = DUPLEX_UNKNOWN;
	bool current_link_up = false;
	u32 local_adv, remote_adv, sgsr;

	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
	    !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
	    (sgsr & SERDES_TG3_SGMII_MODE)) {

		if (force_reset)
			tg3_phy_reset(tp);

		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;

		if (!(sgsr & SERDES_TG3_LINK_UP)) {
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
		} else {
			current_link_up = true;
			if (sgsr & SERDES_TG3_SPEED_1000) {
				current_speed = SPEED_1000;
				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
			} else if (sgsr & SERDES_TG3_SPEED_100) {
				current_speed = SPEED_100;
				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
			} else {
				current_speed = SPEED_10;
				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
			}

			if (sgsr & SERDES_TG3_FULL_DUPLEX)
				current_duplex = DUPLEX_FULL;
			else
				current_duplex = DUPLEX_HALF;
		}

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		tg3_clear_mac_status(tp);

		goto fiber_setup_done;
	}

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_clear_mac_status(tp);

	if (force_reset)
		tg3_phy_reset(tp);

	tp->link_config.rmt_adv = 0;

	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (tp->link_up) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				tg3_carrier_off(tp);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = true;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = false;
			}
		}
	}

fiber_setup_done:
	if (current_link_up && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	tg3_test_and_report_link_chg(tp, current_link_up);
	return err;
}

static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}

static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}

/* tp->lock must be held */
static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
{
	u64 stamp;

	ptp_read_system_prets(sts);
	stamp = tr32(TG3_EAV_REF_CLCK_LSB);
	ptp_read_system_postts(sts);
	stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;

	return stamp;
}

/* tp->lock must be held */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);

	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
}
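/* tg3_full_lock()/tg3_full_unlock() are defined further down in this
 * file; the forward declarations let the PTP callbacks below
 * serialize against the rest of the driver.
 */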
tg3_full_unlock(struct tg3 *tp); 6153 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) 6154 { 6155 struct tg3 *tp = netdev_priv(dev); 6156 6157 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 6158 SOF_TIMESTAMPING_RX_SOFTWARE | 6159 SOF_TIMESTAMPING_SOFTWARE; 6160 6161 if (tg3_flag(tp, PTP_CAPABLE)) { 6162 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE | 6163 SOF_TIMESTAMPING_RX_HARDWARE | 6164 SOF_TIMESTAMPING_RAW_HARDWARE; 6165 } 6166 6167 if (tp->ptp_clock) 6168 info->phc_index = ptp_clock_index(tp->ptp_clock); 6169 else 6170 info->phc_index = -1; 6171 6172 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); 6173 6174 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 6175 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | 6176 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | 6177 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); 6178 return 0; 6179 } 6180 6181 static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) 6182 { 6183 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6184 u64 correction; 6185 bool neg_adj; 6186 6187 /* Frequency adjustment is performed using hardware with a 24 bit 6188 * accumulator and a programmable correction value. On each clk, the 6189 * correction value gets added to the accumulator and when it 6190 * overflows, the time counter is incremented/decremented. 6191 */ 6192 neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction); 6193 6194 tg3_full_lock(tp, 0); 6195 6196 if (correction) 6197 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 6198 TG3_EAV_REF_CLK_CORRECT_EN | 6199 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | 6200 ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK)); 6201 else 6202 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0); 6203 6204 tg3_full_unlock(tp); 6205 6206 return 0; 6207 } 6208 6209 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) 6210 { 6211 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6212 6213 tg3_full_lock(tp, 0); 6214 tp->ptp_adjust += delta; 6215 tg3_full_unlock(tp); 6216 6217 return 0; 6218 } 6219 6220 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, 6221 struct ptp_system_timestamp *sts) 6222 { 6223 u64 ns; 6224 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6225 6226 tg3_full_lock(tp, 0); 6227 ns = tg3_refclk_read(tp, sts); 6228 ns += tp->ptp_adjust; 6229 tg3_full_unlock(tp); 6230 6231 *ts = ns_to_timespec64(ns); 6232 6233 return 0; 6234 } 6235 6236 static int tg3_ptp_settime(struct ptp_clock_info *ptp, 6237 const struct timespec64 *ts) 6238 { 6239 u64 ns; 6240 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6241 6242 ns = timespec64_to_ns(ts); 6243 6244 tg3_full_lock(tp, 0); 6245 tg3_refclk_write(tp, ns); 6246 tp->ptp_adjust = 0; 6247 tg3_full_unlock(tp); 6248 6249 return 0; 6250 } 6251 6252 static int tg3_ptp_enable(struct ptp_clock_info *ptp, 6253 struct ptp_clock_request *rq, int on) 6254 { 6255 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6256 u32 clock_ctl; 6257 int rval = 0; 6258 6259 switch (rq->type) { 6260 case PTP_CLK_REQ_PEROUT: 6261 /* Reject requests with unsupported flags */ 6262 if (rq->perout.flags) 6263 return -EOPNOTSUPP; 6264 6265 if (rq->perout.index != 0) 6266 return -EINVAL; 6267 6268 tg3_full_lock(tp, 0); 6269 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL); 6270 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK; 6271 6272 if (on) { 6273 u64 nsec; 6274 6275 nsec = rq->perout.start.sec * 1000000000ULL + 6276 rq->perout.start.nsec; 6277 6278 if (rq->perout.period.sec || rq->perout.period.nsec) { 
6279 netdev_warn(tp->dev, 6280 "Device supports only a one-shot timesync output, period must be 0\n"); 6281 rval = -EINVAL; 6282 goto err_out; 6283 } 6284 6285 if (nsec & (1ULL << 63)) { 6286 netdev_warn(tp->dev, 6287 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n"); 6288 rval = -EINVAL; 6289 goto err_out; 6290 } 6291 6292 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff)); 6293 tw32(TG3_EAV_WATCHDOG0_MSB, 6294 TG3_EAV_WATCHDOG0_EN | 6295 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK)); 6296 6297 tw32(TG3_EAV_REF_CLCK_CTL, 6298 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0); 6299 } else { 6300 tw32(TG3_EAV_WATCHDOG0_MSB, 0); 6301 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl); 6302 } 6303 6304 err_out: 6305 tg3_full_unlock(tp); 6306 return rval; 6307 6308 default: 6309 break; 6310 } 6311 6312 return -EOPNOTSUPP; 6313 } 6314 6315 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock, 6316 struct skb_shared_hwtstamps *timestamp) 6317 { 6318 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps)); 6319 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) + 6320 tp->ptp_adjust); 6321 } 6322 6323 static void tg3_read_tx_tstamp(struct tg3 *tp, u64 *hwclock) 6324 { 6325 *hwclock = tr32(TG3_TX_TSTAMP_LSB); 6326 *hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32; 6327 } 6328 6329 static long tg3_ptp_ts_aux_work(struct ptp_clock_info *ptp) 6330 { 6331 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 6332 struct skb_shared_hwtstamps timestamp; 6333 u64 hwclock; 6334 6335 if (tp->ptp_txts_retrycnt > 2) 6336 goto done; 6337 6338 tg3_read_tx_tstamp(tp, &hwclock); 6339 6340 if (hwclock != tp->pre_tx_ts) { 6341 tg3_hwclock_to_timestamp(tp, hwclock, ×tamp); 6342 skb_tstamp_tx(tp->tx_tstamp_skb, ×tamp); 6343 goto done; 6344 } 6345 tp->ptp_txts_retrycnt++; 6346 return HZ / 10; 6347 done: 6348 dev_consume_skb_any(tp->tx_tstamp_skb); 6349 tp->tx_tstamp_skb = NULL; 6350 tp->ptp_txts_retrycnt = 0; 6351 tp->pre_tx_ts = 0; 6352 return -1; 6353 } 6354 6355 static const struct ptp_clock_info tg3_ptp_caps = { 6356 .owner = THIS_MODULE, 6357 .name = "tg3 clock", 6358 .max_adj = 250000000, 6359 .n_alarm = 0, 6360 .n_ext_ts = 0, 6361 .n_per_out = 1, 6362 .n_pins = 0, 6363 .pps = 0, 6364 .adjfine = tg3_ptp_adjfine, 6365 .adjtime = tg3_ptp_adjtime, 6366 .do_aux_work = tg3_ptp_ts_aux_work, 6367 .gettimex64 = tg3_ptp_gettimex, 6368 .settime64 = tg3_ptp_settime, 6369 .enable = tg3_ptp_enable, 6370 }; 6371 6372 /* tp->lock must be held */ 6373 static void tg3_ptp_init(struct tg3 *tp) 6374 { 6375 if (!tg3_flag(tp, PTP_CAPABLE)) 6376 return; 6377 6378 /* Initialize the hardware clock to the system time. 
*/ 6379 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real())); 6380 tp->ptp_adjust = 0; 6381 tp->ptp_info = tg3_ptp_caps; 6382 } 6383 6384 /* tp->lock must be held */ 6385 static void tg3_ptp_resume(struct tg3 *tp) 6386 { 6387 if (!tg3_flag(tp, PTP_CAPABLE)) 6388 return; 6389 6390 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust); 6391 tp->ptp_adjust = 0; 6392 } 6393 6394 static void tg3_ptp_fini(struct tg3 *tp) 6395 { 6396 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock) 6397 return; 6398 6399 ptp_clock_unregister(tp->ptp_clock); 6400 tp->ptp_clock = NULL; 6401 tp->ptp_adjust = 0; 6402 dev_consume_skb_any(tp->tx_tstamp_skb); 6403 tp->tx_tstamp_skb = NULL; 6404 } 6405 6406 static inline int tg3_irq_sync(struct tg3 *tp) 6407 { 6408 return tp->irq_sync; 6409 } 6410 6411 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len) 6412 { 6413 int i; 6414 6415 dst = (u32 *)((u8 *)dst + off); 6416 for (i = 0; i < len; i += sizeof(u32)) 6417 *dst++ = tr32(off + i); 6418 } 6419 6420 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs) 6421 { 6422 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0); 6423 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200); 6424 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0); 6425 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0); 6426 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04); 6427 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80); 6428 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48); 6429 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04); 6430 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20); 6431 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c); 6432 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c); 6433 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c); 6434 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44); 6435 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04); 6436 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20); 6437 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14); 6438 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08); 6439 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08); 6440 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100); 6441 6442 if (tg3_flag(tp, SUPPORT_MSIX)) 6443 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180); 6444 6445 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10); 6446 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58); 6447 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08); 6448 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08); 6449 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04); 6450 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04); 6451 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04); 6452 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04); 6453 6454 if (!tg3_flag(tp, 5705_PLUS)) { 6455 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04); 6456 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04); 6457 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04); 6458 } 6459 6460 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110); 6461 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120); 6462 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c); 6463 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04); 6464 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c); 6465 6466 if (tg3_flag(tp, NVRAM)) 6467 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24); 6468 } 6469 6470 static void tg3_dump_state(struct tg3 *tp) 6471 { 6472 int i; 6473 u32 *regs; 6474 6475 /* If it is a PCI error, all registers will be 0xffff, 6476 * we don't dump them out, just report the error and return 6477 */ 6478 if (tp->pdev->error_state != pci_channel_io_normal) { 6479 netdev_err(tp->dev, "PCI channel ERROR!\n"); 6480 return; 6481 } 6482 6483 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC); 6484 if (!regs) 6485 return; 6486 6487 
if (tg3_flag(tp, PCI_EXPRESS)) { 6488 /* Read up to but not including private PCI registers */ 6489 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32)) 6490 regs[i / sizeof(u32)] = tr32(i); 6491 } else 6492 tg3_dump_legacy_regs(tp, regs); 6493 6494 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) { 6495 if (!regs[i + 0] && !regs[i + 1] && 6496 !regs[i + 2] && !regs[i + 3]) 6497 continue; 6498 6499 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", 6500 i * 4, 6501 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]); 6502 } 6503 6504 kfree(regs); 6505 6506 for (i = 0; i < tp->irq_cnt; i++) { 6507 struct tg3_napi *tnapi = &tp->napi[i]; 6508 6509 /* SW status block */ 6510 netdev_err(tp->dev, 6511 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", 6512 i, 6513 tnapi->hw_status->status, 6514 tnapi->hw_status->status_tag, 6515 tnapi->hw_status->rx_jumbo_consumer, 6516 tnapi->hw_status->rx_consumer, 6517 tnapi->hw_status->rx_mini_consumer, 6518 tnapi->hw_status->idx[0].rx_producer, 6519 tnapi->hw_status->idx[0].tx_consumer); 6520 6521 netdev_err(tp->dev, 6522 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n", 6523 i, 6524 tnapi->last_tag, tnapi->last_irq_tag, 6525 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending, 6526 tnapi->rx_rcb_ptr, 6527 tnapi->prodring.rx_std_prod_idx, 6528 tnapi->prodring.rx_std_cons_idx, 6529 tnapi->prodring.rx_jmb_prod_idx, 6530 tnapi->prodring.rx_jmb_cons_idx); 6531 } 6532 } 6533 6534 /* This is called whenever we suspect that the system chipset is re- 6535 * ordering the sequence of MMIO to the tx send mailbox. The symptom 6536 * is bogus tx completions. We try to recover by setting the 6537 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later 6538 * in the workqueue. 6539 */ 6540 static void tg3_tx_recover(struct tg3 *tp) 6541 { 6542 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) || 6543 tp->write32_tx_mbox == tg3_write_indirect_mbox); 6544 6545 netdev_warn(tp->dev, 6546 "The system may be re-ordering memory-mapped I/O " 6547 "cycles to the network device, attempting to recover. " 6548 "Please report the problem to the driver maintainer " 6549 "and include system chipset information.\n"); 6550 6551 tg3_flag_set(tp, TX_RECOVERY_PENDING); 6552 } 6553 6554 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) 6555 { 6556 /* Tell compiler to fetch tx indices from memory. */ 6557 barrier(); 6558 return tnapi->tx_pending - 6559 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1)); 6560 } 6561 6562 /* Tigon3 never reports partial packet sends. So we do not 6563 * need special logic to handle SKBs that have not had all 6564 * of their frags sent yet, like SunGEM does. 
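 * Completion handling in tg3_tx() below therefore reclaims whole
 * packets at a time, stepping sw_idx forward with NEXT_TX() until it
 * meets the hardware consumer index.  (NEXT_TX() masks with
 * TG3_TX_RING_SIZE - 1, so for the 512-entry ring (511 + 1) & 511
 * wraps back to slot 0.)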
6565 */ 6566 static void tg3_tx(struct tg3_napi *tnapi) 6567 { 6568 struct tg3 *tp = tnapi->tp; 6569 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer; 6570 u32 sw_idx = tnapi->tx_cons; 6571 struct netdev_queue *txq; 6572 int index = tnapi - tp->napi; 6573 unsigned int pkts_compl = 0, bytes_compl = 0; 6574 6575 if (tg3_flag(tp, ENABLE_TSS)) 6576 index--; 6577 6578 txq = netdev_get_tx_queue(tp->dev, index); 6579 6580 while (sw_idx != hw_idx) { 6581 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx]; 6582 bool complete_skb_later = false; 6583 struct sk_buff *skb = ri->skb; 6584 int i, tx_bug = 0; 6585 6586 if (unlikely(skb == NULL)) { 6587 tg3_tx_recover(tp); 6588 return; 6589 } 6590 6591 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) { 6592 struct skb_shared_hwtstamps timestamp; 6593 u64 hwclock; 6594 6595 tg3_read_tx_tstamp(tp, &hwclock); 6596 if (hwclock != tp->pre_tx_ts) { 6597 tg3_hwclock_to_timestamp(tp, hwclock, ×tamp); 6598 skb_tstamp_tx(skb, ×tamp); 6599 tp->pre_tx_ts = 0; 6600 } else { 6601 tp->tx_tstamp_skb = skb; 6602 complete_skb_later = true; 6603 } 6604 } 6605 6606 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), 6607 skb_headlen(skb), DMA_TO_DEVICE); 6608 6609 ri->skb = NULL; 6610 6611 while (ri->fragmented) { 6612 ri->fragmented = false; 6613 sw_idx = NEXT_TX(sw_idx); 6614 ri = &tnapi->tx_buffers[sw_idx]; 6615 } 6616 6617 sw_idx = NEXT_TX(sw_idx); 6618 6619 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 6620 ri = &tnapi->tx_buffers[sw_idx]; 6621 if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) 6622 tx_bug = 1; 6623 6624 dma_unmap_page(&tp->pdev->dev, 6625 dma_unmap_addr(ri, mapping), 6626 skb_frag_size(&skb_shinfo(skb)->frags[i]), 6627 DMA_TO_DEVICE); 6628 6629 while (ri->fragmented) { 6630 ri->fragmented = false; 6631 sw_idx = NEXT_TX(sw_idx); 6632 ri = &tnapi->tx_buffers[sw_idx]; 6633 } 6634 6635 sw_idx = NEXT_TX(sw_idx); 6636 } 6637 6638 pkts_compl++; 6639 bytes_compl += skb->len; 6640 6641 if (!complete_skb_later) 6642 dev_consume_skb_any(skb); 6643 else 6644 ptp_schedule_worker(tp->ptp_clock, 0); 6645 6646 if (unlikely(tx_bug)) { 6647 tg3_tx_recover(tp); 6648 return; 6649 } 6650 } 6651 6652 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl); 6653 6654 tnapi->tx_cons = sw_idx; 6655 6656 /* Need to make the tx_cons update visible to __tg3_start_xmit() 6657 * before checking for netif_queue_stopped(). Without the 6658 * memory barrier, there is a small possibility that __tg3_start_xmit() 6659 * will miss it and cause the queue to be stopped forever. 
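	 * This smp_mb() pairs with the smp_mb() in __tg3_start_xmit(),
	 * which sits between netif_tx_stop_queue() and the re-check of
	 * tg3_tx_avail() on the producer side.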
6660 	 */
6661 	smp_mb();
6662 
6663 	if (unlikely(netif_tx_queue_stopped(txq) &&
6664 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6665 		__netif_tx_lock(txq, smp_processor_id());
6666 		if (netif_tx_queue_stopped(txq) &&
6667 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6668 			netif_tx_wake_queue(txq);
6669 		__netif_tx_unlock(txq);
6670 	}
6671 }
6672 
6673 static void tg3_frag_free(bool is_frag, void *data)
6674 {
6675 	if (is_frag)
6676 		skb_free_frag(data);
6677 	else
6678 		kfree(data);
6679 }
6680 
6681 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6682 {
6683 	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6684 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6685 
6686 	if (!ri->data)
6687 		return;
6688 
6689 	dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6690 			 DMA_FROM_DEVICE);
6691 	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6692 	ri->data = NULL;
6693 }
6694 
6695 
6696 /* Returns size of skb allocated or < 0 on error.
6697  *
6698  * We only need to fill in the address because the other members
6699  * of the RX descriptor are invariant, see tg3_init_rings.
6700  *
6701  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6702  * posting buffers we only dirty the first cache line of the RX
6703  * descriptor (containing the address), whereas for the RX status
6704  * buffers the cpu only reads the last cacheline of the RX descriptor
6705  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6706  */
6707 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6708 			     u32 opaque_key, u32 dest_idx_unmasked,
6709 			     unsigned int *frag_size)
6710 {
6711 	struct tg3_rx_buffer_desc *desc;
6712 	struct ring_info *map;
6713 	u8 *data;
6714 	dma_addr_t mapping;
6715 	int skb_size, data_size, dest_idx;
6716 
6717 	switch (opaque_key) {
6718 	case RXD_OPAQUE_RING_STD:
6719 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6720 		desc = &tpr->rx_std[dest_idx];
6721 		map = &tpr->rx_std_buffers[dest_idx];
6722 		data_size = tp->rx_pkt_map_sz;
6723 		break;
6724 
6725 	case RXD_OPAQUE_RING_JUMBO:
6726 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6727 		desc = &tpr->rx_jmb[dest_idx].std;
6728 		map = &tpr->rx_jmb_buffers[dest_idx];
6729 		data_size = TG3_RX_JMB_MAP_SZ;
6730 		break;
6731 
6732 	default:
6733 		return -EINVAL;
6734 	}
6735 
6736 	/* Do not overwrite any of the map or rp information
6737 	 * until we are sure we can commit to a new buffer.
6738 	 *
6739 	 * Callers depend upon this behavior and assume that
6740 	 * we leave everything unchanged if we fail.
6741 	 */
6742 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6743 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6744 	if (skb_size <= PAGE_SIZE) {
6745 		data = napi_alloc_frag(skb_size);
6746 		*frag_size = skb_size;
6747 	} else {
6748 		data = kmalloc(skb_size, GFP_ATOMIC);
6749 		*frag_size = 0;
6750 	}
6751 	if (!data)
6752 		return -ENOMEM;
6753 
6754 	mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6755 				 data_size, DMA_FROM_DEVICE);
6756 	if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6757 		tg3_frag_free(skb_size <= PAGE_SIZE, data);
6758 		return -EIO;
6759 	}
6760 
6761 	map->data = data;
6762 	dma_unmap_addr_set(map, mapping, mapping);
6763 
6764 	desc->addr_hi = ((u64)mapping >> 32);
6765 	desc->addr_lo = ((u64)mapping & 0xffffffff);
6766 
6767 	return data_size;
6768 }
6769 
6770 /* We only need to move over in the address because the other
6771  * members of the RX descriptor are invariant.
See notes above
6772  * tg3_alloc_rx_data for full details.
6773  */
6774 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6775 			   struct tg3_rx_prodring_set *dpr,
6776 			   u32 opaque_key, int src_idx,
6777 			   u32 dest_idx_unmasked)
6778 {
6779 	struct tg3 *tp = tnapi->tp;
6780 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6781 	struct ring_info *src_map, *dest_map;
6782 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6783 	int dest_idx;
6784 
6785 	switch (opaque_key) {
6786 	case RXD_OPAQUE_RING_STD:
6787 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6788 		dest_desc = &dpr->rx_std[dest_idx];
6789 		dest_map = &dpr->rx_std_buffers[dest_idx];
6790 		src_desc = &spr->rx_std[src_idx];
6791 		src_map = &spr->rx_std_buffers[src_idx];
6792 		break;
6793 
6794 	case RXD_OPAQUE_RING_JUMBO:
6795 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6796 		dest_desc = &dpr->rx_jmb[dest_idx].std;
6797 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
6798 		src_desc = &spr->rx_jmb[src_idx].std;
6799 		src_map = &spr->rx_jmb_buffers[src_idx];
6800 		break;
6801 
6802 	default:
6803 		return;
6804 	}
6805 
6806 	dest_map->data = src_map->data;
6807 	dma_unmap_addr_set(dest_map, mapping,
6808 			   dma_unmap_addr(src_map, mapping));
6809 	dest_desc->addr_hi = src_desc->addr_hi;
6810 	dest_desc->addr_lo = src_desc->addr_lo;
6811 
6812 	/* Ensure that the update to the skb happens after the physical
6813 	 * addresses have been transferred to the new BD location.
6814 	 */
6815 	smp_wmb();
6816 
6817 	src_map->data = NULL;
6818 }
6819 
6820 /* The RX ring scheme is composed of multiple rings which post fresh
6821  * buffers to the chip, and one special ring the chip uses to report
6822  * status back to the host.
6823  *
6824  * The special ring reports the status of received packets to the
6825  * host.  The chip does not write into the original descriptor the
6826  * RX buffer was obtained from.  The chip simply takes the original
6827  * descriptor as provided by the host, updates the status and length
6828  * field, then writes this into the next status ring entry.
6829  *
6830  * Each ring the host uses to post buffers to the chip is described
6831  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6832  * it is first placed into the on-chip ram.  When the packet's length
6833  * is known, it walks down the TG3_BDINFO entries to select the ring.
6834  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
6835  * whose MAXLEN covers the new packet's length is chosen.
6836  *
6837  * The "separate ring for rx status" scheme may sound queer, but it makes
6838  * sense from a cache coherency perspective.  If only the host writes
6839  * to the buffer post rings, and only the chip writes to the rx status
6840  * rings, then cache lines never move beyond shared-modified state.
6841  * If both the host and chip were to write into the same ring, cache line
6842  * eviction could occur since both entities want it in an exclusive state.
6843  */
6844 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6845 {
6846 	struct tg3 *tp = tnapi->tp;
6847 	u32 work_mask, rx_std_posted = 0;
6848 	u32 std_prod_idx, jmb_prod_idx;
6849 	u32 sw_idx = tnapi->rx_rcb_ptr;
6850 	u16 hw_idx;
6851 	int received;
6852 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6853 
6854 	hw_idx = *(tnapi->rx_rcb_prod_idx);
6855 	/*
6856 	 * We need to order the read of hw_idx and the read of
6857 	 * the opaque cookie.
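	 * The chip is expected to write the return-ring descriptors
	 * before it updates the status-block producer index, so once
	 * hw_idx has been read, the descriptors it covers must not be
	 * read ahead of it; the rmb() below keeps the CPU from
	 * speculating those reads.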
6858 */ 6859 rmb(); 6860 work_mask = 0; 6861 received = 0; 6862 std_prod_idx = tpr->rx_std_prod_idx; 6863 jmb_prod_idx = tpr->rx_jmb_prod_idx; 6864 while (sw_idx != hw_idx && budget > 0) { 6865 struct ring_info *ri; 6866 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; 6867 unsigned int len; 6868 struct sk_buff *skb; 6869 dma_addr_t dma_addr; 6870 u32 opaque_key, desc_idx, *post_ptr; 6871 u8 *data; 6872 u64 tstamp = 0; 6873 6874 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 6875 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 6876 if (opaque_key == RXD_OPAQUE_RING_STD) { 6877 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx]; 6878 dma_addr = dma_unmap_addr(ri, mapping); 6879 data = ri->data; 6880 post_ptr = &std_prod_idx; 6881 rx_std_posted++; 6882 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 6883 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx]; 6884 dma_addr = dma_unmap_addr(ri, mapping); 6885 data = ri->data; 6886 post_ptr = &jmb_prod_idx; 6887 } else 6888 goto next_pkt_nopost; 6889 6890 work_mask |= opaque_key; 6891 6892 if (desc->err_vlan & RXD_ERR_MASK) { 6893 drop_it: 6894 tg3_recycle_rx(tnapi, tpr, opaque_key, 6895 desc_idx, *post_ptr); 6896 drop_it_no_recycle: 6897 /* Other statistics kept track of by card. */ 6898 tnapi->rx_dropped++; 6899 goto next_pkt; 6900 } 6901 6902 prefetch(data + TG3_RX_OFFSET(tp)); 6903 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 6904 ETH_FCS_LEN; 6905 6906 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) == 6907 RXD_FLAG_PTPSTAT_PTPV1 || 6908 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) == 6909 RXD_FLAG_PTPSTAT_PTPV2) { 6910 tstamp = tr32(TG3_RX_TSTAMP_LSB); 6911 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32; 6912 } 6913 6914 if (len > TG3_RX_COPY_THRESH(tp)) { 6915 int skb_size; 6916 unsigned int frag_size; 6917 6918 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key, 6919 *post_ptr, &frag_size); 6920 if (skb_size < 0) 6921 goto drop_it; 6922 6923 dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size, 6924 DMA_FROM_DEVICE); 6925 6926 /* Ensure that the update to the data happens 6927 * after the usage of the old DMA mapping. 
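			 * (Clearing ri->data below is what marks this
			 * producer-ring slot as consumed;
			 * tg3_rx_prodring_xfer() tests this very field
			 * before it will reuse a slot.)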
6928 */ 6929 smp_wmb(); 6930 6931 ri->data = NULL; 6932 6933 if (frag_size) 6934 skb = build_skb(data, frag_size); 6935 else 6936 skb = slab_build_skb(data); 6937 if (!skb) { 6938 tg3_frag_free(frag_size != 0, data); 6939 goto drop_it_no_recycle; 6940 } 6941 skb_reserve(skb, TG3_RX_OFFSET(tp)); 6942 } else { 6943 tg3_recycle_rx(tnapi, tpr, opaque_key, 6944 desc_idx, *post_ptr); 6945 6946 skb = netdev_alloc_skb(tp->dev, 6947 len + TG3_RAW_IP_ALIGN); 6948 if (skb == NULL) 6949 goto drop_it_no_recycle; 6950 6951 skb_reserve(skb, TG3_RAW_IP_ALIGN); 6952 dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len, 6953 DMA_FROM_DEVICE); 6954 memcpy(skb->data, 6955 data + TG3_RX_OFFSET(tp), 6956 len); 6957 dma_sync_single_for_device(&tp->pdev->dev, dma_addr, 6958 len, DMA_FROM_DEVICE); 6959 } 6960 6961 skb_put(skb, len); 6962 if (tstamp) 6963 tg3_hwclock_to_timestamp(tp, tstamp, 6964 skb_hwtstamps(skb)); 6965 6966 if ((tp->dev->features & NETIF_F_RXCSUM) && 6967 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && 6968 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) 6969 >> RXD_TCPCSUM_SHIFT) == 0xffff)) 6970 skb->ip_summed = CHECKSUM_UNNECESSARY; 6971 else 6972 skb_checksum_none_assert(skb); 6973 6974 skb->protocol = eth_type_trans(skb, tp->dev); 6975 6976 if (len > (tp->dev->mtu + ETH_HLEN) && 6977 skb->protocol != htons(ETH_P_8021Q) && 6978 skb->protocol != htons(ETH_P_8021AD)) { 6979 dev_kfree_skb_any(skb); 6980 goto drop_it_no_recycle; 6981 } 6982 6983 if (desc->type_flags & RXD_FLAG_VLAN && 6984 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) 6985 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 6986 desc->err_vlan & RXD_VLAN_MASK); 6987 6988 napi_gro_receive(&tnapi->napi, skb); 6989 6990 received++; 6991 budget--; 6992 6993 next_pkt: 6994 (*post_ptr)++; 6995 6996 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { 6997 tpr->rx_std_prod_idx = std_prod_idx & 6998 tp->rx_std_ring_mask; 6999 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 7000 tpr->rx_std_prod_idx); 7001 work_mask &= ~RXD_OPAQUE_RING_STD; 7002 rx_std_posted = 0; 7003 } 7004 next_pkt_nopost: 7005 sw_idx++; 7006 sw_idx &= tp->rx_ret_ring_mask; 7007 7008 /* Refresh hw_idx to see if there is new work */ 7009 if (sw_idx == hw_idx) { 7010 hw_idx = *(tnapi->rx_rcb_prod_idx); 7011 rmb(); 7012 } 7013 } 7014 7015 /* ACK the status ring. */ 7016 tnapi->rx_rcb_ptr = sw_idx; 7017 tw32_rx_mbox(tnapi->consmbox, sw_idx); 7018 7019 /* Refill RX ring(s). */ 7020 if (!tg3_flag(tp, ENABLE_RSS)) { 7021 /* Sync BD data before updating mailbox */ 7022 wmb(); 7023 7024 if (work_mask & RXD_OPAQUE_RING_STD) { 7025 tpr->rx_std_prod_idx = std_prod_idx & 7026 tp->rx_std_ring_mask; 7027 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 7028 tpr->rx_std_prod_idx); 7029 } 7030 if (work_mask & RXD_OPAQUE_RING_JUMBO) { 7031 tpr->rx_jmb_prod_idx = jmb_prod_idx & 7032 tp->rx_jmb_ring_mask; 7033 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 7034 tpr->rx_jmb_prod_idx); 7035 } 7036 } else if (work_mask) { 7037 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be 7038 * updated before the producer indices can be updated. 
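		 * This smp_wmb() pairs with the smp_rmb() in
		 * tg3_rx_prodring_xfer(), which reads the producer index
		 * first and the ring entries second.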
7039 		 */
7040 		smp_wmb();
7041 
7042 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
7043 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7044 
7045 		if (tnapi != &tp->napi[1]) {
7046 			tp->rx_refill = true;
7047 			napi_schedule(&tp->napi[1].napi);
7048 		}
7049 	}
7050 
7051 	return received;
7052 }
7053 
7054 static void tg3_poll_link(struct tg3 *tp)
7055 {
7056 	/* handle link change and other phy events */
7057 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7058 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7059 
7060 		if (sblk->status & SD_STATUS_LINK_CHG) {
7061 			sblk->status = SD_STATUS_UPDATED |
7062 				       (sblk->status & ~SD_STATUS_LINK_CHG);
7063 			spin_lock(&tp->lock);
7064 			if (tg3_flag(tp, USE_PHYLIB)) {
7065 				tw32_f(MAC_STATUS,
7066 				       (MAC_STATUS_SYNC_CHANGED |
7067 					MAC_STATUS_CFG_CHANGED |
7068 					MAC_STATUS_MI_COMPLETION |
7069 					MAC_STATUS_LNKSTATE_CHANGED));
7070 				udelay(40);
7071 			} else
7072 				tg3_setup_phy(tp, false);
7073 			spin_unlock(&tp->lock);
7074 		}
7075 	}
7076 }
7077 
7078 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7079 				struct tg3_rx_prodring_set *dpr,
7080 				struct tg3_rx_prodring_set *spr)
7081 {
7082 	u32 si, di, cpycnt, src_prod_idx;
7083 	int i, err = 0;
7084 
7085 	while (1) {
7086 		src_prod_idx = spr->rx_std_prod_idx;
7087 
7088 		/* Make sure updates to the rx_std_buffers[] entries and the
7089 		 * standard producer index are seen in the correct order.
7090 		 */
7091 		smp_rmb();
7092 
7093 		if (spr->rx_std_cons_idx == src_prod_idx)
7094 			break;
7095 
7096 		if (spr->rx_std_cons_idx < src_prod_idx)
7097 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7098 		else
7099 			cpycnt = tp->rx_std_ring_mask + 1 -
7100 				 spr->rx_std_cons_idx;
7101 
7102 		cpycnt = min(cpycnt,
7103 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7104 
7105 		si = spr->rx_std_cons_idx;
7106 		di = dpr->rx_std_prod_idx;
7107 
7108 		for (i = di; i < di + cpycnt; i++) {
7109 			if (dpr->rx_std_buffers[i].data) {
7110 				cpycnt = i - di;
7111 				err = -ENOSPC;
7112 				break;
7113 			}
7114 		}
7115 
7116 		if (!cpycnt)
7117 			break;
7118 
7119 		/* Ensure that updates to the rx_std_buffers ring and the
7120 		 * shadowed hardware producer ring from tg3_recycle_rx() are
7121 		 * ordered correctly WRT the skb check above.
7122 		 */
7123 		smp_rmb();
7124 
7125 		memcpy(&dpr->rx_std_buffers[di],
7126 		       &spr->rx_std_buffers[si],
7127 		       cpycnt * sizeof(struct ring_info));
7128 
7129 		for (i = 0; i < cpycnt; i++, di++, si++) {
7130 			struct tg3_rx_buffer_desc *sbd, *dbd;
7131 			sbd = &spr->rx_std[si];
7132 			dbd = &dpr->rx_std[di];
7133 			dbd->addr_hi = sbd->addr_hi;
7134 			dbd->addr_lo = sbd->addr_lo;
7135 		}
7136 
7137 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7138 				       tp->rx_std_ring_mask;
7139 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7140 				       tp->rx_std_ring_mask;
7141 	}
7142 
7143 	while (1) {
7144 		src_prod_idx = spr->rx_jmb_prod_idx;
7145 
7146 		/* Make sure updates to the rx_jmb_buffers[] entries and
7147 		 * the jumbo producer index are seen in the correct order.
7148 		 */
7149 		smp_rmb();
7150 
7151 		if (spr->rx_jmb_cons_idx == src_prod_idx)
7152 			break;
7153 
7154 		if (spr->rx_jmb_cons_idx < src_prod_idx)
7155 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7156 		else
7157 			cpycnt = tp->rx_jmb_ring_mask + 1 -
7158 				 spr->rx_jmb_cons_idx;
7159 
7160 		cpycnt = min(cpycnt,
7161 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7162 
7163 		si = spr->rx_jmb_cons_idx;
7164 		di = dpr->rx_jmb_prod_idx;
7165 
7166 		for (i = di; i < di + cpycnt; i++) {
7167 			if (dpr->rx_jmb_buffers[i].data) {
7168 				cpycnt = i - di;
7169 				err = -ENOSPC;
7170 				break;
7171 			}
7172 		}
7173 
7174 		if (!cpycnt)
7175 			break;
7176 
7177 		/* Ensure that updates to the rx_jmb_buffers ring and the
7178 		 * shadowed hardware producer ring from tg3_recycle_rx() are
7179 		 * ordered correctly WRT the skb check above.
7180 		 */
7181 		smp_rmb();
7182 
7183 		memcpy(&dpr->rx_jmb_buffers[di],
7184 		       &spr->rx_jmb_buffers[si],
7185 		       cpycnt * sizeof(struct ring_info));
7186 
7187 		for (i = 0; i < cpycnt; i++, di++, si++) {
7188 			struct tg3_rx_buffer_desc *sbd, *dbd;
7189 			sbd = &spr->rx_jmb[si].std;
7190 			dbd = &dpr->rx_jmb[di].std;
7191 			dbd->addr_hi = sbd->addr_hi;
7192 			dbd->addr_lo = sbd->addr_lo;
7193 		}
7194 
7195 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7196 				       tp->rx_jmb_ring_mask;
7197 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7198 				       tp->rx_jmb_ring_mask;
7199 	}
7200 
7201 	return err;
7202 }
7203 
7204 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7205 {
7206 	struct tg3 *tp = tnapi->tp;
7207 
7208 	/* run TX completion thread */
7209 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7210 		tg3_tx(tnapi);
7211 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7212 			return work_done;
7213 	}
7214 
7215 	if (!tnapi->rx_rcb_prod_idx)
7216 		return work_done;
7217 
7218 	/* run RX thread, within the bounds set by NAPI.
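	 * tg3_rx() is handed (budget - work_done), so the running total
	 * can never exceed the overall NAPI budget.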
7219 * All RX "locking" is done by ensuring outside 7220 * code synchronizes with tg3->napi.poll() 7221 */ 7222 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) 7223 work_done += tg3_rx(tnapi, budget - work_done); 7224 7225 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) { 7226 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring; 7227 int i, err = 0; 7228 u32 std_prod_idx = dpr->rx_std_prod_idx; 7229 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; 7230 7231 tp->rx_refill = false; 7232 for (i = 1; i <= tp->rxq_cnt; i++) 7233 err |= tg3_rx_prodring_xfer(tp, dpr, 7234 &tp->napi[i].prodring); 7235 7236 wmb(); 7237 7238 if (std_prod_idx != dpr->rx_std_prod_idx) 7239 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 7240 dpr->rx_std_prod_idx); 7241 7242 if (jmb_prod_idx != dpr->rx_jmb_prod_idx) 7243 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 7244 dpr->rx_jmb_prod_idx); 7245 7246 if (err) 7247 tw32_f(HOSTCC_MODE, tp->coal_now); 7248 } 7249 7250 return work_done; 7251 } 7252 7253 static inline void tg3_reset_task_schedule(struct tg3 *tp) 7254 { 7255 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) 7256 schedule_work(&tp->reset_task); 7257 } 7258 7259 static inline void tg3_reset_task_cancel(struct tg3 *tp) 7260 { 7261 if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) 7262 cancel_work_sync(&tp->reset_task); 7263 tg3_flag_clear(tp, TX_RECOVERY_PENDING); 7264 } 7265 7266 static int tg3_poll_msix(struct napi_struct *napi, int budget) 7267 { 7268 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); 7269 struct tg3 *tp = tnapi->tp; 7270 int work_done = 0; 7271 struct tg3_hw_status *sblk = tnapi->hw_status; 7272 7273 while (1) { 7274 work_done = tg3_poll_work(tnapi, work_done, budget); 7275 7276 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) 7277 goto tx_recovery; 7278 7279 if (unlikely(work_done >= budget)) 7280 break; 7281 7282 /* tp->last_tag is used in tg3_int_reenable() below 7283 * to tell the hw how much work has been processed, 7284 * so we must read it before checking for more work. 7285 */ 7286 tnapi->last_tag = sblk->status_tag; 7287 tnapi->last_irq_tag = tnapi->last_tag; 7288 rmb(); 7289 7290 /* check for RX/TX work to do */ 7291 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons && 7292 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) { 7293 7294 /* This test here is not race free, but will reduce 7295 * the number of interrupts by looping again. 7296 */ 7297 if (tnapi == &tp->napi[1] && tp->rx_refill) 7298 continue; 7299 7300 napi_complete_done(napi, work_done); 7301 /* Reenable interrupts. */ 7302 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); 7303 7304 /* This test here is synchronized by napi_schedule() 7305 * and napi_complete() to close the race condition. 7306 */ 7307 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) { 7308 tw32(HOSTCC_MODE, tp->coalesce_mode | 7309 HOSTCC_MODE_ENABLE | 7310 tnapi->coal_now); 7311 } 7312 break; 7313 } 7314 } 7315 7316 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1); 7317 return work_done; 7318 7319 tx_recovery: 7320 /* work_done is guaranteed to be less than budget. */ 7321 napi_complete(napi); 7322 tg3_reset_task_schedule(tp); 7323 return work_done; 7324 } 7325 7326 static void tg3_process_error(struct tg3 *tp) 7327 { 7328 u32 val; 7329 bool real_error = false; 7330 7331 if (tg3_flag(tp, ERROR_PROCESSED)) 7332 return; 7333 7334 /* Check Flow Attention register */ 7335 val = tr32(HOSTCC_FLOW_ATTN); 7336 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) { 7337 netdev_err(tp->dev, "FLOW Attention error. 
Resetting chip.\n"); 7338 real_error = true; 7339 } 7340 7341 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) { 7342 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n"); 7343 real_error = true; 7344 } 7345 7346 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) { 7347 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n"); 7348 real_error = true; 7349 } 7350 7351 if (!real_error) 7352 return; 7353 7354 tg3_dump_state(tp); 7355 7356 tg3_flag_set(tp, ERROR_PROCESSED); 7357 tg3_reset_task_schedule(tp); 7358 } 7359 7360 static int tg3_poll(struct napi_struct *napi, int budget) 7361 { 7362 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); 7363 struct tg3 *tp = tnapi->tp; 7364 int work_done = 0; 7365 struct tg3_hw_status *sblk = tnapi->hw_status; 7366 7367 while (1) { 7368 if (sblk->status & SD_STATUS_ERROR) 7369 tg3_process_error(tp); 7370 7371 tg3_poll_link(tp); 7372 7373 work_done = tg3_poll_work(tnapi, work_done, budget); 7374 7375 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) 7376 goto tx_recovery; 7377 7378 if (unlikely(work_done >= budget)) 7379 break; 7380 7381 if (tg3_flag(tp, TAGGED_STATUS)) { 7382 /* tp->last_tag is used in tg3_int_reenable() below 7383 * to tell the hw how much work has been processed, 7384 * so we must read it before checking for more work. 7385 */ 7386 tnapi->last_tag = sblk->status_tag; 7387 tnapi->last_irq_tag = tnapi->last_tag; 7388 rmb(); 7389 } else 7390 sblk->status &= ~SD_STATUS_UPDATED; 7391 7392 if (likely(!tg3_has_work(tnapi))) { 7393 napi_complete_done(napi, work_done); 7394 tg3_int_reenable(tnapi); 7395 break; 7396 } 7397 } 7398 7399 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1); 7400 return work_done; 7401 7402 tx_recovery: 7403 /* work_done is guaranteed to be less than budget. 
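	 * (tg3_poll_work() returns without adding RX work once TX
	 * recovery is pending, and the loop is only re-entered while
	 * work_done is still below budget, so calling napi_complete()
	 * here is safe.)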
*/ 7404 napi_complete(napi); 7405 tg3_reset_task_schedule(tp); 7406 return work_done; 7407 } 7408 7409 static void tg3_napi_disable(struct tg3 *tp) 7410 { 7411 int i; 7412 7413 for (i = tp->irq_cnt - 1; i >= 0; i--) 7414 napi_disable(&tp->napi[i].napi); 7415 } 7416 7417 static void tg3_napi_enable(struct tg3 *tp) 7418 { 7419 int i; 7420 7421 for (i = 0; i < tp->irq_cnt; i++) 7422 napi_enable(&tp->napi[i].napi); 7423 } 7424 7425 static void tg3_napi_init(struct tg3 *tp) 7426 { 7427 int i; 7428 7429 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll); 7430 for (i = 1; i < tp->irq_cnt; i++) 7431 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix); 7432 } 7433 7434 static void tg3_napi_fini(struct tg3 *tp) 7435 { 7436 int i; 7437 7438 for (i = 0; i < tp->irq_cnt; i++) 7439 netif_napi_del(&tp->napi[i].napi); 7440 } 7441 7442 static inline void tg3_netif_stop(struct tg3 *tp) 7443 { 7444 netif_trans_update(tp->dev); /* prevent tx timeout */ 7445 tg3_napi_disable(tp); 7446 netif_carrier_off(tp->dev); 7447 netif_tx_disable(tp->dev); 7448 } 7449 7450 /* tp->lock must be held */ 7451 static inline void tg3_netif_start(struct tg3 *tp) 7452 { 7453 tg3_ptp_resume(tp); 7454 7455 /* NOTE: unconditional netif_tx_wake_all_queues is only 7456 * appropriate so long as all callers are assured to 7457 * have free tx slots (such as after tg3_init_hw) 7458 */ 7459 netif_tx_wake_all_queues(tp->dev); 7460 7461 if (tp->link_up) 7462 netif_carrier_on(tp->dev); 7463 7464 tg3_napi_enable(tp); 7465 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED; 7466 tg3_enable_ints(tp); 7467 } 7468 7469 static void tg3_irq_quiesce(struct tg3 *tp) 7470 __releases(tp->lock) 7471 __acquires(tp->lock) 7472 { 7473 int i; 7474 7475 BUG_ON(tp->irq_sync); 7476 7477 tp->irq_sync = 1; 7478 smp_mb(); 7479 7480 spin_unlock_bh(&tp->lock); 7481 7482 for (i = 0; i < tp->irq_cnt; i++) 7483 synchronize_irq(tp->napi[i].irq_vec); 7484 7485 spin_lock_bh(&tp->lock); 7486 } 7487 7488 /* Fully shutdown all tg3 driver activity elsewhere in the system. 7489 * If irq_sync is non-zero, then the IRQ handler must be synchronized 7490 * with as well. Most of the time, this is not necessary except when 7491 * shutting down the device. 7492 */ 7493 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync) 7494 { 7495 spin_lock_bh(&tp->lock); 7496 if (irq_sync) 7497 tg3_irq_quiesce(tp); 7498 } 7499 7500 static inline void tg3_full_unlock(struct tg3 *tp) 7501 { 7502 spin_unlock_bh(&tp->lock); 7503 } 7504 7505 /* One-shot MSI handler - Chip automatically disables interrupt 7506 * after sending MSI so driver doesn't have to do it. 7507 */ 7508 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) 7509 { 7510 struct tg3_napi *tnapi = dev_id; 7511 struct tg3 *tp = tnapi->tp; 7512 7513 prefetch(tnapi->hw_status); 7514 if (tnapi->rx_rcb) 7515 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); 7516 7517 if (likely(!tg3_irq_sync(tp))) 7518 napi_schedule(&tnapi->napi); 7519 7520 return IRQ_HANDLED; 7521 } 7522 7523 /* MSI ISR - No need to check for interrupt sharing and no need to 7524 * flush status block and interrupt mailbox. PCI ordering rules 7525 * guarantee that MSI will arrive after the status block. 
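 * (Both the status-block update and the MSI are posted writes that
 * travel the same path to the host, so the ordering rules prevent
 * the MSI from overtaking the status block.)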
7526  */
7527 static irqreturn_t tg3_msi(int irq, void *dev_id)
7528 {
7529 	struct tg3_napi *tnapi = dev_id;
7530 	struct tg3 *tp = tnapi->tp;
7531 
7532 	prefetch(tnapi->hw_status);
7533 	if (tnapi->rx_rcb)
7534 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7535 	/*
7536 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7537 	 * chip-internal interrupt pending events.
7538 	 * Writing non-zero to intr-mbox-0 additionally tells the
7539 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7540 	 * event coalescing.
7541 	 */
7542 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
7543 	if (likely(!tg3_irq_sync(tp)))
7544 		napi_schedule(&tnapi->napi);
7545 
7546 	return IRQ_RETVAL(1);
7547 }
7548 
7549 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7550 {
7551 	struct tg3_napi *tnapi = dev_id;
7552 	struct tg3 *tp = tnapi->tp;
7553 	struct tg3_hw_status *sblk = tnapi->hw_status;
7554 	unsigned int handled = 1;
7555 
7556 	/* In INTx mode, it is possible for the interrupt to arrive at
7557 	 * the CPU before the status block that was posted prior to the
7558 	 * interrupt.  Reading the PCI State register will confirm whether
7559 	 * the interrupt is ours and will flush the status block.
7560 	 */
7561 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7562 		if (tg3_flag(tp, CHIP_RESETTING) ||
7563 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7564 			handled = 0;
7565 			goto out;
7566 		}
7567 	}
7568 
7569 	/*
7570 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7571 	 * chip-internal interrupt pending events.
7572 	 * Writing non-zero to intr-mbox-0 additionally tells the
7573 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7574 	 * event coalescing.
7575 	 *
7576 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7577 	 * spurious interrupts.  The flush impacts performance but
7578 	 * excessive spurious interrupts can be worse in some cases.
7579 	 */
7580 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7581 	if (tg3_irq_sync(tp))
7582 		goto out;
7583 	sblk->status &= ~SD_STATUS_UPDATED;
7584 	if (likely(tg3_has_work(tnapi))) {
7585 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7586 		napi_schedule(&tnapi->napi);
7587 	} else {
7588 		/* No work, shared interrupt perhaps?  Re-enable
7589 		 * interrupts, and flush that PCI write.
7590 		 */
7591 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7592 			       0x00000000);
7593 	}
7594 out:
7595 	return IRQ_RETVAL(handled);
7596 }
7597 
7598 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7599 {
7600 	struct tg3_napi *tnapi = dev_id;
7601 	struct tg3 *tp = tnapi->tp;
7602 	struct tg3_hw_status *sblk = tnapi->hw_status;
7603 	unsigned int handled = 1;
7604 
7605 	/* In INTx mode, it is possible for the interrupt to arrive at
7606 	 * the CPU before the status block that was posted prior to the
7607 	 * interrupt.  Reading the PCI State register will confirm whether
7608 	 * the interrupt is ours and will flush the status block.
7609 	 */
7610 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7611 		if (tg3_flag(tp, CHIP_RESETTING) ||
7612 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7613 			handled = 0;
7614 			goto out;
7615 		}
7616 	}
7617 
7618 	/*
7619 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7620 	 * chip-internal interrupt pending events.
7621 	 * Writing non-zero to intr-mbox-0 additionally tells the
7622 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7623 	 * event coalescing.
7624 	 *
7625 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7626 	 * spurious interrupts.
The flush impacts performance but 7627 * excessive spurious interrupts can be worse in some cases. 7628 */ 7629 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); 7630 7631 /* 7632 * In a shared interrupt configuration, sometimes other devices' 7633 * interrupts will scream. We record the current status tag here 7634 * so that the above check can report that the screaming interrupts 7635 * are unhandled. Eventually they will be silenced. 7636 */ 7637 tnapi->last_irq_tag = sblk->status_tag; 7638 7639 if (tg3_irq_sync(tp)) 7640 goto out; 7641 7642 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); 7643 7644 napi_schedule(&tnapi->napi); 7645 7646 out: 7647 return IRQ_RETVAL(handled); 7648 } 7649 7650 /* ISR for interrupt test */ 7651 static irqreturn_t tg3_test_isr(int irq, void *dev_id) 7652 { 7653 struct tg3_napi *tnapi = dev_id; 7654 struct tg3 *tp = tnapi->tp; 7655 struct tg3_hw_status *sblk = tnapi->hw_status; 7656 7657 if ((sblk->status & SD_STATUS_UPDATED) || 7658 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { 7659 tg3_disable_ints(tp); 7660 return IRQ_RETVAL(1); 7661 } 7662 return IRQ_RETVAL(0); 7663 } 7664 7665 #ifdef CONFIG_NET_POLL_CONTROLLER 7666 static void tg3_poll_controller(struct net_device *dev) 7667 { 7668 int i; 7669 struct tg3 *tp = netdev_priv(dev); 7670 7671 if (tg3_irq_sync(tp)) 7672 return; 7673 7674 for (i = 0; i < tp->irq_cnt; i++) 7675 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]); 7676 } 7677 #endif 7678 7679 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue) 7680 { 7681 struct tg3 *tp = netdev_priv(dev); 7682 7683 if (netif_msg_tx_err(tp)) { 7684 netdev_err(dev, "transmit timed out, resetting\n"); 7685 tg3_dump_state(tp); 7686 } 7687 7688 tg3_reset_task_schedule(tp); 7689 } 7690 7691 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */ 7692 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) 7693 { 7694 u32 base = (u32) mapping & 0xffffffff; 7695 7696 return base + len + 8 < base; 7697 } 7698 7699 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes 7700 * of any 4GB boundaries: 4G, 8G, etc 7701 */ 7702 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping, 7703 u32 len, u32 mss) 7704 { 7705 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) { 7706 u32 base = (u32) mapping & 0xffffffff; 7707 7708 return ((base + len + (mss & 0x3fff)) < base); 7709 } 7710 return 0; 7711 } 7712 7713 /* Test for DMA addresses > 40-bit */ 7714 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, 7715 int len) 7716 { 7717 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) 7718 if (tg3_flag(tp, 40BIT_DMA_BUG)) 7719 return ((u64) mapping + len) > DMA_BIT_MASK(40); 7720 return 0; 7721 #else 7722 return 0; 7723 #endif 7724 } 7725 7726 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd, 7727 dma_addr_t mapping, u32 len, u32 flags, 7728 u32 mss, u32 vlan) 7729 { 7730 txbd->addr_hi = ((u64) mapping >> 32); 7731 txbd->addr_lo = ((u64) mapping & 0xffffffff); 7732 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff); 7733 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT); 7734 } 7735 7736 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, 7737 dma_addr_t map, u32 len, u32 flags, 7738 u32 mss, u32 vlan) 7739 { 7740 struct tg3 *tp = tnapi->tp; 7741 bool hwbug = false; 7742 7743 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8) 7744 hwbug = true; 7745 7746 if (tg3_4g_overflow_test(map, 
len))
7747 		hwbug = true;
7748 
7749 	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7750 		hwbug = true;
7751 
7752 	if (tg3_40bit_overflow_test(tp, map, len))
7753 		hwbug = true;
7754 
7755 	if (tp->dma_limit) {
7756 		u32 prvidx = *entry;
7757 		u32 tmp_flag = flags & ~TXD_FLAG_END;
7758 		while (len > tp->dma_limit && *budget) {
7759 			u32 frag_len = tp->dma_limit;
7760 			len -= tp->dma_limit;
7761 
7762 			/* Avoid the 8-byte DMA problem */
7763 			if (len <= 8) {
7764 				len += tp->dma_limit / 2;
7765 				frag_len = tp->dma_limit / 2;
7766 			}
7767 
7768 			tnapi->tx_buffers[*entry].fragmented = true;
7769 
7770 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7771 				      frag_len, tmp_flag, mss, vlan);
7772 			*budget -= 1;
7773 			prvidx = *entry;
7774 			*entry = NEXT_TX(*entry);
7775 
7776 			map += frag_len;
7777 		}
7778 
7779 		if (len) {
7780 			if (*budget) {
7781 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7782 					      len, flags, mss, vlan);
7783 				*budget -= 1;
7784 				*entry = NEXT_TX(*entry);
7785 			} else {
7786 				hwbug = true;
7787 				tnapi->tx_buffers[prvidx].fragmented = false;
7788 			}
7789 		}
7790 	} else {
7791 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7792 			      len, flags, mss, vlan);
7793 		*entry = NEXT_TX(*entry);
7794 	}
7795 
7796 	return hwbug;
7797 }
7798 
7799 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7800 {
7801 	int i;
7802 	struct sk_buff *skb;
7803 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7804 
7805 	skb = txb->skb;
7806 	txb->skb = NULL;
7807 
7808 	dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7809 			 skb_headlen(skb), DMA_TO_DEVICE);
7810 
7811 	while (txb->fragmented) {
7812 		txb->fragmented = false;
7813 		entry = NEXT_TX(entry);
7814 		txb = &tnapi->tx_buffers[entry];
7815 	}
7816 
7817 	for (i = 0; i <= last; i++) {
7818 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7819 
7820 		entry = NEXT_TX(entry);
7821 		txb = &tnapi->tx_buffers[entry];
7822 
7823 		dma_unmap_page(&tnapi->tp->pdev->dev,
7824 			       dma_unmap_addr(txb, mapping),
7825 			       skb_frag_size(frag), DMA_TO_DEVICE);
7826 
7827 		while (txb->fragmented) {
7828 			txb->fragmented = false;
7829 			entry = NEXT_TX(entry);
7830 			txb = &tnapi->tx_buffers[entry];
7831 		}
7832 	}
7833 }
7834 
7835 /* Work around the 4GB and 40-bit hardware DMA bugs. */
7836 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7837 				       struct sk_buff **pskb,
7838 				       u32 *entry, u32 *budget,
7839 				       u32 base_flags, u32 mss, u32 vlan)
7840 {
7841 	struct tg3 *tp = tnapi->tp;
7842 	struct sk_buff *new_skb, *skb = *pskb;
7843 	dma_addr_t new_addr = 0;
7844 	int ret = 0;
7845 
7846 	if (tg3_asic_rev(tp) != ASIC_REV_5701)
7847 		new_skb = skb_copy(skb, GFP_ATOMIC);
7848 	else {
7849 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
7850 
7851 		new_skb = skb_copy_expand(skb,
7852 					  skb_headroom(skb) + more_headroom,
7853 					  skb_tailroom(skb), GFP_ATOMIC);
7854 	}
7855 
7856 	if (!new_skb) {
7857 		ret = -1;
7858 	} else {
7859 		/* New SKB is guaranteed to be linear.
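		 * (skb_copy() and skb_copy_expand() both return an skb
		 * whose data lives entirely in the linear area, so a
		 * single dma_map_single() of new_skb->data covers the
		 * whole packet.)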
		 */
7860 		new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7861 					  new_skb->len, DMA_TO_DEVICE);
7862 		/* Make sure the mapping succeeded */
7863 		if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7864 			dev_kfree_skb_any(new_skb);
7865 			ret = -1;
7866 		} else {
7867 			u32 save_entry = *entry;
7868 
7869 			base_flags |= TXD_FLAG_END;
7870 
7871 			tnapi->tx_buffers[*entry].skb = new_skb;
7872 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7873 					   mapping, new_addr);
7874 
7875 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7876 					    new_skb->len, base_flags,
7877 					    mss, vlan)) {
7878 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
7879 				dev_kfree_skb_any(new_skb);
7880 				ret = -1;
7881 			}
7882 		}
7883 	}
7884 
7885 	dev_consume_skb_any(skb);
7886 	*pskb = new_skb;
7887 	return ret;
7888 }
7889 
7890 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7891 {
7892 	/* Check if we will never have enough descriptors,
7893 	 * as gso_segs can be more than the current ring size.
7894 	 */
7895 	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7896 }
7897 
7898 static netdev_tx_t __tg3_start_xmit(struct sk_buff *, struct net_device *);
7899 
7900 /* Use GSO to work around all TSO packets that meet the HW bug conditions
7901  * indicated in tg3_tx_frag_set().
7902  */
7903 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7904 		       struct netdev_queue *txq, struct sk_buff *skb)
7905 {
7906 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7907 	struct sk_buff *segs, *seg, *next;
7908 
7909 	/* Estimate the number of fragments in the worst case */
7910 	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7911 		netif_tx_stop_queue(txq);
7912 
7913 		/* netif_tx_stop_queue() must be done before checking
7914 		 * tx index in tg3_tx_avail() below, because in
7915 		 * tg3_tx(), we update tx index before checking for
7916 		 * netif_tx_queue_stopped().
7917 		 */
7918 		smp_mb();
7919 		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7920 			return NETDEV_TX_BUSY;
7921 
7922 		netif_tx_wake_queue(txq);
7923 	}
7924 
7925 	segs = skb_gso_segment(skb, tp->dev->features &
7926 				    ~(NETIF_F_TSO | NETIF_F_TSO6));
7927 	if (IS_ERR(segs) || !segs) {
7928 		tnapi->tx_dropped++;
7929 		goto tg3_tso_bug_end;
7930 	}
7931 
7932 	skb_list_walk_safe(segs, seg, next) {
7933 		skb_mark_not_on_list(seg);
7934 		__tg3_start_xmit(seg, tp->dev);
7935 	}
7936 
7937 tg3_tso_bug_end:
7938 	dev_consume_skb_any(skb);
7939 
7940 	return NETDEV_TX_OK;
7941 }
7942 
7943 /* hard_start_xmit for all devices */
7944 static netdev_tx_t __tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7945 {
7946 	struct tg3 *tp = netdev_priv(dev);
7947 	u32 len, entry, base_flags, mss, vlan = 0;
7948 	u32 budget;
7949 	int i = -1, would_hit_hwbug;
7950 	dma_addr_t mapping;
7951 	struct tg3_napi *tnapi;
7952 	struct netdev_queue *txq;
7953 	unsigned int last;
7954 	struct iphdr *iph = NULL;
7955 	struct tcphdr *tcph = NULL;
7956 	__sum16 tcp_csum = 0, ip_csum = 0;
7957 	__be16 ip_tot_len = 0;
7958 
7959 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7960 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7961 	if (tg3_flag(tp, ENABLE_TSS))
7962 		tnapi++;
7963 
7964 	budget = tg3_tx_avail(tnapi);
7965 
7966 	/* We are running in BH disabled context with netif_tx_lock
7967 	 * and TX reclaim runs via tp->napi.poll inside of a software
7968 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
7969 	 * no IRQ context deadlocks to worry about either.  Rejoice!
7970 */ 7971 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) { 7972 if (!netif_tx_queue_stopped(txq)) { 7973 netif_tx_stop_queue(txq); 7974 7975 /* This is a hard error, log it. */ 7976 netdev_err(dev, 7977 "BUG! Tx Ring full when queue awake!\n"); 7978 } 7979 return NETDEV_TX_BUSY; 7980 } 7981 7982 entry = tnapi->tx_prod; 7983 base_flags = 0; 7984 7985 mss = skb_shinfo(skb)->gso_size; 7986 if (mss) { 7987 u32 tcp_opt_len, hdr_len; 7988 7989 if (skb_cow_head(skb, 0)) 7990 goto drop; 7991 7992 iph = ip_hdr(skb); 7993 tcp_opt_len = tcp_optlen(skb); 7994 7995 hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN; 7996 7997 /* HW/FW can not correctly segment packets that have been 7998 * vlan encapsulated. 7999 */ 8000 if (skb->protocol == htons(ETH_P_8021Q) || 8001 skb->protocol == htons(ETH_P_8021AD)) { 8002 if (tg3_tso_bug_gso_check(tnapi, skb)) 8003 return tg3_tso_bug(tp, tnapi, txq, skb); 8004 goto drop; 8005 } 8006 8007 if (!skb_is_gso_v6(skb)) { 8008 if (unlikely((ETH_HLEN + hdr_len) > 80) && 8009 tg3_flag(tp, TSO_BUG)) { 8010 if (tg3_tso_bug_gso_check(tnapi, skb)) 8011 return tg3_tso_bug(tp, tnapi, txq, skb); 8012 goto drop; 8013 } 8014 ip_csum = iph->check; 8015 ip_tot_len = iph->tot_len; 8016 iph->check = 0; 8017 iph->tot_len = htons(mss + hdr_len); 8018 } 8019 8020 base_flags |= (TXD_FLAG_CPU_PRE_DMA | 8021 TXD_FLAG_CPU_POST_DMA); 8022 8023 tcph = tcp_hdr(skb); 8024 tcp_csum = tcph->check; 8025 8026 if (tg3_flag(tp, HW_TSO_1) || 8027 tg3_flag(tp, HW_TSO_2) || 8028 tg3_flag(tp, HW_TSO_3)) { 8029 tcph->check = 0; 8030 base_flags &= ~TXD_FLAG_TCPUDP_CSUM; 8031 } else { 8032 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 8033 0, IPPROTO_TCP, 0); 8034 } 8035 8036 if (tg3_flag(tp, HW_TSO_3)) { 8037 mss |= (hdr_len & 0xc) << 12; 8038 if (hdr_len & 0x10) 8039 base_flags |= 0x00000010; 8040 base_flags |= (hdr_len & 0x3e0) << 5; 8041 } else if (tg3_flag(tp, HW_TSO_2)) 8042 mss |= hdr_len << 9; 8043 else if (tg3_flag(tp, HW_TSO_1) || 8044 tg3_asic_rev(tp) == ASIC_REV_5705) { 8045 if (tcp_opt_len || iph->ihl > 5) { 8046 int tsflags; 8047 8048 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); 8049 mss |= (tsflags << 11); 8050 } 8051 } else { 8052 if (tcp_opt_len || iph->ihl > 5) { 8053 int tsflags; 8054 8055 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); 8056 base_flags |= tsflags << 12; 8057 } 8058 } 8059 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 8060 /* HW/FW can not correctly checksum packets that have been 8061 * vlan encapsulated. 
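		 * Fall back to a software checksum via
		 * skb_checksum_help() below.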
8062 */ 8063 if (skb->protocol == htons(ETH_P_8021Q) || 8064 skb->protocol == htons(ETH_P_8021AD)) { 8065 if (skb_checksum_help(skb)) 8066 goto drop; 8067 } else { 8068 base_flags |= TXD_FLAG_TCPUDP_CSUM; 8069 } 8070 } 8071 8072 if (tg3_flag(tp, USE_JUMBO_BDFLAG) && 8073 !mss && skb->len > VLAN_ETH_FRAME_LEN) 8074 base_flags |= TXD_FLAG_JMB_PKT; 8075 8076 if (skb_vlan_tag_present(skb)) { 8077 base_flags |= TXD_FLAG_VLAN; 8078 vlan = skb_vlan_tag_get(skb); 8079 } 8080 8081 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) && 8082 tg3_flag(tp, TX_TSTAMP_EN)) { 8083 tg3_full_lock(tp, 0); 8084 if (!tp->pre_tx_ts) { 8085 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 8086 base_flags |= TXD_FLAG_HWTSTAMP; 8087 tg3_read_tx_tstamp(tp, &tp->pre_tx_ts); 8088 } 8089 tg3_full_unlock(tp); 8090 } 8091 8092 len = skb_headlen(skb); 8093 8094 mapping = dma_map_single(&tp->pdev->dev, skb->data, len, 8095 DMA_TO_DEVICE); 8096 if (dma_mapping_error(&tp->pdev->dev, mapping)) 8097 goto drop; 8098 8099 8100 tnapi->tx_buffers[entry].skb = skb; 8101 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); 8102 8103 would_hit_hwbug = 0; 8104 8105 if (tg3_flag(tp, 5701_DMA_BUG)) 8106 would_hit_hwbug = 1; 8107 8108 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags | 8109 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0), 8110 mss, vlan)) { 8111 would_hit_hwbug = 1; 8112 } else if (skb_shinfo(skb)->nr_frags > 0) { 8113 u32 tmp_mss = mss; 8114 8115 if (!tg3_flag(tp, HW_TSO_1) && 8116 !tg3_flag(tp, HW_TSO_2) && 8117 !tg3_flag(tp, HW_TSO_3)) 8118 tmp_mss = 0; 8119 8120 /* Now loop through additional data 8121 * fragments, and queue them. 8122 */ 8123 last = skb_shinfo(skb)->nr_frags - 1; 8124 for (i = 0; i <= last; i++) { 8125 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 8126 8127 len = skb_frag_size(frag); 8128 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0, 8129 len, DMA_TO_DEVICE); 8130 8131 tnapi->tx_buffers[entry].skb = NULL; 8132 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, 8133 mapping); 8134 if (dma_mapping_error(&tp->pdev->dev, mapping)) 8135 goto dma_error; 8136 8137 if (!budget || 8138 tg3_tx_frag_set(tnapi, &entry, &budget, mapping, 8139 len, base_flags | 8140 ((i == last) ? TXD_FLAG_END : 0), 8141 tmp_mss, vlan)) { 8142 would_hit_hwbug = 1; 8143 break; 8144 } 8145 } 8146 } 8147 8148 if (would_hit_hwbug) { 8149 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); 8150 8151 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) { 8152 /* If it's a TSO packet, do GSO instead of 8153 * allocating and copying to a large linear SKB 8154 */ 8155 if (ip_tot_len) { 8156 iph->check = ip_csum; 8157 iph->tot_len = ip_tot_len; 8158 } 8159 tcph->check = tcp_csum; 8160 return tg3_tso_bug(tp, tnapi, txq, skb); 8161 } 8162 8163 /* If the workaround fails due to memory/mapping 8164 * failure, silently drop this packet. 
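		 * (tigon3_dma_hwbug_workaround() copies the packet into a
		 * freshly allocated linear skb, whose new mapping is then
		 * re-checked against the same boundary tests.)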
8165 */ 8166 entry = tnapi->tx_prod; 8167 budget = tg3_tx_avail(tnapi); 8168 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget, 8169 base_flags, mss, vlan)) 8170 goto drop_nofree; 8171 } 8172 8173 skb_tx_timestamp(skb); 8174 netdev_tx_sent_queue(txq, skb->len); 8175 8176 /* Sync BD data before updating mailbox */ 8177 wmb(); 8178 8179 tnapi->tx_prod = entry; 8180 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { 8181 netif_tx_stop_queue(txq); 8182 8183 /* netif_tx_stop_queue() must be done before checking 8184 * tx index in tg3_tx_avail() below, because in 8185 * tg3_tx(), we update tx index before checking for 8186 * netif_tx_queue_stopped(). 8187 */ 8188 smp_mb(); 8189 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) 8190 netif_tx_wake_queue(txq); 8191 } 8192 8193 return NETDEV_TX_OK; 8194 8195 dma_error: 8196 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i); 8197 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; 8198 drop: 8199 dev_kfree_skb_any(skb); 8200 drop_nofree: 8201 tnapi->tx_dropped++; 8202 return NETDEV_TX_OK; 8203 } 8204 8205 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) 8206 { 8207 struct netdev_queue *txq; 8208 u16 skb_queue_mapping; 8209 netdev_tx_t ret; 8210 8211 skb_queue_mapping = skb_get_queue_mapping(skb); 8212 txq = netdev_get_tx_queue(dev, skb_queue_mapping); 8213 8214 ret = __tg3_start_xmit(skb, dev); 8215 8216 /* Notify the hardware that packets are ready by updating the TX ring 8217 * tail pointer. We respect netdev_xmit_more() thus avoiding poking 8218 * the hardware for every packet. To guarantee forward progress the TX 8219 * ring must be drained when it is full as indicated by 8220 * netif_xmit_stopped(). This needs to happen even when the current 8221 * skb was dropped or rejected with NETDEV_TX_BUSY. Otherwise packets 8222 * queued by previous __tg3_start_xmit() calls might get stuck in 8223 * the queue forever.
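 * For example, if an earlier skb was queued with xmit_more set (so no
 * doorbell was written for it) and the current skb is then dropped,
 * the mailbox update below must still be performed to publish the
 * earlier descriptors to the hardware.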
8224 */ 8225 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) { 8226 struct tg3_napi *tnapi; 8227 struct tg3 *tp; 8228 8229 tp = netdev_priv(dev); 8230 tnapi = &tp->napi[skb_queue_mapping]; 8231 8232 if (tg3_flag(tp, ENABLE_TSS)) 8233 tnapi++; 8234 8235 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); 8236 } 8237 8238 return ret; 8239 } 8240 8241 static void tg3_mac_loopback(struct tg3 *tp, bool enable) 8242 { 8243 if (enable) { 8244 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX | 8245 MAC_MODE_PORT_MODE_MASK); 8246 8247 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK; 8248 8249 if (!tg3_flag(tp, 5705_PLUS)) 8250 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 8251 8252 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) 8253 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 8254 else 8255 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 8256 } else { 8257 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK; 8258 8259 if (tg3_flag(tp, 5705_PLUS) || 8260 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) || 8261 tg3_asic_rev(tp) == ASIC_REV_5700) 8262 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; 8263 } 8264 8265 tw32(MAC_MODE, tp->mac_mode); 8266 udelay(40); 8267 } 8268 8269 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk) 8270 { 8271 u32 val, bmcr, mac_mode, ptest = 0; 8272 8273 tg3_phy_toggle_apd(tp, false); 8274 tg3_phy_toggle_automdix(tp, false); 8275 8276 if (extlpbk && tg3_phy_set_extloopbk(tp)) 8277 return -EIO; 8278 8279 bmcr = BMCR_FULLDPLX; 8280 switch (speed) { 8281 case SPEED_10: 8282 break; 8283 case SPEED_100: 8284 bmcr |= BMCR_SPEED100; 8285 break; 8286 case SPEED_1000: 8287 default: 8288 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 8289 speed = SPEED_100; 8290 bmcr |= BMCR_SPEED100; 8291 } else { 8292 speed = SPEED_1000; 8293 bmcr |= BMCR_SPEED1000; 8294 } 8295 } 8296 8297 if (extlpbk) { 8298 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 8299 tg3_readphy(tp, MII_CTRL1000, &val); 8300 val |= CTL1000_AS_MASTER | 8301 CTL1000_ENABLE_MASTER; 8302 tg3_writephy(tp, MII_CTRL1000, val); 8303 } else { 8304 ptest = MII_TG3_FET_PTEST_TRIM_SEL | 8305 MII_TG3_FET_PTEST_TRIM_2; 8306 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest); 8307 } 8308 } else 8309 bmcr |= BMCR_LOOPBACK; 8310 8311 tg3_writephy(tp, MII_BMCR, bmcr); 8312 8313 /* The write needs to be flushed for the FETs */ 8314 if (tp->phy_flags & TG3_PHYFLG_IS_FET) 8315 tg3_readphy(tp, MII_BMCR, &bmcr); 8316 8317 udelay(40); 8318 8319 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && 8320 tg3_asic_rev(tp) == ASIC_REV_5785) { 8321 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest | 8322 MII_TG3_FET_PTEST_FRC_TX_LINK | 8323 MII_TG3_FET_PTEST_FRC_TX_LOCK); 8324 8325 /* The write needs to be flushed for the AC131 */ 8326 tg3_readphy(tp, MII_TG3_FET_PTEST, &val); 8327 } 8328 8329 /* Reset to prevent losing 1st rx packet intermittently */ 8330 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 8331 tg3_flag(tp, 5780_CLASS)) { 8332 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 8333 udelay(10); 8334 tw32_f(MAC_RX_MODE, tp->rx_mode); 8335 } 8336 8337 mac_mode = tp->mac_mode & 8338 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); 8339 if (speed == SPEED_1000) 8340 mac_mode |= MAC_MODE_PORT_MODE_GMII; 8341 else 8342 mac_mode |= MAC_MODE_PORT_MODE_MII; 8343 8344 if (tg3_asic_rev(tp) == ASIC_REV_5700) { 8345 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK; 8346 8347 if (masked_phy_id == TG3_PHY_ID_BCM5401) 8348 mac_mode &= ~MAC_MODE_LINK_POLARITY; 8349 else if (masked_phy_id == TG3_PHY_ID_BCM5411) 8350 mac_mode |= MAC_MODE_LINK_POLARITY; 8351 8352 tg3_writephy(tp, MII_TG3_EXT_CTRL, 8353 MII_TG3_EXT_CTRL_LNK3_LED_MODE); 8354 } 8355 
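/* Commit the selected loopback port mode.  The 40 usec delay that
 * follows matches the settle time used after the other MAC_MODE
 * updates in this driver.
 */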
8356 tw32(MAC_MODE, mac_mode); 8357 udelay(40); 8358 8359 return 0; 8360 } 8361 8362 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features) 8363 { 8364 struct tg3 *tp = netdev_priv(dev); 8365 8366 if (features & NETIF_F_LOOPBACK) { 8367 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK) 8368 return; 8369 8370 spin_lock_bh(&tp->lock); 8371 tg3_mac_loopback(tp, true); 8372 netif_carrier_on(tp->dev); 8373 spin_unlock_bh(&tp->lock); 8374 netdev_info(dev, "Internal MAC loopback mode enabled.\n"); 8375 } else { 8376 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) 8377 return; 8378 8379 spin_lock_bh(&tp->lock); 8380 tg3_mac_loopback(tp, false); 8381 /* Force link status check */ 8382 tg3_setup_phy(tp, true); 8383 spin_unlock_bh(&tp->lock); 8384 netdev_info(dev, "Internal MAC loopback mode disabled.\n"); 8385 } 8386 } 8387 8388 static netdev_features_t tg3_fix_features(struct net_device *dev, 8389 netdev_features_t features) 8390 { 8391 struct tg3 *tp = netdev_priv(dev); 8392 8393 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS)) 8394 features &= ~NETIF_F_ALL_TSO; 8395 8396 return features; 8397 } 8398 8399 static int tg3_set_features(struct net_device *dev, netdev_features_t features) 8400 { 8401 netdev_features_t changed = dev->features ^ features; 8402 8403 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) 8404 tg3_set_loopback(dev, features); 8405 8406 return 0; 8407 } 8408 8409 static void tg3_rx_prodring_free(struct tg3 *tp, 8410 struct tg3_rx_prodring_set *tpr) 8411 { 8412 int i; 8413 8414 if (tpr != &tp->napi[0].prodring) { 8415 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; 8416 i = (i + 1) & tp->rx_std_ring_mask) 8417 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], 8418 tp->rx_pkt_map_sz); 8419 8420 if (tg3_flag(tp, JUMBO_CAPABLE)) { 8421 for (i = tpr->rx_jmb_cons_idx; 8422 i != tpr->rx_jmb_prod_idx; 8423 i = (i + 1) & tp->rx_jmb_ring_mask) { 8424 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], 8425 TG3_RX_JMB_MAP_SZ); 8426 } 8427 } 8428 8429 return; 8430 } 8431 8432 for (i = 0; i <= tp->rx_std_ring_mask; i++) 8433 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], 8434 tp->rx_pkt_map_sz); 8435 8436 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { 8437 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) 8438 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], 8439 TG3_RX_JMB_MAP_SZ); 8440 } 8441 } 8442 8443 /* Initialize rx rings for packet processing. 8444 * 8445 * The chip has been shut down and the driver detached from 8446 * the networking, so no interrupts or new tx packets will 8447 * end up in the driver. tp->{tx,}lock are held and thus 8448 * we may not sleep. 8449 */ 8450 static int tg3_rx_prodring_alloc(struct tg3 *tp, 8451 struct tg3_rx_prodring_set *tpr) 8452 { 8453 u32 i, rx_pkt_dma_sz; 8454 8455 tpr->rx_std_cons_idx = 0; 8456 tpr->rx_std_prod_idx = 0; 8457 tpr->rx_jmb_cons_idx = 0; 8458 tpr->rx_jmb_prod_idx = 0; 8459 8460 if (tpr != &tp->napi[0].prodring) { 8461 memset(&tpr->rx_std_buffers[0], 0, 8462 TG3_RX_STD_BUFF_RING_SIZE(tp)); 8463 if (tpr->rx_jmb_buffers) 8464 memset(&tpr->rx_jmb_buffers[0], 0, 8465 TG3_RX_JMB_BUFF_RING_SIZE(tp)); 8466 goto done; 8467 } 8468 8469 /* Zero out all descriptors. 
*/ 8470 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp)); 8471 8472 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; 8473 if (tg3_flag(tp, 5780_CLASS) && 8474 tp->dev->mtu > ETH_DATA_LEN) 8475 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ; 8476 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz); 8477 8478 /* Initialize invariants of the rings, we only set this 8479 * stuff once. This works because the card does not 8480 * write into the rx buffer posting rings. 8481 */ 8482 for (i = 0; i <= tp->rx_std_ring_mask; i++) { 8483 struct tg3_rx_buffer_desc *rxd; 8484 8485 rxd = &tpr->rx_std[i]; 8486 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT; 8487 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); 8488 rxd->opaque = (RXD_OPAQUE_RING_STD | 8489 (i << RXD_OPAQUE_INDEX_SHIFT)); 8490 } 8491 8492 /* Now allocate fresh SKBs for each rx ring. */ 8493 for (i = 0; i < tp->rx_pending; i++) { 8494 unsigned int frag_size; 8495 8496 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i, 8497 &frag_size) < 0) { 8498 netdev_warn(tp->dev, 8499 "Using a smaller RX standard ring. Only " 8500 "%d out of %d buffers were allocated " 8501 "successfully\n", i, tp->rx_pending); 8502 if (i == 0) 8503 goto initfail; 8504 tp->rx_pending = i; 8505 break; 8506 } 8507 } 8508 8509 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) 8510 goto done; 8511 8512 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp)); 8513 8514 if (!tg3_flag(tp, JUMBO_RING_ENABLE)) 8515 goto done; 8516 8517 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) { 8518 struct tg3_rx_buffer_desc *rxd; 8519 8520 rxd = &tpr->rx_jmb[i].std; 8521 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT; 8522 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | 8523 RXD_FLAG_JUMBO; 8524 rxd->opaque = (RXD_OPAQUE_RING_JUMBO | 8525 (i << RXD_OPAQUE_INDEX_SHIFT)); 8526 } 8527 8528 for (i = 0; i < tp->rx_jumbo_pending; i++) { 8529 unsigned int frag_size; 8530 8531 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i, 8532 &frag_size) < 0) { 8533 netdev_warn(tp->dev, 8534 "Using a smaller RX jumbo ring. 
Only %d " 8535 "out of %d buffers were allocated " 8536 "successfully\n", i, tp->rx_jumbo_pending); 8537 if (i == 0) 8538 goto initfail; 8539 tp->rx_jumbo_pending = i; 8540 break; 8541 } 8542 } 8543 8544 done: 8545 return 0; 8546 8547 initfail: 8548 tg3_rx_prodring_free(tp, tpr); 8549 return -ENOMEM; 8550 } 8551 8552 static void tg3_rx_prodring_fini(struct tg3 *tp, 8553 struct tg3_rx_prodring_set *tpr) 8554 { 8555 kfree(tpr->rx_std_buffers); 8556 tpr->rx_std_buffers = NULL; 8557 kfree(tpr->rx_jmb_buffers); 8558 tpr->rx_jmb_buffers = NULL; 8559 if (tpr->rx_std) { 8560 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp), 8561 tpr->rx_std, tpr->rx_std_mapping); 8562 tpr->rx_std = NULL; 8563 } 8564 if (tpr->rx_jmb) { 8565 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp), 8566 tpr->rx_jmb, tpr->rx_jmb_mapping); 8567 tpr->rx_jmb = NULL; 8568 } 8569 } 8570 8571 static int tg3_rx_prodring_init(struct tg3 *tp, 8572 struct tg3_rx_prodring_set *tpr) 8573 { 8574 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp), 8575 GFP_KERNEL); 8576 if (!tpr->rx_std_buffers) 8577 return -ENOMEM; 8578 8579 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev, 8580 TG3_RX_STD_RING_BYTES(tp), 8581 &tpr->rx_std_mapping, 8582 GFP_KERNEL); 8583 if (!tpr->rx_std) 8584 goto err_out; 8585 8586 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { 8587 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp), 8588 GFP_KERNEL); 8589 if (!tpr->rx_jmb_buffers) 8590 goto err_out; 8591 8592 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev, 8593 TG3_RX_JMB_RING_BYTES(tp), 8594 &tpr->rx_jmb_mapping, 8595 GFP_KERNEL); 8596 if (!tpr->rx_jmb) 8597 goto err_out; 8598 } 8599 8600 return 0; 8601 8602 err_out: 8603 tg3_rx_prodring_fini(tp, tpr); 8604 return -ENOMEM; 8605 } 8606 8607 /* Free up pending packets in all rx/tx rings. 8608 * 8609 * The chip has been shut down and the driver detached from 8610 * the networking, so no interrupts or new tx packets will 8611 * end up in the driver. tp->{tx,}lock is not held and we are not 8612 * in an interrupt context and thus may sleep. 8613 */ 8614 static void tg3_free_rings(struct tg3 *tp) 8615 { 8616 int i, j; 8617 8618 for (j = 0; j < tp->irq_cnt; j++) { 8619 struct tg3_napi *tnapi = &tp->napi[j]; 8620 8621 tg3_rx_prodring_free(tp, &tnapi->prodring); 8622 8623 if (!tnapi->tx_buffers) 8624 continue; 8625 8626 for (i = 0; i < TG3_TX_RING_SIZE; i++) { 8627 struct sk_buff *skb = tnapi->tx_buffers[i].skb; 8628 8629 if (!skb) 8630 continue; 8631 8632 tg3_tx_skb_unmap(tnapi, i, 8633 skb_shinfo(skb)->nr_frags - 1); 8634 8635 dev_consume_skb_any(skb); 8636 } 8637 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j)); 8638 } 8639 } 8640 8641 /* Initialize tx/rx rings for packet processing. 8642 * 8643 * The chip has been shut down and the driver detached from 8644 * the networking, so no interrupts or new tx packets will 8645 * end up in the driver. tp->{tx,}lock are held and thus 8646 * we may not sleep. 8647 */ 8648 static int tg3_init_rings(struct tg3 *tp) 8649 { 8650 int i; 8651 8652 /* Free up all the SKBs. 
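 * together with their DMA mappings left over from any previous run,
 * so that the rings can be zeroed and refilled from a clean state.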
*/ 8653 tg3_free_rings(tp); 8654 8655 for (i = 0; i < tp->irq_cnt; i++) { 8656 struct tg3_napi *tnapi = &tp->napi[i]; 8657 8658 tnapi->last_tag = 0; 8659 tnapi->last_irq_tag = 0; 8660 tnapi->hw_status->status = 0; 8661 tnapi->hw_status->status_tag = 0; 8662 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 8663 8664 tnapi->tx_prod = 0; 8665 tnapi->tx_cons = 0; 8666 if (tnapi->tx_ring) 8667 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES); 8668 8669 tnapi->rx_rcb_ptr = 0; 8670 if (tnapi->rx_rcb) 8671 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); 8672 8673 if (tnapi->prodring.rx_std && 8674 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { 8675 tg3_free_rings(tp); 8676 return -ENOMEM; 8677 } 8678 } 8679 8680 return 0; 8681 } 8682 8683 static void tg3_mem_tx_release(struct tg3 *tp) 8684 { 8685 int i; 8686 8687 for (i = 0; i < tp->irq_max; i++) { 8688 struct tg3_napi *tnapi = &tp->napi[i]; 8689 8690 if (tnapi->tx_ring) { 8691 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES, 8692 tnapi->tx_ring, tnapi->tx_desc_mapping); 8693 tnapi->tx_ring = NULL; 8694 } 8695 8696 kfree(tnapi->tx_buffers); 8697 tnapi->tx_buffers = NULL; 8698 } 8699 } 8700 8701 static int tg3_mem_tx_acquire(struct tg3 *tp) 8702 { 8703 int i; 8704 struct tg3_napi *tnapi = &tp->napi[0]; 8705 8706 /* If multivector TSS is enabled, vector 0 does not handle 8707 * tx interrupts. Don't allocate any resources for it. 8708 */ 8709 if (tg3_flag(tp, ENABLE_TSS)) 8710 tnapi++; 8711 8712 for (i = 0; i < tp->txq_cnt; i++, tnapi++) { 8713 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE, 8714 sizeof(struct tg3_tx_ring_info), 8715 GFP_KERNEL); 8716 if (!tnapi->tx_buffers) 8717 goto err_out; 8718 8719 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev, 8720 TG3_TX_RING_BYTES, 8721 &tnapi->tx_desc_mapping, 8722 GFP_KERNEL); 8723 if (!tnapi->tx_ring) 8724 goto err_out; 8725 } 8726 8727 return 0; 8728 8729 err_out: 8730 tg3_mem_tx_release(tp); 8731 return -ENOMEM; 8732 } 8733 8734 static void tg3_mem_rx_release(struct tg3 *tp) 8735 { 8736 int i; 8737 8738 for (i = 0; i < tp->irq_max; i++) { 8739 struct tg3_napi *tnapi = &tp->napi[i]; 8740 8741 tg3_rx_prodring_fini(tp, &tnapi->prodring); 8742 8743 if (!tnapi->rx_rcb) 8744 continue; 8745 8746 dma_free_coherent(&tp->pdev->dev, 8747 TG3_RX_RCB_RING_BYTES(tp), 8748 tnapi->rx_rcb, 8749 tnapi->rx_rcb_mapping); 8750 tnapi->rx_rcb = NULL; 8751 } 8752 } 8753 8754 static int tg3_mem_rx_acquire(struct tg3 *tp) 8755 { 8756 unsigned int i, limit; 8757 8758 limit = tp->rxq_cnt; 8759 8760 /* If RSS is enabled, we need a (dummy) producer ring 8761 * set on vector zero. This is the true hw prodring. 8762 */ 8763 if (tg3_flag(tp, ENABLE_RSS)) 8764 limit++; 8765 8766 for (i = 0; i < limit; i++) { 8767 struct tg3_napi *tnapi = &tp->napi[i]; 8768 8769 if (tg3_rx_prodring_init(tp, &tnapi->prodring)) 8770 goto err_out; 8771 8772 /* If multivector RSS is enabled, vector 0 8773 * does not handle rx or tx interrupts. 8774 * Don't allocate any resources for it. 8775 */ 8776 if (!i && tg3_flag(tp, ENABLE_RSS)) 8777 continue; 8778 8779 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev, 8780 TG3_RX_RCB_RING_BYTES(tp), 8781 &tnapi->rx_rcb_mapping, 8782 GFP_KERNEL); 8783 if (!tnapi->rx_rcb) 8784 goto err_out; 8785 } 8786 8787 return 0; 8788 8789 err_out: 8790 tg3_mem_rx_release(tp); 8791 return -ENOMEM; 8792 } 8793 8794 /* 8795 * Must only be invoked with interrupt sources disabled and 8796 * the hardware shut down.
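 * Otherwise the device could still be DMAing into the coherent
 * memory that is freed below.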
*/ 8798 static void tg3_free_consistent(struct tg3 *tp) 8799 { 8800 int i; 8801 8802 for (i = 0; i < tp->irq_cnt; i++) { 8803 struct tg3_napi *tnapi = &tp->napi[i]; 8804 8805 if (tnapi->hw_status) { 8806 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE, 8807 tnapi->hw_status, 8808 tnapi->status_mapping); 8809 tnapi->hw_status = NULL; 8810 } 8811 } 8812 8813 tg3_mem_rx_release(tp); 8814 tg3_mem_tx_release(tp); 8815 8816 /* tp->hw_stats can be referenced safely: 8817 * 1. under rtnl_lock 8818 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set. 8819 */ 8820 if (tp->hw_stats) { 8821 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), 8822 tp->hw_stats, tp->stats_mapping); 8823 tp->hw_stats = NULL; 8824 } 8825 } 8826 8827 /* 8828 * Must only be invoked with interrupt sources disabled and 8829 * the hardware shut down. Can sleep. 8830 */ 8831 static int tg3_alloc_consistent(struct tg3 *tp) 8832 { 8833 int i; 8834 8835 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev, 8836 sizeof(struct tg3_hw_stats), 8837 &tp->stats_mapping, GFP_KERNEL); 8838 if (!tp->hw_stats) 8839 goto err_out; 8840 8841 for (i = 0; i < tp->irq_cnt; i++) { 8842 struct tg3_napi *tnapi = &tp->napi[i]; 8843 struct tg3_hw_status *sblk; 8844 8845 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev, 8846 TG3_HW_STATUS_SIZE, 8847 &tnapi->status_mapping, 8848 GFP_KERNEL); 8849 if (!tnapi->hw_status) 8850 goto err_out; 8851 8852 sblk = tnapi->hw_status; 8853 8854 if (tg3_flag(tp, ENABLE_RSS)) { 8855 u16 *prodptr = NULL; 8856 8857 /* 8858 * When RSS is enabled, the status block format changes 8859 * slightly. The "rx_jumbo_consumer", "reserved", 8860 * and "rx_mini_consumer" members get mapped to the 8861 * other three rx return ring producer indexes. 8862 */ 8863 switch (i) { 8864 case 1: 8865 prodptr = &sblk->idx[0].rx_producer; 8866 break; 8867 case 2: 8868 prodptr = &sblk->rx_jumbo_consumer; 8869 break; 8870 case 3: 8871 prodptr = &sblk->reserved; 8872 break; 8873 case 4: 8874 prodptr = &sblk->rx_mini_consumer; 8875 break; 8876 } 8877 tnapi->rx_rcb_prod_idx = prodptr; 8878 } else { 8879 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer; 8880 } 8881 } 8882 8883 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp)) 8884 goto err_out; 8885 8886 return 0; 8887 8888 err_out: 8889 tg3_free_consistent(tp); 8890 return -ENOMEM; 8891 } 8892 8893 #define MAX_WAIT_CNT 1000 8894 8895 /* To stop a block, clear the enable bit and poll till it 8896 * clears. tp->lock is held. 8897 */ 8898 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent) 8899 { 8900 unsigned int i; 8901 u32 val; 8902 8903 if (tg3_flag(tp, 5705_PLUS)) { 8904 switch (ofs) { 8905 case RCVLSC_MODE: 8906 case DMAC_MODE: 8907 case MBFREE_MODE: 8908 case BUFMGR_MODE: 8909 case MEMARB_MODE: 8910 /* We can't enable/disable these bits of the 8911 * 5705/5750, just say success.
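 * Polling for the enable bit to clear would only time out.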
8912 */ 8913 return 0; 8914 8915 default: 8916 break; 8917 } 8918 } 8919 8920 val = tr32(ofs); 8921 val &= ~enable_bit; 8922 tw32_f(ofs, val); 8923 8924 for (i = 0; i < MAX_WAIT_CNT; i++) { 8925 if (pci_channel_offline(tp->pdev)) { 8926 dev_err(&tp->pdev->dev, 8927 "tg3_stop_block device offline, " 8928 "ofs=%lx enable_bit=%x\n", 8929 ofs, enable_bit); 8930 return -ENODEV; 8931 } 8932 8933 udelay(100); 8934 val = tr32(ofs); 8935 if ((val & enable_bit) == 0) 8936 break; 8937 } 8938 8939 if (i == MAX_WAIT_CNT && !silent) { 8940 dev_err(&tp->pdev->dev, 8941 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n", 8942 ofs, enable_bit); 8943 return -ENODEV; 8944 } 8945 8946 return 0; 8947 } 8948 8949 /* tp->lock is held. */ 8950 static int tg3_abort_hw(struct tg3 *tp, bool silent) 8951 { 8952 int i, err; 8953 8954 tg3_disable_ints(tp); 8955 8956 if (pci_channel_offline(tp->pdev)) { 8957 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE); 8958 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; 8959 err = -ENODEV; 8960 goto err_no_dev; 8961 } 8962 8963 tp->rx_mode &= ~RX_MODE_ENABLE; 8964 tw32_f(MAC_RX_MODE, tp->rx_mode); 8965 udelay(10); 8966 8967 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent); 8968 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent); 8969 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent); 8970 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent); 8971 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent); 8972 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent); 8973 8974 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent); 8975 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent); 8976 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent); 8977 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent); 8978 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent); 8979 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent); 8980 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent); 8981 8982 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; 8983 tw32_f(MAC_MODE, tp->mac_mode); 8984 udelay(40); 8985 8986 tp->tx_mode &= ~TX_MODE_ENABLE; 8987 tw32_f(MAC_TX_MODE, tp->tx_mode); 8988 8989 for (i = 0; i < MAX_WAIT_CNT; i++) { 8990 udelay(100); 8991 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE)) 8992 break; 8993 } 8994 if (i >= MAX_WAIT_CNT) { 8995 dev_err(&tp->pdev->dev, 8996 "%s timed out, TX_MODE_ENABLE will not clear " 8997 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE)); 8998 err |= -ENODEV; 8999 } 9000 9001 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent); 9002 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent); 9003 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent); 9004 9005 tw32(FTQ_RESET, 0xffffffff); 9006 tw32(FTQ_RESET, 0x00000000); 9007 9008 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); 9009 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); 9010 9011 err_no_dev: 9012 for (i = 0; i < tp->irq_cnt; i++) { 9013 struct tg3_napi *tnapi = &tp->napi[i]; 9014 if (tnapi->hw_status) 9015 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 9016 } 9017 9018 return err; 9019 } 9020 9021 /* Save PCI command register before chip reset */ 9022 static void tg3_save_pci_state(struct tg3 *tp) 9023 { 9024 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd); 9025 } 9026 9027 /* Restore PCI state after chip reset */ 9028 static void 
tg3_restore_pci_state(struct tg3 *tp) 9029 { 9030 u32 val; 9031 9032 /* Re-enable indirect register accesses. */ 9033 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 9034 tp->misc_host_ctrl); 9035 9036 /* Set MAX PCI retry to zero. */ 9037 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); 9038 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 && 9039 tg3_flag(tp, PCIX_MODE)) 9040 val |= PCISTATE_RETRY_SAME_DMA; 9041 /* Allow reads and writes to the APE register and memory space. */ 9042 if (tg3_flag(tp, ENABLE_APE)) 9043 val |= PCISTATE_ALLOW_APE_CTLSPC_WR | 9044 PCISTATE_ALLOW_APE_SHMEM_WR | 9045 PCISTATE_ALLOW_APE_PSPACE_WR; 9046 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); 9047 9048 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); 9049 9050 if (!tg3_flag(tp, PCI_EXPRESS)) { 9051 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, 9052 tp->pci_cacheline_sz); 9053 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, 9054 tp->pci_lat_timer); 9055 } 9056 9057 /* Make sure PCI-X relaxed ordering bit is clear. */ 9058 if (tg3_flag(tp, PCIX_MODE)) { 9059 u16 pcix_cmd; 9060 9061 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 9062 &pcix_cmd); 9063 pcix_cmd &= ~PCI_X_CMD_ERO; 9064 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 9065 pcix_cmd); 9066 } 9067 9068 if (tg3_flag(tp, 5780_CLASS)) { 9069 9070 /* Chip reset on 5780 will reset MSI enable bit, 9071 * so need to restore it. 9072 */ 9073 if (tg3_flag(tp, USING_MSI)) { 9074 u16 ctrl; 9075 9076 pci_read_config_word(tp->pdev, 9077 tp->msi_cap + PCI_MSI_FLAGS, 9078 &ctrl); 9079 pci_write_config_word(tp->pdev, 9080 tp->msi_cap + PCI_MSI_FLAGS, 9081 ctrl | PCI_MSI_FLAGS_ENABLE); 9082 val = tr32(MSGINT_MODE); 9083 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE); 9084 } 9085 } 9086 } 9087 9088 static void tg3_override_clk(struct tg3 *tp) 9089 { 9090 u32 val; 9091 9092 switch (tg3_asic_rev(tp)) { 9093 case ASIC_REV_5717: 9094 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); 9095 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val | 9096 TG3_CPMU_MAC_ORIDE_ENABLE); 9097 break; 9098 9099 case ASIC_REV_5719: 9100 case ASIC_REV_5720: 9101 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN); 9102 break; 9103 9104 default: 9105 return; 9106 } 9107 } 9108 9109 static void tg3_restore_clk(struct tg3 *tp) 9110 { 9111 u32 val; 9112 9113 switch (tg3_asic_rev(tp)) { 9114 case ASIC_REV_5717: 9115 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); 9116 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, 9117 val & ~TG3_CPMU_MAC_ORIDE_ENABLE); 9118 break; 9119 9120 case ASIC_REV_5719: 9121 case ASIC_REV_5720: 9122 val = tr32(TG3_CPMU_CLCK_ORIDE); 9123 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN); 9124 break; 9125 9126 default: 9127 return; 9128 } 9129 } 9130 9131 /* tp->lock is held. */ 9132 static int tg3_chip_reset(struct tg3 *tp) 9133 __releases(tp->lock) 9134 __acquires(tp->lock) 9135 { 9136 u32 val; 9137 void (*write_op)(struct tg3 *, u32, u32); 9138 int i, err; 9139 9140 if (!pci_device_is_present(tp->pdev)) 9141 return -ENODEV; 9142 9143 tg3_nvram_lock(tp); 9144 9145 tg3_ape_lock(tp, TG3_APE_LOCK_GRC); 9146 9147 /* No matching tg3_nvram_unlock() after this because 9148 * chip reset below will undo the nvram lock. 9149 */ 9150 tp->nvram_lock_cnt = 0; 9151 9152 /* GRC_MISC_CFG core clock reset will clear the memory 9153 * enable bit in PCI register 4 and the MSI enable bit 9154 * on some chips, so we save relevant registers here. 
9155 */ 9156 tg3_save_pci_state(tp); 9157 9158 if (tg3_asic_rev(tp) == ASIC_REV_5752 || 9159 tg3_flag(tp, 5755_PLUS)) 9160 tw32(GRC_FASTBOOT_PC, 0); 9161 9162 /* 9163 * We must avoid the readl() that normally takes place. 9164 * It locks machines, causes machine checks, and other 9165 * fun things. So, temporarily disable the 5701 9166 * hardware workaround, while we do the reset. 9167 */ 9168 write_op = tp->write32; 9169 if (write_op == tg3_write_flush_reg32) 9170 tp->write32 = tg3_write32; 9171 9172 /* Prevent the irq handler from reading or writing PCI registers 9173 * during chip reset when the memory enable bit in the PCI command 9174 * register may be cleared. The chip does not generate interrupt 9175 * at this time, but the irq handler may still be called due to irq 9176 * sharing or irqpoll. 9177 */ 9178 tg3_flag_set(tp, CHIP_RESETTING); 9179 for (i = 0; i < tp->irq_cnt; i++) { 9180 struct tg3_napi *tnapi = &tp->napi[i]; 9181 if (tnapi->hw_status) { 9182 tnapi->hw_status->status = 0; 9183 tnapi->hw_status->status_tag = 0; 9184 } 9185 tnapi->last_tag = 0; 9186 tnapi->last_irq_tag = 0; 9187 } 9188 smp_mb(); 9189 9190 tg3_full_unlock(tp); 9191 9192 for (i = 0; i < tp->irq_cnt; i++) 9193 synchronize_irq(tp->napi[i].irq_vec); 9194 9195 tg3_full_lock(tp, 0); 9196 9197 if (tg3_asic_rev(tp) == ASIC_REV_57780) { 9198 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; 9199 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); 9200 } 9201 9202 /* do the reset */ 9203 val = GRC_MISC_CFG_CORECLK_RESET; 9204 9205 if (tg3_flag(tp, PCI_EXPRESS)) { 9206 /* Force PCIe 1.0a mode */ 9207 if (tg3_asic_rev(tp) != ASIC_REV_5785 && 9208 !tg3_flag(tp, 57765_PLUS) && 9209 tr32(TG3_PCIE_PHY_TSTCTL) == 9210 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM)) 9211 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM); 9212 9213 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) { 9214 tw32(GRC_MISC_CFG, (1 << 29)); 9215 val |= (1 << 29); 9216 } 9217 } 9218 9219 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 9220 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET); 9221 tw32(GRC_VCPU_EXT_CTRL, 9222 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU); 9223 } 9224 9225 /* Set the clock to the highest frequency to avoid timeouts. With link 9226 * aware mode, the clock speed could be slow and bootcode does not 9227 * complete within the expected time. Override the clock to allow the 9228 * bootcode to finish sooner and then restore it. 9229 */ 9230 tg3_override_clk(tp); 9231 9232 /* Manage gphy power for all CPMU absent PCIe devices. */ 9233 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT)) 9234 val |= GRC_MISC_CFG_KEEP_GPHY_POWER; 9235 9236 tw32(GRC_MISC_CFG, val); 9237 9238 /* restore 5701 hardware bug workaround write method */ 9239 tp->write32 = write_op; 9240 9241 /* Unfortunately, we have to delay before the PCI read back. 9242 * Some 575X chips even will not respond to a PCI cfg access 9243 * when the reset command is given to the chip. 9244 * 9245 * How do these hardware designers expect things to work 9246 * properly if the PCI write is posted for a long period 9247 * of time? It is always necessary to have some method by 9248 * which a register read back can occur to push the write 9249 * out which does the reset. 9250 * 9251 * For most tg3 variants the trick below was working. 9252 * Ho hum... 9253 */ 9254 udelay(120); 9255 9256 /* Flush PCI posted writes. 
The normal MMIO registers 9257 * are inaccessible at this time so this is the only 9258 * way to do this reliably (actually, this is no longer 9259 * the case, see above). I tried to use indirect 9260 * register read/write but this upset some 5701 variants. 9261 */ 9262 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val); 9263 9264 udelay(120); 9265 9266 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) { 9267 u16 val16; 9268 9269 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) { 9270 int j; 9271 u32 cfg_val; 9272 9273 /* Wait for link training to complete. */ 9274 for (j = 0; j < 5000; j++) 9275 udelay(100); 9276 9277 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val); 9278 pci_write_config_dword(tp->pdev, 0xc4, 9279 cfg_val | (1 << 15)); 9280 } 9281 9282 /* Clear the "no snoop" and "relaxed ordering" bits. */ 9283 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN; 9284 /* 9285 * Older PCIe devices only support the 128 byte 9286 * MPS setting. Enforce the restriction. 9287 */ 9288 if (!tg3_flag(tp, CPMU_PRESENT)) 9289 val16 |= PCI_EXP_DEVCTL_PAYLOAD; 9290 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16); 9291 9292 /* Clear error status */ 9293 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA, 9294 PCI_EXP_DEVSTA_CED | 9295 PCI_EXP_DEVSTA_NFED | 9296 PCI_EXP_DEVSTA_FED | 9297 PCI_EXP_DEVSTA_URD); 9298 } 9299 9300 tg3_restore_pci_state(tp); 9301 9302 tg3_flag_clear(tp, CHIP_RESETTING); 9303 tg3_flag_clear(tp, ERROR_PROCESSED); 9304 9305 val = 0; 9306 if (tg3_flag(tp, 5780_CLASS)) 9307 val = tr32(MEMARB_MODE); 9308 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); 9309 9310 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) { 9311 tg3_stop_fw(tp); 9312 tw32(0x5000, 0x400); 9313 } 9314 9315 if (tg3_flag(tp, IS_SSB_CORE)) { 9316 /* 9317 * BCM4785: In order to avoid repercussions from using 9318 * potentially defective internal ROM, stop the Rx RISC CPU, 9319 * which is not required. 9320 */ 9321 tg3_stop_fw(tp); 9322 tg3_halt_cpu(tp, RX_CPU_BASE); 9323 } 9324 9325 err = tg3_poll_fw(tp); 9326 if (err) 9327 return err; 9328 9329 tw32(GRC_MODE, tp->grc_mode); 9330 9331 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) { 9332 val = tr32(0xc4); 9333 9334 tw32(0xc4, val | (1 << 15)); 9335 } 9336 9337 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 && 9338 tg3_asic_rev(tp) == ASIC_REV_5705) { 9339 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE; 9340 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) 9341 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN; 9342 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); 9343 } 9344 9345 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 9346 tp->mac_mode = MAC_MODE_PORT_MODE_TBI; 9347 val = tp->mac_mode; 9348 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { 9349 tp->mac_mode = MAC_MODE_PORT_MODE_GMII; 9350 val = tp->mac_mode; 9351 } else 9352 val = 0; 9353 9354 tw32_f(MAC_MODE, val); 9355 udelay(40); 9356 9357 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); 9358 9359 tg3_mdio_start(tp); 9360 9361 if (tg3_flag(tp, PCI_EXPRESS) && 9362 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 && 9363 tg3_asic_rev(tp) != ASIC_REV_5785 && 9364 !tg3_flag(tp, 57765_PLUS)) { 9365 val = tr32(0x7c00); 9366 9367 tw32(0x7c00, val | (1 << 25)); 9368 } 9369 9370 tg3_restore_clk(tp); 9371 9372 /* Increase the core clock speed to fix the tx timeout issue for 5762 9373 * with 100Mbps link speed.
9374 */ 9375 if (tg3_asic_rev(tp) == ASIC_REV_5762) { 9376 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); 9377 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val | 9378 TG3_CPMU_MAC_ORIDE_ENABLE); 9379 } 9380 9381 /* Reprobe ASF enable state. */ 9382 tg3_flag_clear(tp, ENABLE_ASF); 9383 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | 9384 TG3_PHYFLG_KEEP_LINK_ON_PWRDN); 9385 9386 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE); 9387 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); 9388 if (val == NIC_SRAM_DATA_SIG_MAGIC) { 9389 u32 nic_cfg; 9390 9391 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); 9392 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { 9393 tg3_flag_set(tp, ENABLE_ASF); 9394 tp->last_event_jiffies = jiffies; 9395 if (tg3_flag(tp, 5750_PLUS)) 9396 tg3_flag_set(tp, ASF_NEW_HANDSHAKE); 9397 9398 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg); 9399 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK) 9400 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK; 9401 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID) 9402 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN; 9403 } 9404 } 9405 9406 return 0; 9407 } 9408 9409 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *); 9410 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *); 9411 static void __tg3_set_rx_mode(struct net_device *); 9412 9413 /* tp->lock is held. */ 9414 static int tg3_halt(struct tg3 *tp, int kind, bool silent) 9415 { 9416 int err, i; 9417 9418 tg3_stop_fw(tp); 9419 9420 tg3_write_sig_pre_reset(tp, kind); 9421 9422 tg3_abort_hw(tp, silent); 9423 err = tg3_chip_reset(tp); 9424 9425 __tg3_set_mac_addr(tp, false); 9426 9427 tg3_write_sig_legacy(tp, kind); 9428 tg3_write_sig_post_reset(tp, kind); 9429 9430 if (tp->hw_stats) { 9431 /* Save the stats across chip resets... */ 9432 tg3_get_nstats(tp, &tp->net_stats_prev); 9433 tg3_get_estats(tp, &tp->estats_prev); 9434 9435 /* And make sure the next sample is new data */ 9436 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); 9437 9438 for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) { 9439 struct tg3_napi *tnapi = &tp->napi[i]; 9440 9441 tnapi->rx_dropped = 0; 9442 tnapi->tx_dropped = 0; 9443 } 9444 } 9445 9446 return err; 9447 } 9448 9449 static int tg3_set_mac_addr(struct net_device *dev, void *p) 9450 { 9451 struct tg3 *tp = netdev_priv(dev); 9452 struct sockaddr *addr = p; 9453 int err = 0; 9454 bool skip_mac_1 = false; 9455 9456 if (!is_valid_ether_addr(addr->sa_data)) 9457 return -EADDRNOTAVAIL; 9458 9459 eth_hw_addr_set(dev, addr->sa_data); 9460 9461 if (!netif_running(dev)) 9462 return 0; 9463 9464 if (tg3_flag(tp, ENABLE_ASF)) { 9465 u32 addr0_high, addr0_low, addr1_high, addr1_low; 9466 9467 addr0_high = tr32(MAC_ADDR_0_HIGH); 9468 addr0_low = tr32(MAC_ADDR_0_LOW); 9469 addr1_high = tr32(MAC_ADDR_1_HIGH); 9470 addr1_low = tr32(MAC_ADDR_1_LOW); 9471 9472 /* Skip MAC addr 1 if ASF is using it. */ 9473 if ((addr0_high != addr1_high || addr0_low != addr1_low) && 9474 !(addr1_high == 0 && addr1_low == 0)) 9475 skip_mac_1 = true; 9476 } 9477 spin_lock_bh(&tp->lock); 9478 __tg3_set_mac_addr(tp, skip_mac_1); 9479 __tg3_set_rx_mode(dev); 9480 spin_unlock_bh(&tp->lock); 9481 9482 return err; 9483 } 9484 9485 /* tp->lock is held. 
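 * tg3_set_bdinfo() writes one TG3_BDINFO control block into NIC SRAM:
 * the 64-bit host address of the ring, the maxlen/flags word and, on
 * pre-5705 chips, the NIC SRAM address of the descriptors.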
*/ 9486 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr, 9487 dma_addr_t mapping, u32 maxlen_flags, 9488 u32 nic_addr) 9489 { 9490 tg3_write_mem(tp, 9491 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH), 9492 ((u64) mapping >> 32)); 9493 tg3_write_mem(tp, 9494 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW), 9495 ((u64) mapping & 0xffffffff)); 9496 tg3_write_mem(tp, 9497 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS), 9498 maxlen_flags); 9499 9500 if (!tg3_flag(tp, 5705_PLUS)) 9501 tg3_write_mem(tp, 9502 (bdinfo_addr + TG3_BDINFO_NIC_ADDR), 9503 nic_addr); 9504 } 9505 9506 9507 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec) 9508 { 9509 int i = 0; 9510 9511 if (!tg3_flag(tp, ENABLE_TSS)) { 9512 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); 9513 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); 9514 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); 9515 } else { 9516 tw32(HOSTCC_TXCOL_TICKS, 0); 9517 tw32(HOSTCC_TXMAX_FRAMES, 0); 9518 tw32(HOSTCC_TXCOAL_MAXF_INT, 0); 9519 9520 for (; i < tp->txq_cnt; i++) { 9521 u32 reg; 9522 9523 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; 9524 tw32(reg, ec->tx_coalesce_usecs); 9525 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; 9526 tw32(reg, ec->tx_max_coalesced_frames); 9527 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18; 9528 tw32(reg, ec->tx_max_coalesced_frames_irq); 9529 } 9530 } 9531 9532 for (; i < tp->irq_max - 1; i++) { 9533 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); 9534 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); 9535 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); 9536 } 9537 } 9538 9539 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec) 9540 { 9541 int i = 0; 9542 u32 limit = tp->rxq_cnt; 9543 9544 if (!tg3_flag(tp, ENABLE_RSS)) { 9545 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); 9546 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); 9547 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); 9548 limit--; 9549 } else { 9550 tw32(HOSTCC_RXCOL_TICKS, 0); 9551 tw32(HOSTCC_RXMAX_FRAMES, 0); 9552 tw32(HOSTCC_RXCOAL_MAXF_INT, 0); 9553 } 9554 9555 for (; i < limit; i++) { 9556 u32 reg; 9557 9558 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18; 9559 tw32(reg, ec->rx_coalesce_usecs); 9560 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18; 9561 tw32(reg, ec->rx_max_coalesced_frames); 9562 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; 9563 tw32(reg, ec->rx_max_coalesced_frames_irq); 9564 } 9565 9566 for (; i < tp->irq_max - 1; i++) { 9567 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0); 9568 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); 9569 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); 9570 } 9571 } 9572 9573 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) 9574 { 9575 tg3_coal_tx_init(tp, ec); 9576 tg3_coal_rx_init(tp, ec); 9577 9578 if (!tg3_flag(tp, 5705_PLUS)) { 9579 u32 val = ec->stats_block_coalesce_usecs; 9580 9581 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); 9582 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); 9583 9584 if (!tp->link_up) 9585 val = 0; 9586 9587 tw32(HOSTCC_STAT_COAL_TICKS, val); 9588 } 9589 } 9590 9591 /* tp->lock is held. */ 9592 static void tg3_tx_rcbs_disable(struct tg3 *tp) 9593 { 9594 u32 txrcb, limit; 9595 9596 /* Disable all transmit rings but the first. 
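 * The number of send BDINFO blocks present in NIC SRAM depends on the
 * chip family (16, 4, 2 or just 1), so compute the limit before
 * marking the extra ones disabled.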
*/ 9597 if (!tg3_flag(tp, 5705_PLUS)) 9598 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; 9599 else if (tg3_flag(tp, 5717_PLUS)) 9600 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4; 9601 else if (tg3_flag(tp, 57765_CLASS) || 9602 tg3_asic_rev(tp) == ASIC_REV_5762) 9603 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; 9604 else 9605 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; 9606 9607 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; 9608 txrcb < limit; txrcb += TG3_BDINFO_SIZE) 9609 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, 9610 BDINFO_FLAGS_DISABLED); 9611 } 9612 9613 /* tp->lock is held. */ 9614 static void tg3_tx_rcbs_init(struct tg3 *tp) 9615 { 9616 int i = 0; 9617 u32 txrcb = NIC_SRAM_SEND_RCB; 9618 9619 if (tg3_flag(tp, ENABLE_TSS)) 9620 i++; 9621 9622 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) { 9623 struct tg3_napi *tnapi = &tp->napi[i]; 9624 9625 if (!tnapi->tx_ring) 9626 continue; 9627 9628 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, 9629 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT), 9630 NIC_SRAM_TX_BUFFER_DESC); 9631 } 9632 } 9633 9634 /* tp->lock is held. */ 9635 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp) 9636 { 9637 u32 rxrcb, limit; 9638 9639 /* Disable all receive return rings but the first. */ 9640 if (tg3_flag(tp, 5717_PLUS)) 9641 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; 9642 else if (!tg3_flag(tp, 5705_PLUS)) 9643 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; 9644 else if (tg3_asic_rev(tp) == ASIC_REV_5755 || 9645 tg3_asic_rev(tp) == ASIC_REV_5762 || 9646 tg3_flag(tp, 57765_CLASS)) 9647 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; 9648 else 9649 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; 9650 9651 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; 9652 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE) 9653 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS, 9654 BDINFO_FLAGS_DISABLED); 9655 } 9656 9657 /* tp->lock is held. */ 9658 static void tg3_rx_ret_rcbs_init(struct tg3 *tp) 9659 { 9660 int i = 0; 9661 u32 rxrcb = NIC_SRAM_RCV_RET_RCB; 9662 9663 if (tg3_flag(tp, ENABLE_RSS)) 9664 i++; 9665 9666 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) { 9667 struct tg3_napi *tnapi = &tp->napi[i]; 9668 9669 if (!tnapi->rx_rcb) 9670 continue; 9671 9672 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, 9673 (tp->rx_ret_ring_mask + 1) << 9674 BDINFO_FLAGS_MAXLEN_SHIFT, 0); 9675 } 9676 } 9677 9678 /* tp->lock is held. */ 9679 static void tg3_rings_reset(struct tg3 *tp) 9680 { 9681 int i; 9682 u32 stblk; 9683 struct tg3_napi *tnapi = &tp->napi[0]; 9684 9685 tg3_tx_rcbs_disable(tp); 9686 9687 tg3_rx_ret_rcbs_disable(tp); 9688 9689 /* Disable interrupts */ 9690 tw32_mailbox_f(tp->napi[0].int_mbox, 1); 9691 tp->napi[0].chk_msi_cnt = 0; 9692 tp->napi[0].last_rx_cons = 0; 9693 tp->napi[0].last_tx_cons = 0; 9694 9695 /* Zero mailbox registers. 
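 * Each MSI-X vector has its own producer/consumer mailboxes; clear
 * them all so that no stale ring indices survive the reset.  Vector
 * 0's TX producer mailbox is only used when TSS is disabled.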
*/ 9696 if (tg3_flag(tp, SUPPORT_MSIX)) { 9697 for (i = 1; i < tp->irq_max; i++) { 9698 tp->napi[i].tx_prod = 0; 9699 tp->napi[i].tx_cons = 0; 9700 if (tg3_flag(tp, ENABLE_TSS)) 9701 tw32_mailbox(tp->napi[i].prodmbox, 0); 9702 tw32_rx_mbox(tp->napi[i].consmbox, 0); 9703 tw32_mailbox_f(tp->napi[i].int_mbox, 1); 9704 tp->napi[i].chk_msi_cnt = 0; 9705 tp->napi[i].last_rx_cons = 0; 9706 tp->napi[i].last_tx_cons = 0; 9707 } 9708 if (!tg3_flag(tp, ENABLE_TSS)) 9709 tw32_mailbox(tp->napi[0].prodmbox, 0); 9710 } else { 9711 tp->napi[0].tx_prod = 0; 9712 tp->napi[0].tx_cons = 0; 9713 tw32_mailbox(tp->napi[0].prodmbox, 0); 9714 tw32_rx_mbox(tp->napi[0].consmbox, 0); 9715 } 9716 9717 /* Make sure the NIC-based send BD rings are disabled. */ 9718 if (!tg3_flag(tp, 5705_PLUS)) { 9719 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW; 9720 for (i = 0; i < 16; i++) 9721 tw32_tx_mbox(mbox + i * 8, 0); 9722 } 9723 9724 /* Clear status block in ram. */ 9725 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 9726 9727 /* Set status block DMA address */ 9728 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 9729 ((u64) tnapi->status_mapping >> 32)); 9730 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, 9731 ((u64) tnapi->status_mapping & 0xffffffff)); 9732 9733 stblk = HOSTCC_STATBLCK_RING1; 9734 9735 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) { 9736 u64 mapping = (u64)tnapi->status_mapping; 9737 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32); 9738 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff); 9739 stblk += 8; 9740 9741 /* Clear status block in ram. */ 9742 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 9743 } 9744 9745 tg3_tx_rcbs_init(tp); 9746 tg3_rx_ret_rcbs_init(tp); 9747 } 9748 9749 static void tg3_setup_rxbd_thresholds(struct tg3 *tp) 9750 { 9751 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh; 9752 9753 if (!tg3_flag(tp, 5750_PLUS) || 9754 tg3_flag(tp, 5780_CLASS) || 9755 tg3_asic_rev(tp) == ASIC_REV_5750 || 9756 tg3_asic_rev(tp) == ASIC_REV_5752 || 9757 tg3_flag(tp, 57765_PLUS)) 9758 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700; 9759 else if (tg3_asic_rev(tp) == ASIC_REV_5755 || 9760 tg3_asic_rev(tp) == ASIC_REV_5787) 9761 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755; 9762 else 9763 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906; 9764 9765 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post); 9766 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1); 9767 9768 val = min(nic_rep_thresh, host_rep_thresh); 9769 tw32(RCVBDI_STD_THRESH, val); 9770 9771 if (tg3_flag(tp, 57765_PLUS)) 9772 tw32(STD_REPLENISH_LWM, bdcache_maxcnt); 9773 9774 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) 9775 return; 9776 9777 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700; 9778 9779 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1); 9780 9781 val = min(bdcache_maxcnt / 2, host_rep_thresh); 9782 tw32(RCVBDI_JUMBO_THRESH, val); 9783 9784 if (tg3_flag(tp, 57765_PLUS)) 9785 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt); 9786 } 9787 9788 static inline u32 calc_crc(unsigned char *buf, int len) 9789 { 9790 u32 reg; 9791 u32 tmp; 9792 int j, k; 9793 9794 reg = 0xffffffff; 9795 9796 for (j = 0; j < len; j++) { 9797 reg ^= buf[j]; 9798 9799 for (k = 0; k < 8; k++) { 9800 tmp = reg & 0x01; 9801 9802 reg >>= 1; 9803 9804 if (tmp) 9805 reg ^= CRC32_POLY_LE; 9806 } 9807 } 9808 9809 return ~reg; 9810 } 9811 9812 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all) 9813 { 9814 /* accept or reject all multicast frames */ 9815 
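/* The four MAC_HASH registers form a single 128-bit multicast hash
 * filter.  Each address is hashed with the little-endian Ethernet
 * CRC-32 (calc_crc() above); the low seven bits of ~crc then pick one
 * of the 128 filter bits -- bits 6:5 select the register, bits 4:0
 * the bit within it (see __tg3_set_rx_mode() below).  All-ones
 * accepts every multicast frame, all-zeroes rejects them all.
 */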
tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0); 9816 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0); 9817 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0); 9818 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0); 9819 } 9820 9821 static void __tg3_set_rx_mode(struct net_device *dev) 9822 { 9823 struct tg3 *tp = netdev_priv(dev); 9824 u32 rx_mode; 9825 9826 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | 9827 RX_MODE_KEEP_VLAN_TAG); 9828 9829 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE) 9830 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG 9831 * flag clear. 9832 */ 9833 if (!tg3_flag(tp, ENABLE_ASF)) 9834 rx_mode |= RX_MODE_KEEP_VLAN_TAG; 9835 #endif 9836 9837 if (dev->flags & IFF_PROMISC) { 9838 /* Promiscuous mode. */ 9839 rx_mode |= RX_MODE_PROMISC; 9840 } else if (dev->flags & IFF_ALLMULTI) { 9841 /* Accept all multicast. */ 9842 tg3_set_multi(tp, 1); 9843 } else if (netdev_mc_empty(dev)) { 9844 /* Reject all multicast. */ 9845 tg3_set_multi(tp, 0); 9846 } else { 9847 /* Accept one or more multicast(s). */ 9848 struct netdev_hw_addr *ha; 9849 u32 mc_filter[4] = { 0, }; 9850 u32 regidx; 9851 u32 bit; 9852 u32 crc; 9853 9854 netdev_for_each_mc_addr(ha, dev) { 9855 crc = calc_crc(ha->addr, ETH_ALEN); 9856 bit = ~crc & 0x7f; 9857 regidx = (bit & 0x60) >> 5; 9858 bit &= 0x1f; 9859 mc_filter[regidx] |= (1 << bit); 9860 } 9861 9862 tw32(MAC_HASH_REG_0, mc_filter[0]); 9863 tw32(MAC_HASH_REG_1, mc_filter[1]); 9864 tw32(MAC_HASH_REG_2, mc_filter[2]); 9865 tw32(MAC_HASH_REG_3, mc_filter[3]); 9866 } 9867 9868 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) { 9869 rx_mode |= RX_MODE_PROMISC; 9870 } else if (!(dev->flags & IFF_PROMISC)) { 9871 /* Add all entries to the mac addr filter list */ 9872 int i = 0; 9873 struct netdev_hw_addr *ha; 9874 9875 netdev_for_each_uc_addr(ha, dev) { 9876 __tg3_set_one_mac_addr(tp, ha->addr, 9877 i + TG3_UCAST_ADDR_IDX(tp)); 9878 i++; 9879 } 9880 } 9881 9882 if (rx_mode != tp->rx_mode) { 9883 tp->rx_mode = rx_mode; 9884 tw32_f(MAC_RX_MODE, rx_mode); 9885 udelay(10); 9886 } 9887 } 9888 9889 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt) 9890 { 9891 int i; 9892 9893 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) 9894 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt); 9895 } 9896 9897 static void tg3_rss_check_indir_tbl(struct tg3 *tp) 9898 { 9899 int i; 9900 9901 if (!tg3_flag(tp, SUPPORT_MSIX)) 9902 return; 9903 9904 if (tp->rxq_cnt == 1) { 9905 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl)); 9906 return; 9907 } 9908 9909 /* Validate table against current IRQ count */ 9910 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) { 9911 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt) 9912 break; 9913 } 9914 9915 if (i != TG3_RSS_INDIR_TBL_SIZE) 9916 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt); 9917 } 9918 9919 static void tg3_rss_write_indir_tbl(struct tg3 *tp) 9920 { 9921 int i = 0; 9922 u32 reg = MAC_RSS_INDIR_TBL_0; 9923 9924 while (i < TG3_RSS_INDIR_TBL_SIZE) { 9925 u32 val = tp->rss_ind_tbl[i]; 9926 i++; 9927 for (; i % 8; i++) { 9928 val <<= 4; 9929 val |= tp->rss_ind_tbl[i]; 9930 } 9931 tw32(reg, val); 9932 reg += 4; 9933 } 9934 } 9935 9936 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp) 9937 { 9938 if (tg3_asic_rev(tp) == ASIC_REV_5719) 9939 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719; 9940 else 9941 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720; 9942 } 9943 9944 /* tp->lock is held.
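 * tg3_reset_hw() takes the chip through a full reset and then
 * reprograms every block: clocks, buffer manager, DMA engines,
 * rings, mailboxes and the MAC itself.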
*/ 9945 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) 9946 { 9947 u32 val, rdmac_mode; 9948 int i, err, limit; 9949 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; 9950 9951 tg3_disable_ints(tp); 9952 9953 tg3_stop_fw(tp); 9954 9955 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); 9956 9957 if (tg3_flag(tp, INIT_COMPLETE)) 9958 tg3_abort_hw(tp, 1); 9959 9960 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 9961 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) { 9962 tg3_phy_pull_config(tp); 9963 tg3_eee_pull_config(tp, NULL); 9964 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 9965 } 9966 9967 /* Enable MAC control of LPI */ 9968 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) 9969 tg3_setup_eee(tp); 9970 9971 if (reset_phy) 9972 tg3_phy_reset(tp); 9973 9974 err = tg3_chip_reset(tp); 9975 if (err) 9976 return err; 9977 9978 tg3_write_sig_legacy(tp, RESET_KIND_INIT); 9979 9980 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) { 9981 val = tr32(TG3_CPMU_CTRL); 9982 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE); 9983 tw32(TG3_CPMU_CTRL, val); 9984 9985 val = tr32(TG3_CPMU_LSPD_10MB_CLK); 9986 val &= ~CPMU_LSPD_10MB_MACCLK_MASK; 9987 val |= CPMU_LSPD_10MB_MACCLK_6_25; 9988 tw32(TG3_CPMU_LSPD_10MB_CLK, val); 9989 9990 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD); 9991 val &= ~CPMU_LNK_AWARE_MACCLK_MASK; 9992 val |= CPMU_LNK_AWARE_MACCLK_6_25; 9993 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val); 9994 9995 val = tr32(TG3_CPMU_HST_ACC); 9996 val &= ~CPMU_HST_ACC_MACCLK_MASK; 9997 val |= CPMU_HST_ACC_MACCLK_6_25; 9998 tw32(TG3_CPMU_HST_ACC, val); 9999 } 10000 10001 if (tg3_asic_rev(tp) == ASIC_REV_57780) { 10002 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK; 10003 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN | 10004 PCIE_PWR_MGMT_L1_THRESH_4MS; 10005 tw32(PCIE_PWR_MGMT_THRESH, val); 10006 10007 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK; 10008 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS); 10009 10010 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR); 10011 10012 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; 10013 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); 10014 } 10015 10016 if (tg3_flag(tp, L1PLLPD_EN)) { 10017 u32 grc_mode = tr32(GRC_MODE); 10018 10019 /* Access the lower 1K of PL PCIE block registers. */ 10020 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; 10021 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); 10022 10023 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1); 10024 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1, 10025 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN); 10026 10027 tw32(GRC_MODE, grc_mode); 10028 } 10029 10030 if (tg3_flag(tp, 57765_CLASS)) { 10031 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) { 10032 u32 grc_mode = tr32(GRC_MODE); 10033 10034 /* Access the lower 1K of PL PCIE block registers. */ 10035 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; 10036 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); 10037 10038 val = tr32(TG3_PCIE_TLDLPL_PORT + 10039 TG3_PCIE_PL_LO_PHYCTL5); 10040 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5, 10041 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ); 10042 10043 tw32(GRC_MODE, grc_mode); 10044 } 10045 10046 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) { 10047 u32 grc_mode; 10048 10049 /* Fix transmit hangs */ 10050 val = tr32(TG3_CPMU_PADRNG_CTL); 10051 val |= TG3_CPMU_PADRNG_CTL_RDIV2; 10052 tw32(TG3_CPMU_PADRNG_CTL, val); 10053 10054 grc_mode = tr32(GRC_MODE); 10055 10056 /* Access the lower 1K of DL PCIE block registers. 
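 * GRC_MODE_PCIE_PORT_MASK selects which 1K window of the PCIE core
 * appears at TG3_PCIE_TLDLPL_PORT; the original grc_mode is restored
 * once the write is done.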
*/ 10057 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; 10058 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL); 10059 10060 val = tr32(TG3_PCIE_TLDLPL_PORT + 10061 TG3_PCIE_DL_LO_FTSMAX); 10062 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK; 10063 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX, 10064 val | TG3_PCIE_DL_LO_FTSMAX_VAL); 10065 10066 tw32(GRC_MODE, grc_mode); 10067 } 10068 10069 val = tr32(TG3_CPMU_LSPD_10MB_CLK); 10070 val &= ~CPMU_LSPD_10MB_MACCLK_MASK; 10071 val |= CPMU_LSPD_10MB_MACCLK_6_25; 10072 tw32(TG3_CPMU_LSPD_10MB_CLK, val); 10073 } 10074 10075 /* This works around an issue with Athlon chipsets on 10076 * B3 tigon3 silicon. This bit has no effect on any 10077 * other revision. But do not set this on PCI Express 10078 * chips and don't even touch the clocks if the CPMU is present. 10079 */ 10080 if (!tg3_flag(tp, CPMU_PRESENT)) { 10081 if (!tg3_flag(tp, PCI_EXPRESS)) 10082 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT; 10083 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); 10084 } 10085 10086 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 && 10087 tg3_flag(tp, PCIX_MODE)) { 10088 val = tr32(TG3PCI_PCISTATE); 10089 val |= PCISTATE_RETRY_SAME_DMA; 10090 tw32(TG3PCI_PCISTATE, val); 10091 } 10092 10093 if (tg3_flag(tp, ENABLE_APE)) { 10094 /* Allow reads and writes to the 10095 * APE register and memory space. 10096 */ 10097 val = tr32(TG3PCI_PCISTATE); 10098 val |= PCISTATE_ALLOW_APE_CTLSPC_WR | 10099 PCISTATE_ALLOW_APE_SHMEM_WR | 10100 PCISTATE_ALLOW_APE_PSPACE_WR; 10101 tw32(TG3PCI_PCISTATE, val); 10102 } 10103 10104 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) { 10105 /* Enable some hw fixes. */ 10106 val = tr32(TG3PCI_MSI_DATA); 10107 val |= (1 << 26) | (1 << 28) | (1 << 29); 10108 tw32(TG3PCI_MSI_DATA, val); 10109 } 10110 10111 /* Descriptor ring init may make accesses to the 10112 * NIC SRAM area to setup the TX descriptors, so we 10113 * can only do this after the hardware has been 10114 * successfully reset. 10115 */ 10116 err = tg3_init_rings(tp); 10117 if (err) 10118 return err; 10119 10120 if (tg3_flag(tp, 57765_PLUS)) { 10121 val = tr32(TG3PCI_DMA_RW_CTRL) & 10122 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; 10123 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) 10124 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK; 10125 if (!tg3_flag(tp, 57765_CLASS) && 10126 tg3_asic_rev(tp) != ASIC_REV_5717 && 10127 tg3_asic_rev(tp) != ASIC_REV_5762) 10128 val |= DMA_RWCTRL_TAGGED_STAT_WA; 10129 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl); 10130 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 && 10131 tg3_asic_rev(tp) != ASIC_REV_5761) { 10132 /* This value is determined during the probe time DMA 10133 * engine test, tg3_test_dma. 10134 */ 10135 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 10136 } 10137 10138 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS | 10139 GRC_MODE_4X_NIC_SEND_RINGS | 10140 GRC_MODE_NO_TX_PHDR_CSUM | 10141 GRC_MODE_NO_RX_PHDR_CSUM); 10142 tp->grc_mode |= GRC_MODE_HOST_SENDBDS; 10143 10144 /* Pseudo-header checksum is done by hardware logic and not 10145 * the offload processors, so make the chip do the pseudo- 10146 * header checksums on receive. For transmit it is more 10147 * convenient to do the pseudo-header checksum in software 10148 * as Linux does that on transmit for us in all cases.
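 * (For CHECKSUM_PARTIAL skbs the stack has already folded the
 * pseudo-header sum into the checksum field, so the NIC only needs
 * to finish the one's-complement sum over the payload.)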
10149 */ 10150 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM; 10151 10152 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP; 10153 if (tp->rxptpctl) 10154 tw32(TG3_RX_PTP_CTL, 10155 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); 10156 10157 if (tg3_flag(tp, PTP_CAPABLE)) 10158 val |= GRC_MODE_TIME_SYNC_ENABLE; 10159 10160 tw32(GRC_MODE, tp->grc_mode | val); 10161 10162 /* On some AMD platforms, MRRS is restricted to 4000 because of 10163 * a south bridge limitation. As a workaround, the driver sets MRRS 10164 * to 2048 instead of the default 4096. 10165 */ 10166 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && 10167 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) { 10168 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK; 10169 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048); 10170 } 10171 10172 /* Set up the timer prescaler register. The clock is always 66 MHz. */ 10173 val = tr32(GRC_MISC_CFG); 10174 val &= ~0xff; 10175 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT); 10176 tw32(GRC_MISC_CFG, val); 10177 10178 /* Initialize MBUF/DESC pool. */ 10179 if (tg3_flag(tp, 5750_PLUS)) { 10180 /* Do nothing. */ 10181 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) { 10182 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE); 10183 if (tg3_asic_rev(tp) == ASIC_REV_5704) 10184 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64); 10185 else 10186 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96); 10187 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE); 10188 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE); 10189 } else if (tg3_flag(tp, TSO_CAPABLE)) { 10190 int fw_len; 10191 10192 fw_len = tp->fw_len; 10193 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1); 10194 tw32(BUFMGR_MB_POOL_ADDR, 10195 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len); 10196 tw32(BUFMGR_MB_POOL_SIZE, 10197 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00); 10198 } 10199 10200 if (tp->dev->mtu <= ETH_DATA_LEN) { 10201 tw32(BUFMGR_MB_RDMA_LOW_WATER, 10202 tp->bufmgr_config.mbuf_read_dma_low_water); 10203 tw32(BUFMGR_MB_MACRX_LOW_WATER, 10204 tp->bufmgr_config.mbuf_mac_rx_low_water); 10205 tw32(BUFMGR_MB_HIGH_WATER, 10206 tp->bufmgr_config.mbuf_high_water); 10207 } else { 10208 tw32(BUFMGR_MB_RDMA_LOW_WATER, 10209 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo); 10210 tw32(BUFMGR_MB_MACRX_LOW_WATER, 10211 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo); 10212 tw32(BUFMGR_MB_HIGH_WATER, 10213 tp->bufmgr_config.mbuf_high_water_jumbo); 10214 } 10215 tw32(BUFMGR_DMA_LOW_WATER, 10216 tp->bufmgr_config.dma_low_water); 10217 tw32(BUFMGR_DMA_HIGH_WATER, 10218 tp->bufmgr_config.dma_high_water); 10219 10220 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE; 10221 if (tg3_asic_rev(tp) == ASIC_REV_5719) 10222 val |= BUFMGR_MODE_NO_TX_UNDERRUN; 10223 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 10224 tg3_asic_rev(tp) == ASIC_REV_5762 || 10225 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 10226 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) 10227 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB; 10228 tw32(BUFMGR_MODE, val); 10229 for (i = 0; i < 2000; i++) { 10230 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE) 10231 break; 10232 udelay(10); 10233 } 10234 if (i >= 2000) { 10235 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__); 10236 return -ENODEV; 10237 } 10238 10239 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1) 10240 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2); 10241 10242 tg3_setup_rxbd_thresholds(tp); 10243 10244 /* Initialize TG3_BDINFO's at: 10245 * RCVDBDI_STD_BD: standard eth size rx ring 10246 *
RCVDBDI_JUMBO_BD: jumbo frame rx ring 10247 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work) 10248 * 10249 * like so: 10250 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring 10251 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) | 10252 * ring attribute flags 10253 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM 10254 * 10255 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries. 10256 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries. 10257 * 10258 * The size of each ring is fixed in the firmware, but the location is 10259 * configurable. 10260 */ 10261 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 10262 ((u64) tpr->rx_std_mapping >> 32)); 10263 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 10264 ((u64) tpr->rx_std_mapping & 0xffffffff)); 10265 if (!tg3_flag(tp, 5717_PLUS)) 10266 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, 10267 NIC_SRAM_RX_BUFFER_DESC); 10268 10269 /* Disable the mini ring */ 10270 if (!tg3_flag(tp, 5705_PLUS)) 10271 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, 10272 BDINFO_FLAGS_DISABLED); 10273 10274 /* Program the jumbo buffer descriptor ring control 10275 * blocks on those devices that have them. 10276 */ 10277 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 10278 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) { 10279 10280 if (tg3_flag(tp, JUMBO_RING_ENABLE)) { 10281 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 10282 ((u64) tpr->rx_jmb_mapping >> 32)); 10283 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 10284 ((u64) tpr->rx_jmb_mapping & 0xffffffff)); 10285 val = TG3_RX_JMB_RING_SIZE(tp) << 10286 BDINFO_FLAGS_MAXLEN_SHIFT; 10287 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 10288 val | BDINFO_FLAGS_USE_EXT_RECV); 10289 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) || 10290 tg3_flag(tp, 57765_CLASS) || 10291 tg3_asic_rev(tp) == ASIC_REV_5762) 10292 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, 10293 NIC_SRAM_RX_JUMBO_BUFFER_DESC); 10294 } else { 10295 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 10296 BDINFO_FLAGS_DISABLED); 10297 } 10298 10299 if (tg3_flag(tp, 57765_PLUS)) { 10300 val = TG3_RX_STD_RING_SIZE(tp); 10301 val <<= BDINFO_FLAGS_MAXLEN_SHIFT; 10302 val |= (TG3_RX_STD_DMA_SZ << 2); 10303 } else 10304 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT; 10305 } else 10306 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT; 10307 10308 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); 10309 10310 tpr->rx_std_prod_idx = tp->rx_pending; 10311 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); 10312 10313 tpr->rx_jmb_prod_idx = 10314 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0; 10315 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); 10316 10317 tg3_rings_reset(tp); 10318 10319 /* Initialize MAC address and backoff seed. */ 10320 __tg3_set_mac_addr(tp, false); 10321 10322 /* MTU + ethernet header + FCS + optional VLAN tag */ 10323 tw32(MAC_RX_MTU_SIZE, 10324 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); 10325 10326 /* The slot time is changed by tg3_setup_phy if we 10327 * run at gigabit with half duplex. 
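* The value built below encodes IPG/CRS = 2, IPG = 6, and a slot time
* of 32 in the corresponding MAC_TX_LENGTHS fields.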
10328 */ 10329 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | 10330 (6 << TX_LENGTHS_IPG_SHIFT) | 10331 (32 << TX_LENGTHS_SLOT_TIME_SHIFT); 10332 10333 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10334 tg3_asic_rev(tp) == ASIC_REV_5762) 10335 val |= tr32(MAC_TX_LENGTHS) & 10336 (TX_LENGTHS_JMB_FRM_LEN_MSK | 10337 TX_LENGTHS_CNT_DWN_VAL_MSK); 10338 10339 tw32(MAC_TX_LENGTHS, val); 10340 10341 /* Receive rules. */ 10342 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); 10343 tw32(RCVLPC_CONFIG, 0x0181); 10344 10345 /* Calculate RDMAC_MODE setting early, we need it to determine 10346 * the RCVLPC_STATE_ENABLE mask. 10347 */ 10348 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB | 10349 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB | 10350 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB | 10351 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | 10352 RDMAC_MODE_LNGREAD_ENAB); 10353 10354 if (tg3_asic_rev(tp) == ASIC_REV_5717) 10355 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; 10356 10357 if (tg3_asic_rev(tp) == ASIC_REV_5784 || 10358 tg3_asic_rev(tp) == ASIC_REV_5785 || 10359 tg3_asic_rev(tp) == ASIC_REV_57780) 10360 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB | 10361 RDMAC_MODE_MBUF_RBD_CRPT_ENAB | 10362 RDMAC_MODE_MBUF_SBD_CRPT_ENAB; 10363 10364 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 10365 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 10366 if (tg3_flag(tp, TSO_CAPABLE)) { 10367 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; 10368 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 10369 !tg3_flag(tp, IS_5788)) { 10370 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 10371 } 10372 } 10373 10374 if (tg3_flag(tp, PCI_EXPRESS)) 10375 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 10376 10377 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 10378 tp->dma_limit = 0; 10379 if (tp->dev->mtu <= ETH_DATA_LEN) { 10380 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR; 10381 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K; 10382 } 10383 } 10384 10385 if (tg3_flag(tp, HW_TSO_1) || 10386 tg3_flag(tp, HW_TSO_2) || 10387 tg3_flag(tp, HW_TSO_3)) 10388 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; 10389 10390 if (tg3_flag(tp, 57765_PLUS) || 10391 tg3_asic_rev(tp) == ASIC_REV_5785 || 10392 tg3_asic_rev(tp) == ASIC_REV_57780) 10393 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; 10394 10395 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10396 tg3_asic_rev(tp) == ASIC_REV_5762) 10397 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET; 10398 10399 if (tg3_asic_rev(tp) == ASIC_REV_5761 || 10400 tg3_asic_rev(tp) == ASIC_REV_5784 || 10401 tg3_asic_rev(tp) == ASIC_REV_5785 || 10402 tg3_asic_rev(tp) == ASIC_REV_57780 || 10403 tg3_flag(tp, 57765_PLUS)) { 10404 u32 tgtreg; 10405 10406 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10407 tgtreg = TG3_RDMA_RSRVCTRL_REG2; 10408 else 10409 tgtreg = TG3_RDMA_RSRVCTRL_REG; 10410 10411 val = tr32(tgtreg); 10412 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 10413 tg3_asic_rev(tp) == ASIC_REV_5762) { 10414 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK | 10415 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK | 10416 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK); 10417 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B | 10418 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K | 10419 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K; 10420 } 10421 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 10422 } 10423 10424 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 10425 tg3_asic_rev(tp) == ASIC_REV_5720 || 10426 tg3_asic_rev(tp) == ASIC_REV_5762) { 10427 u32 tgtreg; 10428 10429 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10430 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2; 10431 else 10432 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL; 
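/* Limit buffer-descriptor and LSO read-DMA burst lengths to 4K on
 * these chips (going by the BLEN_BD_4K / BLEN_LSO_4K bit names).
 */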
10433 10434 val = tr32(tgtreg); 10435 tw32(tgtreg, val | 10436 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K | 10437 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K); 10438 } 10439 10440 /* Receive/send statistics. */ 10441 if (tg3_flag(tp, 5750_PLUS)) { 10442 val = tr32(RCVLPC_STATS_ENABLE); 10443 val &= ~RCVLPC_STATSENAB_DACK_FIX; 10444 tw32(RCVLPC_STATS_ENABLE, val); 10445 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && 10446 tg3_flag(tp, TSO_CAPABLE)) { 10447 val = tr32(RCVLPC_STATS_ENABLE); 10448 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; 10449 tw32(RCVLPC_STATS_ENABLE, val); 10450 } else { 10451 tw32(RCVLPC_STATS_ENABLE, 0xffffff); 10452 } 10453 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE); 10454 tw32(SNDDATAI_STATSENAB, 0xffffff); 10455 tw32(SNDDATAI_STATSCTRL, 10456 (SNDDATAI_SCTRL_ENABLE | 10457 SNDDATAI_SCTRL_FASTUPD)); 10458 10459 /* Setup host coalescing engine. */ 10460 tw32(HOSTCC_MODE, 0); 10461 for (i = 0; i < 2000; i++) { 10462 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE)) 10463 break; 10464 udelay(10); 10465 } 10466 10467 __tg3_set_coalesce(tp, &tp->coal); 10468 10469 if (!tg3_flag(tp, 5705_PLUS)) { 10470 /* Status/statistics block address. See tg3_timer, 10471 * the tg3_periodic_fetch_stats call there, and 10472 * tg3_get_stats to see how this works for 5705/5750 chips. 10473 */ 10474 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 10475 ((u64) tp->stats_mapping >> 32)); 10476 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, 10477 ((u64) tp->stats_mapping & 0xffffffff)); 10478 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK); 10479 10480 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK); 10481 10482 /* Clear statistics and status block memory areas */ 10483 for (i = NIC_SRAM_STATS_BLK; 10484 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE; 10485 i += sizeof(u32)) { 10486 tg3_write_mem(tp, i, 0); 10487 udelay(40); 10488 } 10489 } 10490 10491 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode); 10492 10493 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); 10494 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); 10495 if (!tg3_flag(tp, 5705_PLUS)) 10496 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); 10497 10498 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { 10499 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 10500 /* reset to prevent losing 1st rx packet intermittently */ 10501 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 10502 udelay(10); 10503 } 10504 10505 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | 10506 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | 10507 MAC_MODE_FHDE_ENABLE; 10508 if (tg3_flag(tp, ENABLE_APE)) 10509 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 10510 if (!tg3_flag(tp, 5705_PLUS) && 10511 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 10512 tg3_asic_rev(tp) != ASIC_REV_5700) 10513 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 10514 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); 10515 udelay(40); 10516 10517 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). 10518 * If TG3_FLAG_IS_NIC is zero, we should read the 10519 * register to preserve the GPIO settings for LOMs. The GPIOs, 10520 * whether used as inputs or outputs, are set by boot code after 10521 * reset. 
10522 */ 10523 if (!tg3_flag(tp, IS_NIC)) { 10524 u32 gpio_mask; 10525 10526 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | 10527 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 | 10528 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2; 10529 10530 if (tg3_asic_rev(tp) == ASIC_REV_5752) 10531 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 | 10532 GRC_LCLCTRL_GPIO_OUTPUT3; 10533 10534 if (tg3_asic_rev(tp) == ASIC_REV_5755) 10535 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL; 10536 10537 tp->grc_local_ctrl &= ~gpio_mask; 10538 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; 10539 10540 /* GPIO1 must be driven high for eeprom write protect */ 10541 if (tg3_flag(tp, EEPROM_WRITE_PROT)) 10542 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | 10543 GRC_LCLCTRL_GPIO_OUTPUT1); 10544 } 10545 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 10546 udelay(100); 10547 10548 if (tg3_flag(tp, USING_MSIX)) { 10549 val = tr32(MSGINT_MODE); 10550 val |= MSGINT_MODE_ENABLE; 10551 if (tp->irq_cnt > 1) 10552 val |= MSGINT_MODE_MULTIVEC_EN; 10553 if (!tg3_flag(tp, 1SHOT_MSI)) 10554 val |= MSGINT_MODE_ONE_SHOT_DISABLE; 10555 tw32(MSGINT_MODE, val); 10556 } 10557 10558 if (!tg3_flag(tp, 5705_PLUS)) { 10559 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); 10560 udelay(40); 10561 } 10562 10563 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB | 10564 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB | 10565 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB | 10566 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | 10567 WDMAC_MODE_LNGREAD_ENAB); 10568 10569 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 10570 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 10571 if (tg3_flag(tp, TSO_CAPABLE) && 10572 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 || 10573 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) { 10574 /* nothing */ 10575 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 10576 !tg3_flag(tp, IS_5788)) { 10577 val |= WDMAC_MODE_RX_ACCEL; 10578 } 10579 } 10580 10581 /* Enable host coalescing bug fix */ 10582 if (tg3_flag(tp, 5755_PLUS)) 10583 val |= WDMAC_MODE_STATUS_TAG_FIX; 10584 10585 if (tg3_asic_rev(tp) == ASIC_REV_5785) 10586 val |= WDMAC_MODE_BURST_ALL_DATA; 10587 10588 tw32_f(WDMAC_MODE, val); 10589 udelay(40); 10590 10591 if (tg3_flag(tp, PCIX_MODE)) { 10592 u16 pcix_cmd; 10593 10594 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 10595 &pcix_cmd); 10596 if (tg3_asic_rev(tp) == ASIC_REV_5703) { 10597 pcix_cmd &= ~PCI_X_CMD_MAX_READ; 10598 pcix_cmd |= PCI_X_CMD_READ_2K; 10599 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) { 10600 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ); 10601 pcix_cmd |= PCI_X_CMD_READ_2K; 10602 } 10603 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 10604 pcix_cmd); 10605 } 10606 10607 tw32_f(RDMAC_MODE, rdmac_mode); 10608 udelay(40); 10609 10610 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 10611 tg3_asic_rev(tp) == ASIC_REV_5720) { 10612 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) { 10613 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp)) 10614 break; 10615 } 10616 if (i < TG3_NUM_RDMA_CHANNELS) { 10617 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); 10618 val |= tg3_lso_rd_dma_workaround_bit(tp); 10619 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); 10620 tg3_flag_set(tp, 5719_5720_RDMA_BUG); 10621 } 10622 } 10623 10624 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); 10625 if (!tg3_flag(tp, 5705_PLUS)) 10626 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); 10627 10628 if (tg3_asic_rev(tp) == ASIC_REV_5761) 10629 tw32(SNDDATAC_MODE, 10630 SNDDATAC_MODE_ENABLE | 
SNDDATAC_MODE_CDELAY); 10631 else 10632 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE); 10633 10634 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); 10635 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); 10636 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ; 10637 if (tg3_flag(tp, LRG_PROD_RING_CAP)) 10638 val |= RCVDBDI_MODE_LRG_RING_SZ; 10639 tw32(RCVDBDI_MODE, val); 10640 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); 10641 if (tg3_flag(tp, HW_TSO_1) || 10642 tg3_flag(tp, HW_TSO_2) || 10643 tg3_flag(tp, HW_TSO_3)) 10644 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); 10645 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; 10646 if (tg3_flag(tp, ENABLE_TSS)) 10647 val |= SNDBDI_MODE_MULTI_TXQ_EN; 10648 tw32(SNDBDI_MODE, val); 10649 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); 10650 10651 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { 10652 err = tg3_load_5701_a0_firmware_fix(tp); 10653 if (err) 10654 return err; 10655 } 10656 10657 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 10658 /* Ignore any errors for the firmware download. If download 10659 * fails, the device will operate with EEE disabled 10660 */ 10661 tg3_load_57766_firmware(tp); 10662 } 10663 10664 if (tg3_flag(tp, TSO_CAPABLE)) { 10665 err = tg3_load_tso_firmware(tp); 10666 if (err) 10667 return err; 10668 } 10669 10670 tp->tx_mode = TX_MODE_ENABLE; 10671 10672 if (tg3_flag(tp, 5755_PLUS) || 10673 tg3_asic_rev(tp) == ASIC_REV_5906) 10674 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX; 10675 10676 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10677 tg3_asic_rev(tp) == ASIC_REV_5762) { 10678 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE; 10679 tp->tx_mode &= ~val; 10680 tp->tx_mode |= tr32(MAC_TX_MODE) & val; 10681 } 10682 10683 tw32_f(MAC_TX_MODE, tp->tx_mode); 10684 udelay(100); 10685 10686 if (tg3_flag(tp, ENABLE_RSS)) { 10687 u32 rss_key[10]; 10688 10689 tg3_rss_write_indir_tbl(tp); 10690 10691 netdev_rss_key_fill(rss_key, 10 * sizeof(u32)); 10692 10693 for (i = 0; i < 10 ; i++) 10694 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]); 10695 } 10696 10697 tp->rx_mode = RX_MODE_ENABLE; 10698 if (tg3_flag(tp, 5755_PLUS)) 10699 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; 10700 10701 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10702 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX; 10703 10704 if (tg3_flag(tp, ENABLE_RSS)) 10705 tp->rx_mode |= RX_MODE_RSS_ENABLE | 10706 RX_MODE_RSS_ITBL_HASH_BITS_7 | 10707 RX_MODE_RSS_IPV6_HASH_EN | 10708 RX_MODE_RSS_TCP_IPV6_HASH_EN | 10709 RX_MODE_RSS_IPV4_HASH_EN | 10710 RX_MODE_RSS_TCP_IPV4_HASH_EN; 10711 10712 tw32_f(MAC_RX_MODE, tp->rx_mode); 10713 udelay(10); 10714 10715 tw32(MAC_LED_CTRL, tp->led_ctrl); 10716 10717 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 10718 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 10719 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 10720 udelay(10); 10721 } 10722 tw32_f(MAC_RX_MODE, tp->rx_mode); 10723 udelay(10); 10724 10725 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 10726 if ((tg3_asic_rev(tp) == ASIC_REV_5704) && 10727 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) { 10728 /* Set drive transmission level to 1.2V */ 10729 /* only if the signal pre-emphasis bit is not set */ 10730 val = tr32(MAC_SERDES_CFG); 10731 val &= 0xfffff000; 10732 val |= 0x880; 10733 tw32(MAC_SERDES_CFG, val); 10734 } 10735 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) 10736 tw32(MAC_SERDES_CFG, 0x616000); 10737 } 10738 10739 /* Prevent chip from dropping frames when flow control 10740 * is enabled. 
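* 57765-class devices get a lower frame watermark (1 instead of 2),
* presumably to match their smaller internal FIFO.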
10741 */ 10742 if (tg3_flag(tp, 57765_CLASS)) 10743 val = 1; 10744 else 10745 val = 2; 10746 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val); 10747 10748 if (tg3_asic_rev(tp) == ASIC_REV_5704 && 10749 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 10750 /* Use hardware link auto-negotiation */ 10751 tg3_flag_set(tp, HW_AUTONEG); 10752 } 10753 10754 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 10755 tg3_asic_rev(tp) == ASIC_REV_5714) { 10756 u32 tmp; 10757 10758 tmp = tr32(SERDES_RX_CTRL); 10759 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT); 10760 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT; 10761 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT; 10762 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 10763 } 10764 10765 if (!tg3_flag(tp, USE_PHYLIB)) { 10766 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 10767 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; 10768 10769 err = tg3_setup_phy(tp, false); 10770 if (err) 10771 return err; 10772 10773 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 10774 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 10775 u32 tmp; 10776 10777 /* Clear CRC stats. */ 10778 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { 10779 tg3_writephy(tp, MII_TG3_TEST1, 10780 tmp | MII_TG3_TEST1_CRC_EN); 10781 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp); 10782 } 10783 } 10784 } 10785 10786 __tg3_set_rx_mode(tp->dev); 10787 10788 /* Initialize receive rules. */ 10789 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK); 10790 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK); 10791 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK); 10792 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); 10793 10794 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) 10795 limit = 8; 10796 else 10797 limit = 16; 10798 if (tg3_flag(tp, ENABLE_ASF)) 10799 limit -= 4; 10800 switch (limit) { 10801 case 16: 10802 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); 10803 fallthrough; 10804 case 15: 10805 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); 10806 fallthrough; 10807 case 14: 10808 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); 10809 fallthrough; 10810 case 13: 10811 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); 10812 fallthrough; 10813 case 12: 10814 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); 10815 fallthrough; 10816 case 11: 10817 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); 10818 fallthrough; 10819 case 10: 10820 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); 10821 fallthrough; 10822 case 9: 10823 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); 10824 fallthrough; 10825 case 8: 10826 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); 10827 fallthrough; 10828 case 7: 10829 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); 10830 fallthrough; 10831 case 6: 10832 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); 10833 fallthrough; 10834 case 5: 10835 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0); 10836 fallthrough; 10837 case 4: 10838 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ 10839 case 3: 10840 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */ 10841 case 2: 10842 case 1: 10843 10844 default: 10845 break; 10846 } 10847 10848 if (tg3_flag(tp, ENABLE_APE)) 10849 /* Write our heartbeat update interval to APE. */ 10850 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, 10851 APE_HOST_HEARTBEAT_INT_5SEC); 10852 10853 tg3_write_sig_post_reset(tp, RESET_KIND_INIT); 10854 10855 return 0; 10856 } 10857 10858 /* Called at device open time to get the chip ready for 10859 * packet processing. Invoked with tp->lock held. 
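* Returns zero on success or a negative errno if the reset fails.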
10860 */ 10861 static int tg3_init_hw(struct tg3 *tp, bool reset_phy) 10862 { 10863 /* Chip may have been just powered on. If so, the boot code may still 10864 * be running initialization. Wait for it to finish to avoid races in 10865 * accessing the hardware. 10866 */ 10867 tg3_enable_register_access(tp); 10868 tg3_poll_fw(tp); 10869 10870 tg3_switch_clocks(tp); 10871 10872 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 10873 10874 return tg3_reset_hw(tp, reset_phy); 10875 } 10876 10877 #ifdef CONFIG_TIGON3_HWMON 10878 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir) 10879 { 10880 u32 off, len = TG3_OCIR_LEN; 10881 int i; 10882 10883 for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) { 10884 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len); 10885 10886 if (ocir->signature != TG3_OCIR_SIG_MAGIC || 10887 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE)) 10888 memset(ocir, 0, len); 10889 } 10890 } 10891 10892 /* sysfs attributes for hwmon */ 10893 static ssize_t tg3_show_temp(struct device *dev, 10894 struct device_attribute *devattr, char *buf) 10895 { 10896 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 10897 struct tg3 *tp = dev_get_drvdata(dev); 10898 u32 temperature; 10899 10900 spin_lock_bh(&tp->lock); 10901 tg3_ape_scratchpad_read(tp, &temperature, attr->index, 10902 sizeof(temperature)); 10903 spin_unlock_bh(&tp->lock); 10904 return sprintf(buf, "%u\n", temperature * 1000); 10905 } 10906 10907 10908 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL, 10909 TG3_TEMP_SENSOR_OFFSET); 10910 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL, 10911 TG3_TEMP_CAUTION_OFFSET); 10912 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL, 10913 TG3_TEMP_MAX_OFFSET); 10914 10915 static struct attribute *tg3_attrs[] = { 10916 &sensor_dev_attr_temp1_input.dev_attr.attr, 10917 &sensor_dev_attr_temp1_crit.dev_attr.attr, 10918 &sensor_dev_attr_temp1_max.dev_attr.attr, 10919 NULL 10920 }; 10921 ATTRIBUTE_GROUPS(tg3); 10922 10923 static void tg3_hwmon_close(struct tg3 *tp) 10924 { 10925 if (tp->hwmon_dev) { 10926 hwmon_device_unregister(tp->hwmon_dev); 10927 tp->hwmon_dev = NULL; 10928 } 10929 } 10930 10931 static void tg3_hwmon_open(struct tg3 *tp) 10932 { 10933 int i; 10934 u32 size = 0; 10935 struct pci_dev *pdev = tp->pdev; 10936 struct tg3_ocir ocirs[TG3_SD_NUM_RECS]; 10937 10938 tg3_sd_scan_scratchpad(tp, ocirs); 10939 10940 for (i = 0; i < TG3_SD_NUM_RECS; i++) { 10941 if (!ocirs[i].src_data_length) 10942 continue; 10943 10944 size += ocirs[i].src_hdr_length; 10945 size += ocirs[i].src_data_length; 10946 } 10947 10948 if (!size) 10949 return; 10950 10951 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3", 10952 tp, tg3_groups); 10953 if (IS_ERR(tp->hwmon_dev)) { 10954 tp->hwmon_dev = NULL; 10955 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n"); 10956 } 10957 } 10958 #else 10959 static inline void tg3_hwmon_close(struct tg3 *tp) { } 10960 static inline void tg3_hwmon_open(struct tg3 *tp) { } 10961 #endif /* CONFIG_TIGON3_HWMON */ 10962 10963 10964 #define TG3_STAT_ADD32(PSTAT, REG) \ 10965 do { u32 __val = tr32(REG); \ 10966 (PSTAT)->low += __val; \ 10967 if ((PSTAT)->low < __val) \ 10968 (PSTAT)->high += 1; \ 10969 } while (0) 10970 10971 static void tg3_periodic_fetch_stats(struct tg3 *tp) 10972 { 10973 struct tg3_hw_stats *sp = tp->hw_stats; 10974 10975 if (!tp->link_up) 10976 return; 10977 10978 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS); 10979 
TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS); 10980 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT); 10981 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT); 10982 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS); 10983 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS); 10984 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS); 10985 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED); 10986 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL); 10987 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL); 10988 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST); 10989 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST); 10990 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST); 10991 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) && 10992 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low + 10993 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) { 10994 u32 val; 10995 10996 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); 10997 val &= ~tg3_lso_rd_dma_workaround_bit(tp); 10998 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); 10999 tg3_flag_clear(tp, 5719_5720_RDMA_BUG); 11000 } 11001 11002 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS); 11003 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS); 11004 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST); 11005 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST); 11006 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST); 11007 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS); 11008 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS); 11009 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD); 11010 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD); 11011 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD); 11012 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED); 11013 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG); 11014 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS); 11015 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); 11016 11017 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); 11018 if (tg3_asic_rev(tp) != ASIC_REV_5717 && 11019 tg3_asic_rev(tp) != ASIC_REV_5762 && 11020 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 && 11021 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) { 11022 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); 11023 } else { 11024 u32 val = tr32(HOSTCC_FLOW_ATTN); 11025 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 
1 : 0; 11026 if (val) { 11027 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM); 11028 sp->rx_discards.low += val; 11029 if (sp->rx_discards.low < val) 11030 sp->rx_discards.high += 1; 11031 } 11032 sp->mbuf_lwm_thresh_hit = sp->rx_discards; 11033 } 11034 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); 11035 } 11036 11037 static void tg3_chk_missed_msi(struct tg3 *tp) 11038 { 11039 u32 i; 11040 11041 for (i = 0; i < tp->irq_cnt; i++) { 11042 struct tg3_napi *tnapi = &tp->napi[i]; 11043 11044 if (tg3_has_work(tnapi)) { 11045 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr && 11046 tnapi->last_tx_cons == tnapi->tx_cons) { 11047 if (tnapi->chk_msi_cnt < 1) { 11048 tnapi->chk_msi_cnt++; 11049 return; 11050 } 11051 tg3_msi(0, tnapi); 11052 } 11053 } 11054 tnapi->chk_msi_cnt = 0; 11055 tnapi->last_rx_cons = tnapi->rx_rcb_ptr; 11056 tnapi->last_tx_cons = tnapi->tx_cons; 11057 } 11058 } 11059 11060 static void tg3_timer(struct timer_list *t) 11061 { 11062 struct tg3 *tp = from_timer(tp, t, timer); 11063 11064 spin_lock(&tp->lock); 11065 11066 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) { 11067 spin_unlock(&tp->lock); 11068 goto restart_timer; 11069 } 11070 11071 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 11072 tg3_flag(tp, 57765_CLASS)) 11073 tg3_chk_missed_msi(tp); 11074 11075 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) { 11076 /* BCM4785: Flush posted writes from GbE to host memory. */ 11077 tr32(HOSTCC_MODE); 11078 } 11079 11080 if (!tg3_flag(tp, TAGGED_STATUS)) { 11081 /* All of this garbage is because, when using non-tagged 11082 * IRQ status, the mailbox/status_block protocol the chip 11083 * uses with the CPU is race-prone. 11084 */ 11085 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) { 11086 tw32(GRC_LOCAL_CTRL, 11087 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); 11088 } else { 11089 tw32(HOSTCC_MODE, tp->coalesce_mode | 11090 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW); 11091 } 11092 11093 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { 11094 spin_unlock(&tp->lock); 11095 tg3_reset_task_schedule(tp); 11096 goto restart_timer; 11097 } 11098 } 11099 11100 /* This part only runs once per second.
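* timer_counter is reloaded from timer_multiplier at the bottom of the
* block, so the work below runs at 1 Hz whether the base tick is HZ or
* HZ / 10 (see tg3_timer_init).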
*/ 11101 if (!--tp->timer_counter) { 11102 if (tg3_flag(tp, 5705_PLUS)) 11103 tg3_periodic_fetch_stats(tp); 11104 11105 if (tp->setlpicnt && !--tp->setlpicnt) 11106 tg3_phy_eee_enable(tp); 11107 11108 if (tg3_flag(tp, USE_LINKCHG_REG)) { 11109 u32 mac_stat; 11110 int phy_event; 11111 11112 mac_stat = tr32(MAC_STATUS); 11113 11114 phy_event = 0; 11115 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) { 11116 if (mac_stat & MAC_STATUS_MI_INTERRUPT) 11117 phy_event = 1; 11118 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED) 11119 phy_event = 1; 11120 11121 if (phy_event) 11122 tg3_setup_phy(tp, false); 11123 } else if (tg3_flag(tp, POLL_SERDES)) { 11124 u32 mac_stat = tr32(MAC_STATUS); 11125 int need_setup = 0; 11126 11127 if (tp->link_up && 11128 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) { 11129 need_setup = 1; 11130 } 11131 if (!tp->link_up && 11132 (mac_stat & (MAC_STATUS_PCS_SYNCED | 11133 MAC_STATUS_SIGNAL_DET))) { 11134 need_setup = 1; 11135 } 11136 if (need_setup) { 11137 if (!tp->serdes_counter) { 11138 tw32_f(MAC_MODE, 11139 (tp->mac_mode & 11140 ~MAC_MODE_PORT_MODE_MASK)); 11141 udelay(40); 11142 tw32_f(MAC_MODE, tp->mac_mode); 11143 udelay(40); 11144 } 11145 tg3_setup_phy(tp, false); 11146 } 11147 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 11148 tg3_flag(tp, 5780_CLASS)) { 11149 tg3_serdes_parallel_detect(tp); 11150 } else if (tg3_flag(tp, POLL_CPMU_LINK)) { 11151 u32 cpmu = tr32(TG3_CPMU_STATUS); 11152 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) == 11153 TG3_CPMU_STATUS_LINK_MASK); 11154 11155 if (link_up != tp->link_up) 11156 tg3_setup_phy(tp, false); 11157 } 11158 11159 tp->timer_counter = tp->timer_multiplier; 11160 } 11161 11162 /* Heartbeat is only sent once every 2 seconds. 11163 * 11164 * The heartbeat is to tell the ASF firmware that the host 11165 * driver is still alive. In the event that the OS crashes, 11166 * ASF needs to reset the hardware to free up the FIFO space 11167 * that may be filled with rx packets destined for the host. 11168 * If the FIFO is full, ASF will no longer function properly. 11169 * 11170 * Unintended resets have been reported on real-time kernels 11171 * where the timer doesn't run on time. Netpoll will also have 11172 * the same problem. 11173 * 11174 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware 11175 * to check the ring condition when the heartbeat is expiring 11176 * before doing the reset. This will prevent most unintended 11177 * resets.
11178 */ 11179 if (!--tp->asf_counter) { 11180 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { 11181 tg3_wait_for_event_ack(tp); 11182 11183 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, 11184 FWCMD_NICDRV_ALIVE3); 11185 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); 11186 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 11187 TG3_FW_UPDATE_TIMEOUT_SEC); 11188 11189 tg3_generate_fw_event(tp); 11190 } 11191 tp->asf_counter = tp->asf_multiplier; 11192 } 11193 11194 /* Update the APE heartbeat every 5 seconds.*/ 11195 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL); 11196 11197 spin_unlock(&tp->lock); 11198 11199 restart_timer: 11200 tp->timer.expires = jiffies + tp->timer_offset; 11201 add_timer(&tp->timer); 11202 } 11203 11204 static void tg3_timer_init(struct tg3 *tp) 11205 { 11206 if (tg3_flag(tp, TAGGED_STATUS) && 11207 tg3_asic_rev(tp) != ASIC_REV_5717 && 11208 !tg3_flag(tp, 57765_CLASS)) 11209 tp->timer_offset = HZ; 11210 else 11211 tp->timer_offset = HZ / 10; 11212 11213 BUG_ON(tp->timer_offset > HZ); 11214 11215 tp->timer_multiplier = (HZ / tp->timer_offset); 11216 tp->asf_multiplier = (HZ / tp->timer_offset) * 11217 TG3_FW_UPDATE_FREQ_SEC; 11218 11219 timer_setup(&tp->timer, tg3_timer, 0); 11220 } 11221 11222 static void tg3_timer_start(struct tg3 *tp) 11223 { 11224 tp->asf_counter = tp->asf_multiplier; 11225 tp->timer_counter = tp->timer_multiplier; 11226 11227 tp->timer.expires = jiffies + tp->timer_offset; 11228 add_timer(&tp->timer); 11229 } 11230 11231 static void tg3_timer_stop(struct tg3 *tp) 11232 { 11233 del_timer_sync(&tp->timer); 11234 } 11235 11236 /* Restart hardware after configuration changes, self-test, etc. 11237 * Invoked with tp->lock held. 11238 */ 11239 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy) 11240 __releases(tp->lock) 11241 __acquires(tp->lock) 11242 { 11243 int err; 11244 11245 err = tg3_init_hw(tp, reset_phy); 11246 if (err) { 11247 netdev_err(tp->dev, 11248 "Failed to re-initialize device, aborting\n"); 11249 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11250 tg3_full_unlock(tp); 11251 tg3_timer_stop(tp); 11252 tp->irq_sync = 0; 11253 tg3_napi_enable(tp); 11254 dev_close(tp->dev); 11255 tg3_full_lock(tp, 0); 11256 } 11257 return err; 11258 } 11259 11260 static void tg3_reset_task(struct work_struct *work) 11261 { 11262 struct tg3 *tp = container_of(work, struct tg3, reset_task); 11263 int err; 11264 11265 rtnl_lock(); 11266 tg3_full_lock(tp, 0); 11267 11268 if (tp->pcierr_recovery || !netif_running(tp->dev) || 11269 tp->pdev->error_state != pci_channel_io_normal) { 11270 tg3_flag_clear(tp, RESET_TASK_PENDING); 11271 tg3_full_unlock(tp); 11272 rtnl_unlock(); 11273 return; 11274 } 11275 11276 tg3_full_unlock(tp); 11277 11278 tg3_phy_stop(tp); 11279 11280 tg3_netif_stop(tp); 11281 11282 tg3_full_lock(tp, 1); 11283 11284 if (tg3_flag(tp, TX_RECOVERY_PENDING)) { 11285 tp->write32_tx_mbox = tg3_write32_tx_mbox; 11286 tp->write32_rx_mbox = tg3_write_flush_reg32; 11287 tg3_flag_set(tp, MBOX_WRITE_REORDER); 11288 tg3_flag_clear(tp, TX_RECOVERY_PENDING); 11289 } 11290 11291 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); 11292 err = tg3_init_hw(tp, true); 11293 if (err) { 11294 tg3_full_unlock(tp); 11295 tp->irq_sync = 0; 11296 tg3_napi_enable(tp); 11297 /* Clear this flag so that tg3_reset_task_cancel() will not 11298 * call cancel_work_sync() and wait forever. 
11299 */ 11300 tg3_flag_clear(tp, RESET_TASK_PENDING); 11301 dev_close(tp->dev); 11302 goto out; 11303 } 11304 11305 tg3_netif_start(tp); 11306 tg3_full_unlock(tp); 11307 tg3_phy_start(tp); 11308 tg3_flag_clear(tp, RESET_TASK_PENDING); 11309 out: 11310 rtnl_unlock(); 11311 } 11312 11313 static int tg3_request_irq(struct tg3 *tp, int irq_num) 11314 { 11315 irq_handler_t fn; 11316 unsigned long flags; 11317 char *name; 11318 struct tg3_napi *tnapi = &tp->napi[irq_num]; 11319 11320 if (tp->irq_cnt == 1) 11321 name = tp->dev->name; 11322 else { 11323 name = &tnapi->irq_lbl[0]; 11324 if (tnapi->tx_buffers && tnapi->rx_rcb) 11325 snprintf(name, IFNAMSIZ, 11326 "%s-txrx-%d", tp->dev->name, irq_num); 11327 else if (tnapi->tx_buffers) 11328 snprintf(name, IFNAMSIZ, 11329 "%s-tx-%d", tp->dev->name, irq_num); 11330 else if (tnapi->rx_rcb) 11331 snprintf(name, IFNAMSIZ, 11332 "%s-rx-%d", tp->dev->name, irq_num); 11333 else 11334 snprintf(name, IFNAMSIZ, 11335 "%s-%d", tp->dev->name, irq_num); 11336 name[IFNAMSIZ-1] = 0; 11337 } 11338 11339 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { 11340 fn = tg3_msi; 11341 if (tg3_flag(tp, 1SHOT_MSI)) 11342 fn = tg3_msi_1shot; 11343 flags = 0; 11344 } else { 11345 fn = tg3_interrupt; 11346 if (tg3_flag(tp, TAGGED_STATUS)) 11347 fn = tg3_interrupt_tagged; 11348 flags = IRQF_SHARED; 11349 } 11350 11351 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); 11352 } 11353 11354 static int tg3_test_interrupt(struct tg3 *tp) 11355 { 11356 struct tg3_napi *tnapi = &tp->napi[0]; 11357 struct net_device *dev = tp->dev; 11358 int err, i, intr_ok = 0; 11359 u32 val; 11360 11361 if (!netif_running(dev)) 11362 return -ENODEV; 11363 11364 tg3_disable_ints(tp); 11365 11366 free_irq(tnapi->irq_vec, tnapi); 11367 11368 /* 11369 * Turn off MSI one shot mode. Otherwise this test has no 11370 * observable way to know whether the interrupt was delivered. 11371 */ 11372 if (tg3_flag(tp, 57765_PLUS)) { 11373 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; 11374 tw32(MSGINT_MODE, val); 11375 } 11376 11377 err = request_irq(tnapi->irq_vec, tg3_test_isr, 11378 IRQF_SHARED, dev->name, tnapi); 11379 if (err) 11380 return err; 11381 11382 tnapi->hw_status->status &= ~SD_STATUS_UPDATED; 11383 tg3_enable_ints(tp); 11384 11385 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 11386 tnapi->coal_now); 11387 11388 for (i = 0; i < 5; i++) { 11389 u32 int_mbox, misc_host_ctrl; 11390 11391 int_mbox = tr32_mailbox(tnapi->int_mbox); 11392 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 11393 11394 if ((int_mbox != 0) || 11395 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) { 11396 intr_ok = 1; 11397 break; 11398 } 11399 11400 if (tg3_flag(tp, 57765_PLUS) && 11401 tnapi->hw_status->status_tag != tnapi->last_tag) 11402 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); 11403 11404 msleep(10); 11405 } 11406 11407 tg3_disable_ints(tp); 11408 11409 free_irq(tnapi->irq_vec, tnapi); 11410 11411 err = tg3_request_irq(tp, 0); 11412 11413 if (err) 11414 return err; 11415 11416 if (intr_ok) { 11417 /* Reenable MSI one shot mode. 
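* (it was disabled above so that the test had an observable way to
* tell that the interrupt arrived).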
*/ 11418 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) { 11419 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; 11420 tw32(MSGINT_MODE, val); 11421 } 11422 return 0; 11423 } 11424 11425 return -EIO; 11426 } 11427 11428 /* Returns 0 if the MSI test succeeds, or if it fails and INTx mode is 11429 * successfully restored. 11430 */ 11431 static int tg3_test_msi(struct tg3 *tp) 11432 { 11433 int err; 11434 u16 pci_cmd; 11435 11436 if (!tg3_flag(tp, USING_MSI)) 11437 return 0; 11438 11439 /* Turn off SERR reporting in case MSI terminates with Master 11440 * Abort. 11441 */ 11442 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 11443 pci_write_config_word(tp->pdev, PCI_COMMAND, 11444 pci_cmd & ~PCI_COMMAND_SERR); 11445 11446 err = tg3_test_interrupt(tp); 11447 11448 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 11449 11450 if (!err) 11451 return 0; 11452 11453 /* other failures */ 11454 if (err != -EIO) 11455 return err; 11456 11457 /* MSI test failed, go back to INTx mode */ 11458 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching " 11459 "to INTx mode. Please report this failure to the PCI " 11460 "maintainer and include system chipset information\n"); 11461 11462 free_irq(tp->napi[0].irq_vec, &tp->napi[0]); 11463 11464 pci_disable_msi(tp->pdev); 11465 11466 tg3_flag_clear(tp, USING_MSI); 11467 tp->napi[0].irq_vec = tp->pdev->irq; 11468 11469 err = tg3_request_irq(tp, 0); 11470 if (err) 11471 return err; 11472 11473 /* Need to reset the chip because the MSI cycle may have terminated 11474 * with Master Abort. 11475 */ 11476 tg3_full_lock(tp, 1); 11477 11478 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11479 err = tg3_init_hw(tp, true); 11480 11481 tg3_full_unlock(tp); 11482 11483 if (err) 11484 free_irq(tp->napi[0].irq_vec, &tp->napi[0]); 11485 11486 return err; 11487 } 11488 11489 static int tg3_request_firmware(struct tg3 *tp) 11490 { 11491 const struct tg3_firmware_hdr *fw_hdr; 11492 11493 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) { 11494 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n", 11495 tp->fw_needed); 11496 return -ENOENT; 11497 } 11498 11499 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 11500 11501 /* Firmware blob starts with version numbers, followed by 11502 * start address and _full_ length including BSS sections 11503 * (which must be longer than the actual data, of course). 11504 */ 11505 11506 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */ 11507 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) { 11508 netdev_err(tp->dev, "bogus length %d in \"%s\"\n", 11509 tp->fw_len, tp->fw_needed); 11510 release_firmware(tp->fw); 11511 tp->fw = NULL; 11512 return -EINVAL; 11513 } 11514 11515 /* We no longer need firmware; we have it. */ 11516 tp->fw_needed = NULL; 11517 return 0; 11518 } 11519 11520 static u32 tg3_irq_count(struct tg3 *tp) 11521 { 11522 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt); 11523 11524 if (irq_cnt > 1) { 11525 /* We want as many rx rings enabled as there are cpus. 11526 * In multiqueue MSI-X mode, the first MSI-X vector 11527 * only deals with link interrupts, etc, so we add 11528 * one to the number of vectors we are requesting.
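* For example, with rxq_cnt = 4 this requests five vectors: vector 0
* for link and error events plus one per RX ring, capped at irq_max.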
11529 */ 11530 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max); 11531 } 11532 11533 return irq_cnt; 11534 } 11535 11536 static bool tg3_enable_msix(struct tg3 *tp) 11537 { 11538 int i, rc; 11539 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS]; 11540 11541 tp->txq_cnt = tp->txq_req; 11542 tp->rxq_cnt = tp->rxq_req; 11543 if (!tp->rxq_cnt) 11544 tp->rxq_cnt = netif_get_num_default_rss_queues(); 11545 if (tp->rxq_cnt > tp->rxq_max) 11546 tp->rxq_cnt = tp->rxq_max; 11547 11548 /* Disable multiple TX rings by default. Simple round-robin hardware 11549 * scheduling of the TX rings can cause starvation of rings with 11550 * small packets when other rings have TSO or jumbo packets. 11551 */ 11552 if (!tp->txq_req) 11553 tp->txq_cnt = 1; 11554 11555 tp->irq_cnt = tg3_irq_count(tp); 11556 11557 for (i = 0; i < tp->irq_max; i++) { 11558 msix_ent[i].entry = i; 11559 msix_ent[i].vector = 0; 11560 } 11561 11562 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt); 11563 if (rc < 0) { 11564 return false; 11565 } else if (rc < tp->irq_cnt) { 11566 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n", 11567 tp->irq_cnt, rc); 11568 tp->irq_cnt = rc; 11569 tp->rxq_cnt = max(rc - 1, 1); 11570 if (tp->txq_cnt) 11571 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max); 11572 } 11573 11574 for (i = 0; i < tp->irq_max; i++) 11575 tp->napi[i].irq_vec = msix_ent[i].vector; 11576 11577 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) { 11578 pci_disable_msix(tp->pdev); 11579 return false; 11580 } 11581 11582 if (tp->irq_cnt == 1) 11583 return true; 11584 11585 tg3_flag_set(tp, ENABLE_RSS); 11586 11587 if (tp->txq_cnt > 1) 11588 tg3_flag_set(tp, ENABLE_TSS); 11589 11590 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt); 11591 11592 return true; 11593 } 11594 11595 static void tg3_ints_init(struct tg3 *tp) 11596 { 11597 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) && 11598 !tg3_flag(tp, TAGGED_STATUS)) { 11599 /* All MSI supporting chips should support tagged 11600 * status. Assert that this is the case. 11601 */ 11602 netdev_warn(tp->dev, 11603 "MSI without TAGGED_STATUS? 
Not using MSI\n"); 11604 goto defcfg; 11605 } 11606 11607 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp)) 11608 tg3_flag_set(tp, USING_MSIX); 11609 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0) 11610 tg3_flag_set(tp, USING_MSI); 11611 11612 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { 11613 u32 msi_mode = tr32(MSGINT_MODE); 11614 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) 11615 msi_mode |= MSGINT_MODE_MULTIVEC_EN; 11616 if (!tg3_flag(tp, 1SHOT_MSI)) 11617 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE; 11618 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); 11619 } 11620 defcfg: 11621 if (!tg3_flag(tp, USING_MSIX)) { 11622 tp->irq_cnt = 1; 11623 tp->napi[0].irq_vec = tp->pdev->irq; 11624 } 11625 11626 if (tp->irq_cnt == 1) { 11627 tp->txq_cnt = 1; 11628 tp->rxq_cnt = 1; 11629 netif_set_real_num_tx_queues(tp->dev, 1); 11630 netif_set_real_num_rx_queues(tp->dev, 1); 11631 } 11632 } 11633 11634 static void tg3_ints_fini(struct tg3 *tp) 11635 { 11636 if (tg3_flag(tp, USING_MSIX)) 11637 pci_disable_msix(tp->pdev); 11638 else if (tg3_flag(tp, USING_MSI)) 11639 pci_disable_msi(tp->pdev); 11640 tg3_flag_clear(tp, USING_MSI); 11641 tg3_flag_clear(tp, USING_MSIX); 11642 tg3_flag_clear(tp, ENABLE_RSS); 11643 tg3_flag_clear(tp, ENABLE_TSS); 11644 } 11645 11646 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, 11647 bool init) 11648 { 11649 struct net_device *dev = tp->dev; 11650 int i, err; 11651 11652 /* 11653 * Setup interrupts first so we know how 11654 * many NAPI resources to allocate 11655 */ 11656 tg3_ints_init(tp); 11657 11658 tg3_rss_check_indir_tbl(tp); 11659 11660 /* The placement of this call is tied 11661 * to the setup and use of Host TX descriptors. 11662 */ 11663 err = tg3_alloc_consistent(tp); 11664 if (err) 11665 goto out_ints_fini; 11666 11667 tg3_napi_init(tp); 11668 11669 tg3_napi_enable(tp); 11670 11671 for (i = 0; i < tp->irq_cnt; i++) { 11672 err = tg3_request_irq(tp, i); 11673 if (err) { 11674 for (i--; i >= 0; i--) { 11675 struct tg3_napi *tnapi = &tp->napi[i]; 11676 11677 free_irq(tnapi->irq_vec, tnapi); 11678 } 11679 goto out_napi_fini; 11680 } 11681 } 11682 11683 tg3_full_lock(tp, 0); 11684 11685 if (init) 11686 tg3_ape_driver_state_change(tp, RESET_KIND_INIT); 11687 11688 err = tg3_init_hw(tp, reset_phy); 11689 if (err) { 11690 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11691 tg3_free_rings(tp); 11692 } 11693 11694 tg3_full_unlock(tp); 11695 11696 if (err) 11697 goto out_free_irq; 11698 11699 if (test_irq && tg3_flag(tp, USING_MSI)) { 11700 err = tg3_test_msi(tp); 11701 11702 if (err) { 11703 tg3_full_lock(tp, 0); 11704 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11705 tg3_free_rings(tp); 11706 tg3_full_unlock(tp); 11707 11708 goto out_napi_fini; 11709 } 11710 11711 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) { 11712 u32 val = tr32(PCIE_TRANSACTION_CFG); 11713 11714 tw32(PCIE_TRANSACTION_CFG, 11715 val | PCIE_TRANS_CFG_1SHOT_MSI); 11716 } 11717 } 11718 11719 tg3_phy_start(tp); 11720 11721 tg3_hwmon_open(tp); 11722 11723 tg3_full_lock(tp, 0); 11724 11725 tg3_timer_start(tp); 11726 tg3_flag_set(tp, INIT_COMPLETE); 11727 tg3_enable_ints(tp); 11728 11729 tg3_ptp_resume(tp); 11730 11731 tg3_full_unlock(tp); 11732 11733 netif_tx_start_all_queues(dev); 11734 11735 /* 11736 * Reset loopback feature if it was turned on while the device was down 11737 * make sure that it's installed properly now. 
11738 */ 11739 if (dev->features & NETIF_F_LOOPBACK) 11740 tg3_set_loopback(dev, dev->features); 11741 11742 return 0; 11743 11744 out_free_irq: 11745 for (i = tp->irq_cnt - 1; i >= 0; i--) { 11746 struct tg3_napi *tnapi = &tp->napi[i]; 11747 free_irq(tnapi->irq_vec, tnapi); 11748 } 11749 11750 out_napi_fini: 11751 tg3_napi_disable(tp); 11752 tg3_napi_fini(tp); 11753 tg3_free_consistent(tp); 11754 11755 out_ints_fini: 11756 tg3_ints_fini(tp); 11757 11758 return err; 11759 } 11760 11761 static void tg3_stop(struct tg3 *tp) 11762 { 11763 int i; 11764 11765 tg3_reset_task_cancel(tp); 11766 tg3_netif_stop(tp); 11767 11768 tg3_timer_stop(tp); 11769 11770 tg3_hwmon_close(tp); 11771 11772 tg3_phy_stop(tp); 11773 11774 tg3_full_lock(tp, 1); 11775 11776 tg3_disable_ints(tp); 11777 11778 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11779 tg3_free_rings(tp); 11780 tg3_flag_clear(tp, INIT_COMPLETE); 11781 11782 tg3_full_unlock(tp); 11783 11784 for (i = tp->irq_cnt - 1; i >= 0; i--) { 11785 struct tg3_napi *tnapi = &tp->napi[i]; 11786 free_irq(tnapi->irq_vec, tnapi); 11787 } 11788 11789 tg3_ints_fini(tp); 11790 11791 tg3_napi_fini(tp); 11792 11793 tg3_free_consistent(tp); 11794 } 11795 11796 static int tg3_open(struct net_device *dev) 11797 { 11798 struct tg3 *tp = netdev_priv(dev); 11799 int err; 11800 11801 if (tp->pcierr_recovery) { 11802 netdev_err(dev, "Failed to open device. PCI error recovery " 11803 "in progress\n"); 11804 return -EAGAIN; 11805 } 11806 11807 if (tp->fw_needed) { 11808 err = tg3_request_firmware(tp); 11809 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 11810 if (err) { 11811 netdev_warn(tp->dev, "EEE capability disabled\n"); 11812 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; 11813 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { 11814 netdev_warn(tp->dev, "EEE capability restored\n"); 11815 tp->phy_flags |= TG3_PHYFLG_EEE_CAP; 11816 } 11817 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { 11818 if (err) 11819 return err; 11820 } else if (err) { 11821 netdev_warn(tp->dev, "TSO capability disabled\n"); 11822 tg3_flag_clear(tp, TSO_CAPABLE); 11823 } else if (!tg3_flag(tp, TSO_CAPABLE)) { 11824 netdev_notice(tp->dev, "TSO capability restored\n"); 11825 tg3_flag_set(tp, TSO_CAPABLE); 11826 } 11827 } 11828 11829 tg3_carrier_off(tp); 11830 11831 err = tg3_power_up(tp); 11832 if (err) 11833 return err; 11834 11835 tg3_full_lock(tp, 0); 11836 11837 tg3_disable_ints(tp); 11838 tg3_flag_clear(tp, INIT_COMPLETE); 11839 11840 tg3_full_unlock(tp); 11841 11842 err = tg3_start(tp, 11843 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN), 11844 true, true); 11845 if (err) { 11846 tg3_frob_aux_power(tp, false); 11847 pci_set_power_state(tp->pdev, PCI_D3hot); 11848 } 11849 11850 return err; 11851 } 11852 11853 static int tg3_close(struct net_device *dev) 11854 { 11855 struct tg3 *tp = netdev_priv(dev); 11856 11857 if (tp->pcierr_recovery) { 11858 netdev_err(dev, "Failed to close device. 
PCI error recovery " 11859 "in progress\n"); 11860 return -EAGAIN; 11861 } 11862 11863 tg3_stop(tp); 11864 11865 if (pci_device_is_present(tp->pdev)) { 11866 tg3_power_down_prepare(tp); 11867 11868 tg3_carrier_off(tp); 11869 } 11870 return 0; 11871 } 11872 11873 static inline u64 get_stat64(tg3_stat64_t *val) 11874 { 11875 return ((u64)val->high << 32) | ((u64)val->low); 11876 } 11877 11878 static u64 tg3_calc_crc_errors(struct tg3 *tp) 11879 { 11880 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11881 11882 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 11883 (tg3_asic_rev(tp) == ASIC_REV_5700 || 11884 tg3_asic_rev(tp) == ASIC_REV_5701)) { 11885 u32 val; 11886 11887 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) { 11888 tg3_writephy(tp, MII_TG3_TEST1, 11889 val | MII_TG3_TEST1_CRC_EN); 11890 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val); 11891 } else 11892 val = 0; 11893 11894 tp->phy_crc_errors += val; 11895 11896 return tp->phy_crc_errors; 11897 } 11898 11899 return get_stat64(&hw_stats->rx_fcs_errors); 11900 } 11901 11902 #define ESTAT_ADD(member) \ 11903 estats->member = old_estats->member + \ 11904 get_stat64(&hw_stats->member) 11905 11906 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats) 11907 { 11908 struct tg3_ethtool_stats *old_estats = &tp->estats_prev; 11909 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11910 11911 ESTAT_ADD(rx_octets); 11912 ESTAT_ADD(rx_fragments); 11913 ESTAT_ADD(rx_ucast_packets); 11914 ESTAT_ADD(rx_mcast_packets); 11915 ESTAT_ADD(rx_bcast_packets); 11916 ESTAT_ADD(rx_fcs_errors); 11917 ESTAT_ADD(rx_align_errors); 11918 ESTAT_ADD(rx_xon_pause_rcvd); 11919 ESTAT_ADD(rx_xoff_pause_rcvd); 11920 ESTAT_ADD(rx_mac_ctrl_rcvd); 11921 ESTAT_ADD(rx_xoff_entered); 11922 ESTAT_ADD(rx_frame_too_long_errors); 11923 ESTAT_ADD(rx_jabbers); 11924 ESTAT_ADD(rx_undersize_packets); 11925 ESTAT_ADD(rx_in_length_errors); 11926 ESTAT_ADD(rx_out_length_errors); 11927 ESTAT_ADD(rx_64_or_less_octet_packets); 11928 ESTAT_ADD(rx_65_to_127_octet_packets); 11929 ESTAT_ADD(rx_128_to_255_octet_packets); 11930 ESTAT_ADD(rx_256_to_511_octet_packets); 11931 ESTAT_ADD(rx_512_to_1023_octet_packets); 11932 ESTAT_ADD(rx_1024_to_1522_octet_packets); 11933 ESTAT_ADD(rx_1523_to_2047_octet_packets); 11934 ESTAT_ADD(rx_2048_to_4095_octet_packets); 11935 ESTAT_ADD(rx_4096_to_8191_octet_packets); 11936 ESTAT_ADD(rx_8192_to_9022_octet_packets); 11937 11938 ESTAT_ADD(tx_octets); 11939 ESTAT_ADD(tx_collisions); 11940 ESTAT_ADD(tx_xon_sent); 11941 ESTAT_ADD(tx_xoff_sent); 11942 ESTAT_ADD(tx_flow_control); 11943 ESTAT_ADD(tx_mac_errors); 11944 ESTAT_ADD(tx_single_collisions); 11945 ESTAT_ADD(tx_mult_collisions); 11946 ESTAT_ADD(tx_deferred); 11947 ESTAT_ADD(tx_excessive_collisions); 11948 ESTAT_ADD(tx_late_collisions); 11949 ESTAT_ADD(tx_collide_2times); 11950 ESTAT_ADD(tx_collide_3times); 11951 ESTAT_ADD(tx_collide_4times); 11952 ESTAT_ADD(tx_collide_5times); 11953 ESTAT_ADD(tx_collide_6times); 11954 ESTAT_ADD(tx_collide_7times); 11955 ESTAT_ADD(tx_collide_8times); 11956 ESTAT_ADD(tx_collide_9times); 11957 ESTAT_ADD(tx_collide_10times); 11958 ESTAT_ADD(tx_collide_11times); 11959 ESTAT_ADD(tx_collide_12times); 11960 ESTAT_ADD(tx_collide_13times); 11961 ESTAT_ADD(tx_collide_14times); 11962 ESTAT_ADD(tx_collide_15times); 11963 ESTAT_ADD(tx_ucast_packets); 11964 ESTAT_ADD(tx_mcast_packets); 11965 ESTAT_ADD(tx_bcast_packets); 11966 ESTAT_ADD(tx_carrier_sense_errors); 11967 ESTAT_ADD(tx_discards); 11968 ESTAT_ADD(tx_errors); 11969 11970 ESTAT_ADD(dma_writeq_full); 11971 
ESTAT_ADD(dma_write_prioq_full); 11972 ESTAT_ADD(rxbds_empty); 11973 ESTAT_ADD(rx_discards); 11974 ESTAT_ADD(rx_errors); 11975 ESTAT_ADD(rx_threshold_hit); 11976 11977 ESTAT_ADD(dma_readq_full); 11978 ESTAT_ADD(dma_read_prioq_full); 11979 ESTAT_ADD(tx_comp_queue_full); 11980 11981 ESTAT_ADD(ring_set_send_prod_index); 11982 ESTAT_ADD(ring_status_update); 11983 ESTAT_ADD(nic_irqs); 11984 ESTAT_ADD(nic_avoided_irqs); 11985 ESTAT_ADD(nic_tx_threshold_hit); 11986 11987 ESTAT_ADD(mbuf_lwm_thresh_hit); 11988 } 11989 11990 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats) 11991 { 11992 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev; 11993 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11994 unsigned long rx_dropped; 11995 unsigned long tx_dropped; 11996 int i; 11997 11998 stats->rx_packets = old_stats->rx_packets + 11999 get_stat64(&hw_stats->rx_ucast_packets) + 12000 get_stat64(&hw_stats->rx_mcast_packets) + 12001 get_stat64(&hw_stats->rx_bcast_packets); 12002 12003 stats->tx_packets = old_stats->tx_packets + 12004 get_stat64(&hw_stats->tx_ucast_packets) + 12005 get_stat64(&hw_stats->tx_mcast_packets) + 12006 get_stat64(&hw_stats->tx_bcast_packets); 12007 12008 stats->rx_bytes = old_stats->rx_bytes + 12009 get_stat64(&hw_stats->rx_octets); 12010 stats->tx_bytes = old_stats->tx_bytes + 12011 get_stat64(&hw_stats->tx_octets); 12012 12013 stats->rx_errors = old_stats->rx_errors + 12014 get_stat64(&hw_stats->rx_errors); 12015 stats->tx_errors = old_stats->tx_errors + 12016 get_stat64(&hw_stats->tx_errors) + 12017 get_stat64(&hw_stats->tx_mac_errors) + 12018 get_stat64(&hw_stats->tx_carrier_sense_errors) + 12019 get_stat64(&hw_stats->tx_discards); 12020 12021 stats->multicast = old_stats->multicast + 12022 get_stat64(&hw_stats->rx_mcast_packets); 12023 stats->collisions = old_stats->collisions + 12024 get_stat64(&hw_stats->tx_collisions); 12025 12026 stats->rx_length_errors = old_stats->rx_length_errors + 12027 get_stat64(&hw_stats->rx_frame_too_long_errors) + 12028 get_stat64(&hw_stats->rx_undersize_packets); 12029 12030 stats->rx_frame_errors = old_stats->rx_frame_errors + 12031 get_stat64(&hw_stats->rx_align_errors); 12032 stats->tx_aborted_errors = old_stats->tx_aborted_errors + 12033 get_stat64(&hw_stats->tx_discards); 12034 stats->tx_carrier_errors = old_stats->tx_carrier_errors + 12035 get_stat64(&hw_stats->tx_carrier_sense_errors); 12036 12037 stats->rx_crc_errors = old_stats->rx_crc_errors + 12038 tg3_calc_crc_errors(tp); 12039 12040 stats->rx_missed_errors = old_stats->rx_missed_errors + 12041 get_stat64(&hw_stats->rx_discards); 12042 12043 /* Aggregate per-queue counters. The per-queue counters are updated 12044 * by a single writer, race-free. The result computed by this loop 12045 * might not be 100% accurate (counters can be updated in the middle of 12046 * the loop) but the next tg3_get_nstats() will recompute the current 12047 * value so it is acceptable. 12048 * 12049 * Note that these counters wrap around at 4G on 32bit machines. 
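* (unsigned long is 32 bits wide there, so the per-queue sums wrap at
* 2^32).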
12050 */ 12051 rx_dropped = (unsigned long)(old_stats->rx_dropped); 12052 tx_dropped = (unsigned long)(old_stats->tx_dropped); 12053 12054 for (i = 0; i < tp->irq_cnt; i++) { 12055 struct tg3_napi *tnapi = &tp->napi[i]; 12056 12057 rx_dropped += tnapi->rx_dropped; 12058 tx_dropped += tnapi->tx_dropped; 12059 } 12060 12061 stats->rx_dropped = rx_dropped; 12062 stats->tx_dropped = tx_dropped; 12063 } 12064 12065 static int tg3_get_regs_len(struct net_device *dev) 12066 { 12067 return TG3_REG_BLK_SIZE; 12068 } 12069 12070 static void tg3_get_regs(struct net_device *dev, 12071 struct ethtool_regs *regs, void *_p) 12072 { 12073 struct tg3 *tp = netdev_priv(dev); 12074 12075 regs->version = 0; 12076 12077 memset(_p, 0, TG3_REG_BLK_SIZE); 12078 12079 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 12080 return; 12081 12082 tg3_full_lock(tp, 0); 12083 12084 tg3_dump_legacy_regs(tp, (u32 *)_p); 12085 12086 tg3_full_unlock(tp); 12087 } 12088 12089 static int tg3_get_eeprom_len(struct net_device *dev) 12090 { 12091 struct tg3 *tp = netdev_priv(dev); 12092 12093 return tp->nvram_size; 12094 } 12095 12096 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 12097 { 12098 struct tg3 *tp = netdev_priv(dev); 12099 int ret, cpmu_restore = 0; 12100 u8 *pd; 12101 u32 i, offset, len, b_offset, b_count, cpmu_val = 0; 12102 __be32 val; 12103 12104 if (tg3_flag(tp, NO_NVRAM)) 12105 return -EINVAL; 12106 12107 offset = eeprom->offset; 12108 len = eeprom->len; 12109 eeprom->len = 0; 12110 12111 eeprom->magic = TG3_EEPROM_MAGIC; 12112 12113 /* Override clock, link aware and link idle modes */ 12114 if (tg3_flag(tp, CPMU_PRESENT)) { 12115 cpmu_val = tr32(TG3_CPMU_CTRL); 12116 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE | 12117 CPMU_CTRL_LINK_IDLE_MODE)) { 12118 tw32(TG3_CPMU_CTRL, cpmu_val & 12119 ~(CPMU_CTRL_LINK_AWARE_MODE | 12120 CPMU_CTRL_LINK_IDLE_MODE)); 12121 cpmu_restore = 1; 12122 } 12123 } 12124 tg3_override_clk(tp); 12125 12126 if (offset & 3) { 12127 /* adjustments to start on required 4 byte boundary */ 12128 b_offset = offset & 3; 12129 b_count = 4 - b_offset; 12130 if (b_count > len) { 12131 /* i.e. 
offset=1 len=2 */ 12132 b_count = len; 12133 } 12134 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); 12135 if (ret) 12136 goto eeprom_done; 12137 memcpy(data, ((char *)&val) + b_offset, b_count); 12138 len -= b_count; 12139 offset += b_count; 12140 eeprom->len += b_count; 12141 } 12142 12143 /* read bytes up to the last 4 byte boundary */ 12144 pd = &data[eeprom->len]; 12145 for (i = 0; i < (len - (len & 3)); i += 4) { 12146 ret = tg3_nvram_read_be32(tp, offset + i, &val); 12147 if (ret) { 12148 if (i) 12149 i -= 4; 12150 eeprom->len += i; 12151 goto eeprom_done; 12152 } 12153 memcpy(pd + i, &val, 4); 12154 if (need_resched()) { 12155 if (signal_pending(current)) { 12156 eeprom->len += i; 12157 ret = -EINTR; 12158 goto eeprom_done; 12159 } 12160 cond_resched(); 12161 } 12162 } 12163 eeprom->len += i; 12164 12165 if (len & 3) { 12166 /* read last bytes not ending on 4 byte boundary */ 12167 pd = &data[eeprom->len]; 12168 b_count = len & 3; 12169 b_offset = offset + len - b_count; 12170 ret = tg3_nvram_read_be32(tp, b_offset, &val); 12171 if (ret) 12172 goto eeprom_done; 12173 memcpy(pd, &val, b_count); 12174 eeprom->len += b_count; 12175 } 12176 ret = 0; 12177 12178 eeprom_done: 12179 /* Restore clock, link aware and link idle modes */ 12180 tg3_restore_clk(tp); 12181 if (cpmu_restore) 12182 tw32(TG3_CPMU_CTRL, cpmu_val); 12183 12184 return ret; 12185 } 12186 12187 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 12188 { 12189 struct tg3 *tp = netdev_priv(dev); 12190 int ret; 12191 u32 offset, len, b_offset, odd_len; 12192 u8 *buf; 12193 __be32 start = 0, end; 12194 12195 if (tg3_flag(tp, NO_NVRAM) || 12196 eeprom->magic != TG3_EEPROM_MAGIC) 12197 return -EINVAL; 12198 12199 offset = eeprom->offset; 12200 len = eeprom->len; 12201 12202 if ((b_offset = (offset & 3))) { 12203 /* adjustments to start on required 4 byte boundary */ 12204 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start); 12205 if (ret) 12206 return ret; 12207 len += b_offset; 12208 offset &= ~3; 12209 if (len < 4) 12210 len = 4; 12211 } 12212 12213 odd_len = 0; 12214 if (len & 3) { 12215 /* adjustments to end on required 4 byte boundary */ 12216 odd_len = 1; 12217 len = (len + 3) & ~3; 12218 ret = tg3_nvram_read_be32(tp, offset+len-4, &end); 12219 if (ret) 12220 return ret; 12221 } 12222 12223 buf = data; 12224 if (b_offset || odd_len) { 12225 buf = kmalloc(len, GFP_KERNEL); 12226 if (!buf) 12227 return -ENOMEM; 12228 if (b_offset) 12229 memcpy(buf, &start, 4); 12230 if (odd_len) 12231 memcpy(buf+len-4, &end, 4); 12232 memcpy(buf + b_offset, data, eeprom->len); 12233 } 12234 12235 ret = tg3_nvram_write_block(tp, offset, len, buf); 12236 12237 if (buf != data) 12238 kfree(buf); 12239 12240 return ret; 12241 } 12242 12243 static int tg3_get_link_ksettings(struct net_device *dev, 12244 struct ethtool_link_ksettings *cmd) 12245 { 12246 struct tg3 *tp = netdev_priv(dev); 12247 u32 supported, advertising; 12248 12249 if (tg3_flag(tp, USE_PHYLIB)) { 12250 struct phy_device *phydev; 12251 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12252 return -EAGAIN; 12253 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12254 phy_ethtool_ksettings_get(phydev, cmd); 12255 12256 return 0; 12257 } 12258 12259 supported = (SUPPORTED_Autoneg); 12260 12261 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) 12262 supported |= (SUPPORTED_1000baseT_Half | 12263 SUPPORTED_1000baseT_Full); 12264 12265 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 12266 supported |= (SUPPORTED_100baseT_Half | 12267 
SUPPORTED_100baseT_Full | 12268 SUPPORTED_10baseT_Half | 12269 SUPPORTED_10baseT_Full | 12270 SUPPORTED_TP); 12271 cmd->base.port = PORT_TP; 12272 } else { 12273 supported |= SUPPORTED_FIBRE; 12274 cmd->base.port = PORT_FIBRE; 12275 } 12276 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 12277 supported); 12278 12279 advertising = tp->link_config.advertising; 12280 if (tg3_flag(tp, PAUSE_AUTONEG)) { 12281 if (tp->link_config.flowctrl & FLOW_CTRL_RX) { 12282 if (tp->link_config.flowctrl & FLOW_CTRL_TX) { 12283 advertising |= ADVERTISED_Pause; 12284 } else { 12285 advertising |= ADVERTISED_Pause | 12286 ADVERTISED_Asym_Pause; 12287 } 12288 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) { 12289 advertising |= ADVERTISED_Asym_Pause; 12290 } 12291 } 12292 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 12293 advertising); 12294 12295 if (netif_running(dev) && tp->link_up) { 12296 cmd->base.speed = tp->link_config.active_speed; 12297 cmd->base.duplex = tp->link_config.active_duplex; 12298 ethtool_convert_legacy_u32_to_link_mode( 12299 cmd->link_modes.lp_advertising, 12300 tp->link_config.rmt_adv); 12301 12302 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 12303 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE) 12304 cmd->base.eth_tp_mdix = ETH_TP_MDI_X; 12305 else 12306 cmd->base.eth_tp_mdix = ETH_TP_MDI; 12307 } 12308 } else { 12309 cmd->base.speed = SPEED_UNKNOWN; 12310 cmd->base.duplex = DUPLEX_UNKNOWN; 12311 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; 12312 } 12313 cmd->base.phy_address = tp->phy_addr; 12314 cmd->base.autoneg = tp->link_config.autoneg; 12315 return 0; 12316 } 12317 12318 static int tg3_set_link_ksettings(struct net_device *dev, 12319 const struct ethtool_link_ksettings *cmd) 12320 { 12321 struct tg3 *tp = netdev_priv(dev); 12322 u32 speed = cmd->base.speed; 12323 u32 advertising; 12324 12325 if (tg3_flag(tp, USE_PHYLIB)) { 12326 struct phy_device *phydev; 12327 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12328 return -EAGAIN; 12329 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12330 return phy_ethtool_ksettings_set(phydev, cmd); 12331 } 12332 12333 if (cmd->base.autoneg != AUTONEG_ENABLE && 12334 cmd->base.autoneg != AUTONEG_DISABLE) 12335 return -EINVAL; 12336 12337 if (cmd->base.autoneg == AUTONEG_DISABLE && 12338 cmd->base.duplex != DUPLEX_FULL && 12339 cmd->base.duplex != DUPLEX_HALF) 12340 return -EINVAL; 12341 12342 ethtool_convert_link_mode_to_legacy_u32(&advertising, 12343 cmd->link_modes.advertising); 12344 12345 if (cmd->base.autoneg == AUTONEG_ENABLE) { 12346 u32 mask = ADVERTISED_Autoneg | 12347 ADVERTISED_Pause | 12348 ADVERTISED_Asym_Pause; 12349 12350 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) 12351 mask |= ADVERTISED_1000baseT_Half | 12352 ADVERTISED_1000baseT_Full; 12353 12354 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 12355 mask |= ADVERTISED_100baseT_Half | 12356 ADVERTISED_100baseT_Full | 12357 ADVERTISED_10baseT_Half | 12358 ADVERTISED_10baseT_Full | 12359 ADVERTISED_TP; 12360 else 12361 mask |= ADVERTISED_FIBRE; 12362 12363 if (advertising & ~mask) 12364 return -EINVAL; 12365 12366 mask &= (ADVERTISED_1000baseT_Half | 12367 ADVERTISED_1000baseT_Full | 12368 ADVERTISED_100baseT_Half | 12369 ADVERTISED_100baseT_Full | 12370 ADVERTISED_10baseT_Half | 12371 ADVERTISED_10baseT_Full); 12372 12373 advertising &= mask; 12374 } else { 12375 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) { 12376 if (speed != SPEED_1000) 12377 return -EINVAL; 12378 12379 if (cmd->base.duplex != DUPLEX_FULL) 12380 return 
-EINVAL; 12381 } else { 12382 if (speed != SPEED_100 && 12383 speed != SPEED_10) 12384 return -EINVAL; 12385 } 12386 } 12387 12388 tg3_full_lock(tp, 0); 12389 12390 tp->link_config.autoneg = cmd->base.autoneg; 12391 if (cmd->base.autoneg == AUTONEG_ENABLE) { 12392 tp->link_config.advertising = (advertising | 12393 ADVERTISED_Autoneg); 12394 tp->link_config.speed = SPEED_UNKNOWN; 12395 tp->link_config.duplex = DUPLEX_UNKNOWN; 12396 } else { 12397 tp->link_config.advertising = 0; 12398 tp->link_config.speed = speed; 12399 tp->link_config.duplex = cmd->base.duplex; 12400 } 12401 12402 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 12403 12404 tg3_warn_mgmt_link_flap(tp); 12405 12406 if (netif_running(dev)) 12407 tg3_setup_phy(tp, true); 12408 12409 tg3_full_unlock(tp); 12410 12411 return 0; 12412 } 12413 12414 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 12415 { 12416 struct tg3 *tp = netdev_priv(dev); 12417 12418 strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); 12419 strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version)); 12420 strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info)); 12421 } 12422 12423 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 12424 { 12425 struct tg3 *tp = netdev_priv(dev); 12426 12427 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev)) 12428 wol->supported = WAKE_MAGIC; 12429 else 12430 wol->supported = 0; 12431 wol->wolopts = 0; 12432 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev)) 12433 wol->wolopts = WAKE_MAGIC; 12434 memset(&wol->sopass, 0, sizeof(wol->sopass)); 12435 } 12436 12437 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 12438 { 12439 struct tg3 *tp = netdev_priv(dev); 12440 struct device *dp = &tp->pdev->dev; 12441 12442 if (wol->wolopts & ~WAKE_MAGIC) 12443 return -EINVAL; 12444 if ((wol->wolopts & WAKE_MAGIC) && 12445 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp))) 12446 return -EINVAL; 12447 12448 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC); 12449 12450 if (device_may_wakeup(dp)) 12451 tg3_flag_set(tp, WOL_ENABLE); 12452 else 12453 tg3_flag_clear(tp, WOL_ENABLE); 12454 12455 return 0; 12456 } 12457 12458 static u32 tg3_get_msglevel(struct net_device *dev) 12459 { 12460 struct tg3 *tp = netdev_priv(dev); 12461 return tp->msg_enable; 12462 } 12463 12464 static void tg3_set_msglevel(struct net_device *dev, u32 value) 12465 { 12466 struct tg3 *tp = netdev_priv(dev); 12467 tp->msg_enable = value; 12468 } 12469 12470 static int tg3_nway_reset(struct net_device *dev) 12471 { 12472 struct tg3 *tp = netdev_priv(dev); 12473 int r; 12474 12475 if (!netif_running(dev)) 12476 return -EAGAIN; 12477 12478 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 12479 return -EINVAL; 12480 12481 tg3_warn_mgmt_link_flap(tp); 12482 12483 if (tg3_flag(tp, USE_PHYLIB)) { 12484 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12485 return -EAGAIN; 12486 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 12487 } else { 12488 u32 bmcr; 12489 12490 spin_lock_bh(&tp->lock); 12491 r = -EINVAL; 12492 tg3_readphy(tp, MII_BMCR, &bmcr); 12493 if (!tg3_readphy(tp, MII_BMCR, &bmcr) && 12494 ((bmcr & BMCR_ANENABLE) || 12495 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) { 12496 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | 12497 BMCR_ANENABLE); 12498 r = 0; 12499 } 12500 spin_unlock_bh(&tp->lock); 12501 } 12502 12503 return r; 12504 } 12505 12506 static void tg3_get_ringparam(struct net_device *dev, 12507 
struct ethtool_ringparam *ering, 12508 struct kernel_ethtool_ringparam *kernel_ering, 12509 struct netlink_ext_ack *extack) 12510 { 12511 struct tg3 *tp = netdev_priv(dev); 12512 12513 ering->rx_max_pending = tp->rx_std_ring_mask; 12514 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12515 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask; 12516 else 12517 ering->rx_jumbo_max_pending = 0; 12518 12519 ering->tx_max_pending = TG3_TX_RING_SIZE - 1; 12520 12521 ering->rx_pending = tp->rx_pending; 12522 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12523 ering->rx_jumbo_pending = tp->rx_jumbo_pending; 12524 else 12525 ering->rx_jumbo_pending = 0; 12526 12527 ering->tx_pending = tp->napi[0].tx_pending; 12528 } 12529 12530 static int tg3_set_ringparam(struct net_device *dev, 12531 struct ethtool_ringparam *ering, 12532 struct kernel_ethtool_ringparam *kernel_ering, 12533 struct netlink_ext_ack *extack) 12534 { 12535 struct tg3 *tp = netdev_priv(dev); 12536 int i, irq_sync = 0, err = 0; 12537 bool reset_phy = false; 12538 12539 if ((ering->rx_pending > tp->rx_std_ring_mask) || 12540 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || 12541 (ering->tx_pending > TG3_TX_RING_SIZE - 1) || 12542 (ering->tx_pending <= MAX_SKB_FRAGS) || 12543 (tg3_flag(tp, TSO_BUG) && 12544 (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) 12545 return -EINVAL; 12546 12547 if (netif_running(dev)) { 12548 tg3_phy_stop(tp); 12549 tg3_netif_stop(tp); 12550 irq_sync = 1; 12551 } 12552 12553 tg3_full_lock(tp, irq_sync); 12554 12555 tp->rx_pending = ering->rx_pending; 12556 12557 if (tg3_flag(tp, MAX_RXPEND_64) && 12558 tp->rx_pending > 63) 12559 tp->rx_pending = 63; 12560 12561 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12562 tp->rx_jumbo_pending = ering->rx_jumbo_pending; 12563 12564 for (i = 0; i < tp->irq_max; i++) 12565 tp->napi[i].tx_pending = ering->tx_pending; 12566 12567 if (netif_running(dev)) { 12568 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 12569 /* Reset PHY to avoid PHY lock up */ 12570 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 12571 tg3_asic_rev(tp) == ASIC_REV_5719 || 12572 tg3_asic_rev(tp) == ASIC_REV_5720) 12573 reset_phy = true; 12574 12575 err = tg3_restart_hw(tp, reset_phy); 12576 if (!err) 12577 tg3_netif_start(tp); 12578 } 12579 12580 tg3_full_unlock(tp); 12581 12582 if (irq_sync && !err) 12583 tg3_phy_start(tp); 12584 12585 return err; 12586 } 12587 12588 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 12589 { 12590 struct tg3 *tp = netdev_priv(dev); 12591 12592 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG); 12593 12594 if (tp->link_config.flowctrl & FLOW_CTRL_RX) 12595 epause->rx_pause = 1; 12596 else 12597 epause->rx_pause = 0; 12598 12599 if (tp->link_config.flowctrl & FLOW_CTRL_TX) 12600 epause->tx_pause = 1; 12601 else 12602 epause->tx_pause = 0; 12603 } 12604 12605 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 12606 { 12607 struct tg3 *tp = netdev_priv(dev); 12608 int err = 0; 12609 bool reset_phy = false; 12610 12611 if (tp->link_config.autoneg == AUTONEG_ENABLE) 12612 tg3_warn_mgmt_link_flap(tp); 12613 12614 if (tg3_flag(tp, USE_PHYLIB)) { 12615 struct phy_device *phydev; 12616 12617 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12618 12619 if (!phy_validate_pause(phydev, epause)) 12620 return -EINVAL; 12621 12622 tp->link_config.flowctrl = 0; 12623 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause); 12624 if (epause->rx_pause) { 12625 tp->link_config.flowctrl |= FLOW_CTRL_RX; 12626 12627 if (epause->tx_pause) { 12628 
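				/* Both rx_pause and tx_pause were requested
				 * here, so flow control is enabled in both
				 * directions (the symmetric pause case).
				 */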
tp->link_config.flowctrl |= FLOW_CTRL_TX; 12629 } 12630 } else if (epause->tx_pause) { 12631 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12632 } 12633 12634 if (epause->autoneg) 12635 tg3_flag_set(tp, PAUSE_AUTONEG); 12636 else 12637 tg3_flag_clear(tp, PAUSE_AUTONEG); 12638 12639 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { 12640 if (phydev->autoneg) { 12641 /* phy_set_asym_pause() will 12642 * renegotiate the link to inform our 12643 * link partner of our flow control 12644 * settings, even if the flow control 12645 * is forced. Let tg3_adjust_link() 12646 * do the final flow control setup. 12647 */ 12648 return 0; 12649 } 12650 12651 if (!epause->autoneg) 12652 tg3_setup_flow_control(tp, 0, 0); 12653 } 12654 } else { 12655 int irq_sync = 0; 12656 12657 if (netif_running(dev)) { 12658 tg3_netif_stop(tp); 12659 irq_sync = 1; 12660 } 12661 12662 tg3_full_lock(tp, irq_sync); 12663 12664 if (epause->autoneg) 12665 tg3_flag_set(tp, PAUSE_AUTONEG); 12666 else 12667 tg3_flag_clear(tp, PAUSE_AUTONEG); 12668 if (epause->rx_pause) 12669 tp->link_config.flowctrl |= FLOW_CTRL_RX; 12670 else 12671 tp->link_config.flowctrl &= ~FLOW_CTRL_RX; 12672 if (epause->tx_pause) 12673 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12674 else 12675 tp->link_config.flowctrl &= ~FLOW_CTRL_TX; 12676 12677 if (netif_running(dev)) { 12678 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 12679 /* Reset PHY to avoid PHY lock up */ 12680 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 12681 tg3_asic_rev(tp) == ASIC_REV_5719 || 12682 tg3_asic_rev(tp) == ASIC_REV_5720) 12683 reset_phy = true; 12684 12685 err = tg3_restart_hw(tp, reset_phy); 12686 if (!err) 12687 tg3_netif_start(tp); 12688 } 12689 12690 tg3_full_unlock(tp); 12691 } 12692 12693 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 12694 12695 return err; 12696 } 12697 12698 static int tg3_get_sset_count(struct net_device *dev, int sset) 12699 { 12700 switch (sset) { 12701 case ETH_SS_TEST: 12702 return TG3_NUM_TEST; 12703 case ETH_SS_STATS: 12704 return TG3_NUM_STATS; 12705 default: 12706 return -EOPNOTSUPP; 12707 } 12708 } 12709 12710 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, 12711 u32 *rules __always_unused) 12712 { 12713 struct tg3 *tp = netdev_priv(dev); 12714 12715 if (!tg3_flag(tp, SUPPORT_MSIX)) 12716 return -EOPNOTSUPP; 12717 12718 switch (info->cmd) { 12719 case ETHTOOL_GRXRINGS: 12720 if (netif_running(tp->dev)) 12721 info->data = tp->rxq_cnt; 12722 else { 12723 info->data = num_online_cpus(); 12724 if (info->data > TG3_RSS_MAX_NUM_QS) 12725 info->data = TG3_RSS_MAX_NUM_QS; 12726 } 12727 12728 return 0; 12729 12730 default: 12731 return -EOPNOTSUPP; 12732 } 12733 } 12734 12735 static u32 tg3_get_rxfh_indir_size(struct net_device *dev) 12736 { 12737 u32 size = 0; 12738 struct tg3 *tp = netdev_priv(dev); 12739 12740 if (tg3_flag(tp, SUPPORT_MSIX)) 12741 size = TG3_RSS_INDIR_TBL_SIZE; 12742 12743 return size; 12744 } 12745 12746 static int tg3_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh) 12747 { 12748 struct tg3 *tp = netdev_priv(dev); 12749 int i; 12750 12751 rxfh->hfunc = ETH_RSS_HASH_TOP; 12752 if (!rxfh->indir) 12753 return 0; 12754 12755 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) 12756 rxfh->indir[i] = tp->rss_ind_tbl[i]; 12757 12758 return 0; 12759 } 12760 12761 static int tg3_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh, 12762 struct netlink_ext_ack *extack) 12763 { 12764 struct tg3 *tp = netdev_priv(dev); 12765 size_t i; 12766 12767 /* We require at least one supported parameter to be changed and no 
12768 * change in any of the unsupported parameters 12769 */ 12770 if (rxfh->key || 12771 (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && 12772 rxfh->hfunc != ETH_RSS_HASH_TOP)) 12773 return -EOPNOTSUPP; 12774 12775 if (!rxfh->indir) 12776 return 0; 12777 12778 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) 12779 tp->rss_ind_tbl[i] = rxfh->indir[i]; 12780 12781 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS)) 12782 return 0; 12783 12784 /* It is legal to write the indirection 12785 * table while the device is running. 12786 */ 12787 tg3_full_lock(tp, 0); 12788 tg3_rss_write_indir_tbl(tp); 12789 tg3_full_unlock(tp); 12790 12791 return 0; 12792 } 12793 12794 static void tg3_get_channels(struct net_device *dev, 12795 struct ethtool_channels *channel) 12796 { 12797 struct tg3 *tp = netdev_priv(dev); 12798 u32 deflt_qs = netif_get_num_default_rss_queues(); 12799 12800 channel->max_rx = tp->rxq_max; 12801 channel->max_tx = tp->txq_max; 12802 12803 if (netif_running(dev)) { 12804 channel->rx_count = tp->rxq_cnt; 12805 channel->tx_count = tp->txq_cnt; 12806 } else { 12807 if (tp->rxq_req) 12808 channel->rx_count = tp->rxq_req; 12809 else 12810 channel->rx_count = min(deflt_qs, tp->rxq_max); 12811 12812 if (tp->txq_req) 12813 channel->tx_count = tp->txq_req; 12814 else 12815 channel->tx_count = min(deflt_qs, tp->txq_max); 12816 } 12817 } 12818 12819 static int tg3_set_channels(struct net_device *dev, 12820 struct ethtool_channels *channel) 12821 { 12822 struct tg3 *tp = netdev_priv(dev); 12823 12824 if (!tg3_flag(tp, SUPPORT_MSIX)) 12825 return -EOPNOTSUPP; 12826 12827 if (channel->rx_count > tp->rxq_max || 12828 channel->tx_count > tp->txq_max) 12829 return -EINVAL; 12830 12831 tp->rxq_req = channel->rx_count; 12832 tp->txq_req = channel->tx_count; 12833 12834 if (!netif_running(dev)) 12835 return 0; 12836 12837 tg3_stop(tp); 12838 12839 tg3_carrier_off(tp); 12840 12841 tg3_start(tp, true, false, false); 12842 12843 return 0; 12844 } 12845 12846 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 12847 { 12848 switch (stringset) { 12849 case ETH_SS_STATS: 12850 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); 12851 break; 12852 case ETH_SS_TEST: 12853 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys)); 12854 break; 12855 default: 12856 WARN_ON(1); /* we need a WARN() */ 12857 break; 12858 } 12859 } 12860 12861 static int tg3_set_phys_id(struct net_device *dev, 12862 enum ethtool_phys_id_state state) 12863 { 12864 struct tg3 *tp = netdev_priv(dev); 12865 12866 switch (state) { 12867 case ETHTOOL_ID_ACTIVE: 12868 return 1; /* cycle on/off once per second */ 12869 12870 case ETHTOOL_ID_ON: 12871 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | 12872 LED_CTRL_1000MBPS_ON | 12873 LED_CTRL_100MBPS_ON | 12874 LED_CTRL_10MBPS_ON | 12875 LED_CTRL_TRAFFIC_OVERRIDE | 12876 LED_CTRL_TRAFFIC_BLINK | 12877 LED_CTRL_TRAFFIC_LED); 12878 break; 12879 12880 case ETHTOOL_ID_OFF: 12881 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | 12882 LED_CTRL_TRAFFIC_OVERRIDE); 12883 break; 12884 12885 case ETHTOOL_ID_INACTIVE: 12886 tw32(MAC_LED_CTRL, tp->led_ctrl); 12887 break; 12888 } 12889 12890 return 0; 12891 } 12892 12893 static void tg3_get_ethtool_stats(struct net_device *dev, 12894 struct ethtool_stats *estats, u64 *tmp_stats) 12895 { 12896 struct tg3 *tp = netdev_priv(dev); 12897 12898 if (tp->hw_stats) 12899 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats); 12900 else 12901 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats)); 12902 } 12903 12904 static __be32
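/* Note (added commentary, not from the upstream source): the buffer
 * returned by this function comes from kmalloc() or pci_vpd_alloc(),
 * and the callers are expected to kfree() it when done.
 */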
*tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen) 12905 { 12906 int i; 12907 __be32 *buf; 12908 u32 offset = 0, len = 0; 12909 u32 magic, val; 12910 12911 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic)) 12912 return NULL; 12913 12914 if (magic == TG3_EEPROM_MAGIC) { 12915 for (offset = TG3_NVM_DIR_START; 12916 offset < TG3_NVM_DIR_END; 12917 offset += TG3_NVM_DIRENT_SIZE) { 12918 if (tg3_nvram_read(tp, offset, &val)) 12919 return NULL; 12920 12921 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == 12922 TG3_NVM_DIRTYPE_EXTVPD) 12923 break; 12924 } 12925 12926 if (offset != TG3_NVM_DIR_END) { 12927 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4; 12928 if (tg3_nvram_read(tp, offset + 4, &offset)) 12929 return NULL; 12930 12931 offset = tg3_nvram_logical_addr(tp, offset); 12932 } 12933 12934 if (!offset || !len) { 12935 offset = TG3_NVM_VPD_OFF; 12936 len = TG3_NVM_VPD_LEN; 12937 } 12938 12939 buf = kmalloc(len, GFP_KERNEL); 12940 if (!buf) 12941 return NULL; 12942 12943 for (i = 0; i < len; i += 4) { 12944 /* The data is in little-endian format in NVRAM. 12945 * Use the big-endian read routines to preserve 12946 * the byte order as it exists in NVRAM. 12947 */ 12948 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4])) 12949 goto error; 12950 } 12951 *vpdlen = len; 12952 } else { 12953 buf = pci_vpd_alloc(tp->pdev, vpdlen); 12954 if (IS_ERR(buf)) 12955 return NULL; 12956 } 12957 12958 return buf; 12959 12960 error: 12961 kfree(buf); 12962 return NULL; 12963 } 12964 12965 #define NVRAM_TEST_SIZE 0x100 12966 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14 12967 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18 12968 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c 12969 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20 12970 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24 12971 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50 12972 #define NVRAM_SELFBOOT_HW_SIZE 0x20 12973 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c 12974 12975 static int tg3_test_nvram(struct tg3 *tp) 12976 { 12977 u32 csum, magic; 12978 __be32 *buf; 12979 int i, j, k, err = 0, size; 12980 unsigned int len; 12981 12982 if (tg3_flag(tp, NO_NVRAM)) 12983 return 0; 12984 12985 if (tg3_nvram_read(tp, 0, &magic) != 0) 12986 return -EIO; 12987 12988 if (magic == TG3_EEPROM_MAGIC) 12989 size = NVRAM_TEST_SIZE; 12990 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) { 12991 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) == 12992 TG3_EEPROM_SB_FORMAT_1) { 12993 switch (magic & TG3_EEPROM_SB_REVISION_MASK) { 12994 case TG3_EEPROM_SB_REVISION_0: 12995 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE; 12996 break; 12997 case TG3_EEPROM_SB_REVISION_2: 12998 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE; 12999 break; 13000 case TG3_EEPROM_SB_REVISION_3: 13001 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE; 13002 break; 13003 case TG3_EEPROM_SB_REVISION_4: 13004 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE; 13005 break; 13006 case TG3_EEPROM_SB_REVISION_5: 13007 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE; 13008 break; 13009 case TG3_EEPROM_SB_REVISION_6: 13010 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE; 13011 break; 13012 default: 13013 return -EIO; 13014 } 13015 } else 13016 return 0; 13017 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) 13018 size = NVRAM_SELFBOOT_HW_SIZE; 13019 else 13020 return -EIO; 13021 13022 buf = kmalloc(size, GFP_KERNEL); 13023 if (buf == NULL) 13024 return -ENOMEM; 13025 13026 err = -EIO; 13027 for (i = 0, j = 0; i < size; i += 4, j++) { 13028 err = tg3_nvram_read_be32(tp, i, &buf[j]); 13029 if (err) 13030 break; 13031 } 13032 if (i < size) 13033 goto out; 13034 13035 /* 
Selfboot format */ 13036 magic = be32_to_cpu(buf[0]); 13037 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == 13038 TG3_EEPROM_MAGIC_FW) { 13039 u8 *buf8 = (u8 *) buf, csum8 = 0; 13040 13041 if ((magic & TG3_EEPROM_SB_REVISION_MASK) == 13042 TG3_EEPROM_SB_REVISION_2) { 13043 /* For rev 2, the csum doesn't include the MBA. */ 13044 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++) 13045 csum8 += buf8[i]; 13046 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++) 13047 csum8 += buf8[i]; 13048 } else { 13049 for (i = 0; i < size; i++) 13050 csum8 += buf8[i]; 13051 } 13052 13053 if (csum8 == 0) { 13054 err = 0; 13055 goto out; 13056 } 13057 13058 err = -EIO; 13059 goto out; 13060 } 13061 13062 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == 13063 TG3_EEPROM_MAGIC_HW) { 13064 u8 data[NVRAM_SELFBOOT_DATA_SIZE]; 13065 u8 parity[NVRAM_SELFBOOT_DATA_SIZE]; 13066 u8 *buf8 = (u8 *) buf; 13067 13068 /* Separate the parity bits and the data bytes. */ 13069 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) { 13070 if ((i == 0) || (i == 8)) { 13071 int l; 13072 u8 msk; 13073 13074 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1) 13075 parity[k++] = buf8[i] & msk; 13076 i++; 13077 } else if (i == 16) { 13078 int l; 13079 u8 msk; 13080 13081 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1) 13082 parity[k++] = buf8[i] & msk; 13083 i++; 13084 13085 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1) 13086 parity[k++] = buf8[i] & msk; 13087 i++; 13088 } 13089 data[j++] = buf8[i]; 13090 } 13091 13092 err = -EIO; 13093 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) { 13094 u8 hw8 = hweight8(data[i]); 13095 13096 if ((hw8 & 0x1) && parity[i]) 13097 goto out; 13098 else if (!(hw8 & 0x1) && !parity[i]) 13099 goto out; 13100 } 13101 err = 0; 13102 goto out; 13103 } 13104 13105 err = -EIO; 13106 13107 /* Bootstrap checksum at offset 0x10 */ 13108 csum = calc_crc((unsigned char *) buf, 0x10); 13109 if (csum != le32_to_cpu(buf[0x10/4])) 13110 goto out; 13111 13112 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ 13113 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); 13114 if (csum != le32_to_cpu(buf[0xfc/4])) 13115 goto out; 13116 13117 kfree(buf); 13118 13119 buf = tg3_vpd_readblock(tp, &len); 13120 if (!buf) 13121 return -ENOMEM; 13122 13123 err = pci_vpd_check_csum(buf, len); 13124 /* go on if no checksum found */ 13125 if (err == 1) 13126 err = 0; 13127 out: 13128 kfree(buf); 13129 return err; 13130 } 13131 13132 #define TG3_SERDES_TIMEOUT_SEC 2 13133 #define TG3_COPPER_TIMEOUT_SEC 6 13134 13135 static int tg3_test_link(struct tg3 *tp) 13136 { 13137 int i, max; 13138 13139 if (!netif_running(tp->dev)) 13140 return -ENODEV; 13141 13142 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 13143 max = TG3_SERDES_TIMEOUT_SEC; 13144 else 13145 max = TG3_COPPER_TIMEOUT_SEC; 13146 13147 for (i = 0; i < max; i++) { 13148 if (tp->link_up) 13149 return 0; 13150 13151 if (msleep_interruptible(1000)) 13152 break; 13153 } 13154 13155 return -EIO; 13156 } 13157 13158 /* Only test the commonly used registers */ 13159 static int tg3_test_registers(struct tg3 *tp) 13160 { 13161 int i, is_5705, is_5750; 13162 u32 offset, read_mask, write_mask, val, save_val, read_val; 13163 static struct { 13164 u16 offset; 13165 u16 flags; 13166 #define TG3_FL_5705 0x1 13167 #define TG3_FL_NOT_5705 0x2 13168 #define TG3_FL_NOT_5788 0x4 13169 #define TG3_FL_NOT_5750 0x8 13170 u32 read_mask; 13171 u32 write_mask; 13172 } reg_tbl[] = { 13173 /* MAC Control Registers */ 13174 { MAC_MODE, TG3_FL_NOT_5705, 13175 0x00000000, 0x00ef6f8c }, 13176 { 
MAC_MODE, TG3_FL_5705, 13177 0x00000000, 0x01ef6b8c }, 13178 { MAC_STATUS, TG3_FL_NOT_5705, 13179 0x03800107, 0x00000000 }, 13180 { MAC_STATUS, TG3_FL_5705, 13181 0x03800100, 0x00000000 }, 13182 { MAC_ADDR_0_HIGH, 0x0000, 13183 0x00000000, 0x0000ffff }, 13184 { MAC_ADDR_0_LOW, 0x0000, 13185 0x00000000, 0xffffffff }, 13186 { MAC_RX_MTU_SIZE, 0x0000, 13187 0x00000000, 0x0000ffff }, 13188 { MAC_TX_MODE, 0x0000, 13189 0x00000000, 0x00000070 }, 13190 { MAC_TX_LENGTHS, 0x0000, 13191 0x00000000, 0x00003fff }, 13192 { MAC_RX_MODE, TG3_FL_NOT_5705, 13193 0x00000000, 0x000007fc }, 13194 { MAC_RX_MODE, TG3_FL_5705, 13195 0x00000000, 0x000007dc }, 13196 { MAC_HASH_REG_0, 0x0000, 13197 0x00000000, 0xffffffff }, 13198 { MAC_HASH_REG_1, 0x0000, 13199 0x00000000, 0xffffffff }, 13200 { MAC_HASH_REG_2, 0x0000, 13201 0x00000000, 0xffffffff }, 13202 { MAC_HASH_REG_3, 0x0000, 13203 0x00000000, 0xffffffff }, 13204 13205 /* Receive Data and Receive BD Initiator Control Registers. */ 13206 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705, 13207 0x00000000, 0xffffffff }, 13208 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705, 13209 0x00000000, 0xffffffff }, 13210 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705, 13211 0x00000000, 0x00000003 }, 13212 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705, 13213 0x00000000, 0xffffffff }, 13214 { RCVDBDI_STD_BD+0, 0x0000, 13215 0x00000000, 0xffffffff }, 13216 { RCVDBDI_STD_BD+4, 0x0000, 13217 0x00000000, 0xffffffff }, 13218 { RCVDBDI_STD_BD+8, 0x0000, 13219 0x00000000, 0xffff0002 }, 13220 { RCVDBDI_STD_BD+0xc, 0x0000, 13221 0x00000000, 0xffffffff }, 13222 13223 /* Receive BD Initiator Control Registers. */ 13224 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705, 13225 0x00000000, 0xffffffff }, 13226 { RCVBDI_STD_THRESH, TG3_FL_5705, 13227 0x00000000, 0x000003ff }, 13228 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, 13229 0x00000000, 0xffffffff }, 13230 13231 /* Host Coalescing Control Registers. 
*/ 13232 { HOSTCC_MODE, TG3_FL_NOT_5705, 13233 0x00000000, 0x00000004 }, 13234 { HOSTCC_MODE, TG3_FL_5705, 13235 0x00000000, 0x000000f6 }, 13236 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705, 13237 0x00000000, 0xffffffff }, 13238 { HOSTCC_RXCOL_TICKS, TG3_FL_5705, 13239 0x00000000, 0x000003ff }, 13240 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705, 13241 0x00000000, 0xffffffff }, 13242 { HOSTCC_TXCOL_TICKS, TG3_FL_5705, 13243 0x00000000, 0x000003ff }, 13244 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705, 13245 0x00000000, 0xffffffff }, 13246 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 13247 0x00000000, 0x000000ff }, 13248 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705, 13249 0x00000000, 0xffffffff }, 13250 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 13251 0x00000000, 0x000000ff }, 13252 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, 13253 0x00000000, 0xffffffff }, 13254 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, 13255 0x00000000, 0xffffffff }, 13256 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, 13257 0x00000000, 0xffffffff }, 13258 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 13259 0x00000000, 0x000000ff }, 13260 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, 13261 0x00000000, 0xffffffff }, 13262 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 13263 0x00000000, 0x000000ff }, 13264 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, 13265 0x00000000, 0xffffffff }, 13266 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705, 13267 0x00000000, 0xffffffff }, 13268 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705, 13269 0x00000000, 0xffffffff }, 13270 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, 13271 0x00000000, 0xffffffff }, 13272 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, 13273 0x00000000, 0xffffffff }, 13274 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, 13275 0xffffffff, 0x00000000 }, 13276 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, 13277 0xffffffff, 0x00000000 }, 13278 13279 /* Buffer Manager Control Registers. 
*/ 13280 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750, 13281 0x00000000, 0x007fff80 }, 13282 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750, 13283 0x00000000, 0x007fffff }, 13284 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000, 13285 0x00000000, 0x0000003f }, 13286 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000, 13287 0x00000000, 0x000001ff }, 13288 { BUFMGR_MB_HIGH_WATER, 0x0000, 13289 0x00000000, 0x000001ff }, 13290 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705, 13291 0xffffffff, 0x00000000 }, 13292 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705, 13293 0xffffffff, 0x00000000 }, 13294 13295 /* Mailbox Registers */ 13296 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000, 13297 0x00000000, 0x000001ff }, 13298 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705, 13299 0x00000000, 0x000001ff }, 13300 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000, 13301 0x00000000, 0x000007ff }, 13302 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000, 13303 0x00000000, 0x000001ff }, 13304 13305 { 0xffff, 0x0000, 0x00000000, 0x00000000 }, 13306 }; 13307 13308 is_5705 = is_5750 = 0; 13309 if (tg3_flag(tp, 5705_PLUS)) { 13310 is_5705 = 1; 13311 if (tg3_flag(tp, 5750_PLUS)) 13312 is_5750 = 1; 13313 } 13314 13315 for (i = 0; reg_tbl[i].offset != 0xffff; i++) { 13316 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705)) 13317 continue; 13318 13319 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) 13320 continue; 13321 13322 if (tg3_flag(tp, IS_5788) && 13323 (reg_tbl[i].flags & TG3_FL_NOT_5788)) 13324 continue; 13325 13326 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750)) 13327 continue; 13328 13329 offset = (u32) reg_tbl[i].offset; 13330 read_mask = reg_tbl[i].read_mask; 13331 write_mask = reg_tbl[i].write_mask; 13332 13333 /* Save the original register content */ 13334 save_val = tr32(offset); 13335 13336 /* Determine the read-only value. */ 13337 read_val = save_val & read_mask; 13338 13339 /* Write zero to the register, then make sure the read-only bits 13340 * are not changed and the read/write bits are all zeros. 13341 */ 13342 tw32(offset, 0); 13343 13344 val = tr32(offset); 13345 13346 /* Test the read-only and read/write bits. */ 13347 if (((val & read_mask) != read_val) || (val & write_mask)) 13348 goto out; 13349 13350 /* Write ones to all the bits defined by RdMask and WrMask, then 13351 * make sure the read-only bits are not changed and the 13352 * read/write bits are all ones. 13353 */ 13354 tw32(offset, read_mask | write_mask); 13355 13356 val = tr32(offset); 13357 13358 /* Test the read-only bits. */ 13359 if ((val & read_mask) != read_val) 13360 goto out; 13361 13362 /* Test the read/write bits. 
*/ 13363 if ((val & write_mask) != write_mask) 13364 goto out; 13365 13366 tw32(offset, save_val); 13367 } 13368 13369 return 0; 13370 13371 out: 13372 if (netif_msg_hw(tp)) 13373 netdev_err(tp->dev, 13374 "Register test failed at offset %x\n", offset); 13375 tw32(offset, save_val); 13376 return -EIO; 13377 } 13378 13379 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) 13380 { 13381 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; 13382 int i; 13383 u32 j; 13384 13385 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) { 13386 for (j = 0; j < len; j += 4) { 13387 u32 val; 13388 13389 tg3_write_mem(tp, offset + j, test_pattern[i]); 13390 tg3_read_mem(tp, offset + j, &val); 13391 if (val != test_pattern[i]) 13392 return -EIO; 13393 } 13394 } 13395 return 0; 13396 } 13397 13398 static int tg3_test_memory(struct tg3 *tp) 13399 { 13400 static struct mem_entry { 13401 u32 offset; 13402 u32 len; 13403 } mem_tbl_570x[] = { 13404 { 0x00000000, 0x00b50}, 13405 { 0x00002000, 0x1c000}, 13406 { 0xffffffff, 0x00000} 13407 }, mem_tbl_5705[] = { 13408 { 0x00000100, 0x0000c}, 13409 { 0x00000200, 0x00008}, 13410 { 0x00004000, 0x00800}, 13411 { 0x00006000, 0x01000}, 13412 { 0x00008000, 0x02000}, 13413 { 0x00010000, 0x0e000}, 13414 { 0xffffffff, 0x00000} 13415 }, mem_tbl_5755[] = { 13416 { 0x00000200, 0x00008}, 13417 { 0x00004000, 0x00800}, 13418 { 0x00006000, 0x00800}, 13419 { 0x00008000, 0x02000}, 13420 { 0x00010000, 0x0c000}, 13421 { 0xffffffff, 0x00000} 13422 }, mem_tbl_5906[] = { 13423 { 0x00000200, 0x00008}, 13424 { 0x00004000, 0x00400}, 13425 { 0x00006000, 0x00400}, 13426 { 0x00008000, 0x01000}, 13427 { 0x00010000, 0x01000}, 13428 { 0xffffffff, 0x00000} 13429 }, mem_tbl_5717[] = { 13430 { 0x00000200, 0x00008}, 13431 { 0x00010000, 0x0a000}, 13432 { 0x00020000, 0x13c00}, 13433 { 0xffffffff, 0x00000} 13434 }, mem_tbl_57765[] = { 13435 { 0x00000200, 0x00008}, 13436 { 0x00004000, 0x00800}, 13437 { 0x00006000, 0x09800}, 13438 { 0x00010000, 0x0a000}, 13439 { 0xffffffff, 0x00000} 13440 }; 13441 struct mem_entry *mem_tbl; 13442 int err = 0; 13443 int i; 13444 13445 if (tg3_flag(tp, 5717_PLUS)) 13446 mem_tbl = mem_tbl_5717; 13447 else if (tg3_flag(tp, 57765_CLASS) || 13448 tg3_asic_rev(tp) == ASIC_REV_5762) 13449 mem_tbl = mem_tbl_57765; 13450 else if (tg3_flag(tp, 5755_PLUS)) 13451 mem_tbl = mem_tbl_5755; 13452 else if (tg3_asic_rev(tp) == ASIC_REV_5906) 13453 mem_tbl = mem_tbl_5906; 13454 else if (tg3_flag(tp, 5705_PLUS)) 13455 mem_tbl = mem_tbl_5705; 13456 else 13457 mem_tbl = mem_tbl_570x; 13458 13459 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { 13460 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len); 13461 if (err) 13462 break; 13463 } 13464 13465 return err; 13466 } 13467 13468 #define TG3_TSO_MSS 500 13469 13470 #define TG3_TSO_IP_HDR_LEN 20 13471 #define TG3_TSO_TCP_HDR_LEN 20 13472 #define TG3_TSO_TCP_OPT_LEN 12 13473 13474 static const u8 tg3_tso_header[] = { 13475 0x08, 0x00, 13476 0x45, 0x00, 0x00, 0x00, 13477 0x00, 0x00, 0x40, 0x00, 13478 0x40, 0x06, 0x00, 0x00, 13479 0x0a, 0x00, 0x00, 0x01, 13480 0x0a, 0x00, 0x00, 0x02, 13481 0x0d, 0x00, 0xe0, 0x00, 13482 0x00, 0x00, 0x01, 0x00, 13483 0x00, 0x00, 0x02, 0x00, 13484 0x80, 0x10, 0x10, 0x00, 13485 0x14, 0x09, 0x00, 0x00, 13486 0x01, 0x01, 0x08, 0x0a, 13487 0x11, 0x11, 0x11, 0x11, 13488 0x11, 0x11, 0x11, 0x11, 13489 }; 13490 13491 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) 13492 { 13493 u32 rx_start_idx, rx_idx, tx_idx, opaque_key; 13494 u32 base_flags = 0, mss = 0, 
desc_idx, coal_now, data_off, val; 13495 u32 budget; 13496 struct sk_buff *skb; 13497 u8 *tx_data, *rx_data; 13498 dma_addr_t map; 13499 int num_pkts, tx_len, rx_len, i, err; 13500 struct tg3_rx_buffer_desc *desc; 13501 struct tg3_napi *tnapi, *rnapi; 13502 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; 13503 13504 tnapi = &tp->napi[0]; 13505 rnapi = &tp->napi[0]; 13506 if (tp->irq_cnt > 1) { 13507 if (tg3_flag(tp, ENABLE_RSS)) 13508 rnapi = &tp->napi[1]; 13509 if (tg3_flag(tp, ENABLE_TSS)) 13510 tnapi = &tp->napi[1]; 13511 } 13512 coal_now = tnapi->coal_now | rnapi->coal_now; 13513 13514 err = -EIO; 13515 13516 tx_len = pktsz; 13517 skb = netdev_alloc_skb(tp->dev, tx_len); 13518 if (!skb) 13519 return -ENOMEM; 13520 13521 tx_data = skb_put(skb, tx_len); 13522 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN); 13523 memset(tx_data + ETH_ALEN, 0x0, 8); 13524 13525 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN); 13526 13527 if (tso_loopback) { 13528 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN]; 13529 13530 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN + 13531 TG3_TSO_TCP_OPT_LEN; 13532 13533 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header, 13534 sizeof(tg3_tso_header)); 13535 mss = TG3_TSO_MSS; 13536 13537 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header); 13538 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS); 13539 13540 /* Set the total length field in the IP header */ 13541 iph->tot_len = htons((u16)(mss + hdr_len)); 13542 13543 base_flags = (TXD_FLAG_CPU_PRE_DMA | 13544 TXD_FLAG_CPU_POST_DMA); 13545 13546 if (tg3_flag(tp, HW_TSO_1) || 13547 tg3_flag(tp, HW_TSO_2) || 13548 tg3_flag(tp, HW_TSO_3)) { 13549 struct tcphdr *th; 13550 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN; 13551 th = (struct tcphdr *)&tx_data[val]; 13552 th->check = 0; 13553 } else 13554 base_flags |= TXD_FLAG_TCPUDP_CSUM; 13555 13556 if (tg3_flag(tp, HW_TSO_3)) { 13557 mss |= (hdr_len & 0xc) << 12; 13558 if (hdr_len & 0x10) 13559 base_flags |= 0x00000010; 13560 base_flags |= (hdr_len & 0x3e0) << 5; 13561 } else if (tg3_flag(tp, HW_TSO_2)) 13562 mss |= hdr_len << 9; 13563 else if (tg3_flag(tp, HW_TSO_1) || 13564 tg3_asic_rev(tp) == ASIC_REV_5705) { 13565 mss |= (TG3_TSO_TCP_OPT_LEN << 9); 13566 } else { 13567 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10); 13568 } 13569 13570 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header); 13571 } else { 13572 num_pkts = 1; 13573 data_off = ETH_HLEN; 13574 13575 if (tg3_flag(tp, USE_JUMBO_BDFLAG) && 13576 tx_len > VLAN_ETH_FRAME_LEN) 13577 base_flags |= TXD_FLAG_JMB_PKT; 13578 } 13579 13580 for (i = data_off; i < tx_len; i++) 13581 tx_data[i] = (u8) (i & 0xff); 13582 13583 map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE); 13584 if (dma_mapping_error(&tp->pdev->dev, map)) { 13585 dev_kfree_skb(skb); 13586 return -EIO; 13587 } 13588 13589 val = tnapi->tx_prod; 13590 tnapi->tx_buffers[val].skb = skb; 13591 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map); 13592 13593 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 13594 rnapi->coal_now); 13595 13596 udelay(10); 13597 13598 rx_start_idx = rnapi->hw_status->idx[0].rx_producer; 13599 13600 budget = tg3_tx_avail(tnapi); 13601 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len, 13602 base_flags | TXD_FLAG_END, mss, 0)) { 13603 tnapi->tx_buffers[val].skb = NULL; 13604 dev_kfree_skb(skb); 13605 return -EIO; 13606 } 13607 13608 tnapi->tx_prod++; 13609 13610 /* Sync BD data before updating mailbox */ 13611 wmb(); 13612 13613 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); 13614 
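	/* Illustrative note (added commentary, not from the upstream source):
	 * the mailbox read-back that follows is understood to flush the
	 * posted PCI write of the producer index, so the NIC has observed
	 * the new value before the polling loop below starts counting down.
	 */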
tr32_mailbox(tnapi->prodmbox); 13615 13616 udelay(10); 13617 13618 /* 350 usec to allow enough time on some 10/100 Mbps devices. */ 13619 for (i = 0; i < 35; i++) { 13620 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 13621 coal_now); 13622 13623 udelay(10); 13624 13625 tx_idx = tnapi->hw_status->idx[0].tx_consumer; 13626 rx_idx = rnapi->hw_status->idx[0].rx_producer; 13627 if ((tx_idx == tnapi->tx_prod) && 13628 (rx_idx == (rx_start_idx + num_pkts))) 13629 break; 13630 } 13631 13632 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1); 13633 dev_kfree_skb(skb); 13634 13635 if (tx_idx != tnapi->tx_prod) 13636 goto out; 13637 13638 if (rx_idx != rx_start_idx + num_pkts) 13639 goto out; 13640 13641 val = data_off; 13642 while (rx_idx != rx_start_idx) { 13643 desc = &rnapi->rx_rcb[rx_start_idx++]; 13644 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 13645 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 13646 13647 if ((desc->err_vlan & RXD_ERR_MASK) != 0 && 13648 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) 13649 goto out; 13650 13651 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) 13652 - ETH_FCS_LEN; 13653 13654 if (!tso_loopback) { 13655 if (rx_len != tx_len) 13656 goto out; 13657 13658 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) { 13659 if (opaque_key != RXD_OPAQUE_RING_STD) 13660 goto out; 13661 } else { 13662 if (opaque_key != RXD_OPAQUE_RING_JUMBO) 13663 goto out; 13664 } 13665 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && 13666 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK) 13667 >> RXD_TCPCSUM_SHIFT != 0xffff) { 13668 goto out; 13669 } 13670 13671 if (opaque_key == RXD_OPAQUE_RING_STD) { 13672 rx_data = tpr->rx_std_buffers[desc_idx].data; 13673 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], 13674 mapping); 13675 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 13676 rx_data = tpr->rx_jmb_buffers[desc_idx].data; 13677 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx], 13678 mapping); 13679 } else 13680 goto out; 13681 13682 dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len, 13683 DMA_FROM_DEVICE); 13684 13685 rx_data += TG3_RX_OFFSET(tp); 13686 for (i = data_off; i < rx_len; i++, val++) { 13687 if (*(rx_data + i) != (u8) (val & 0xff)) 13688 goto out; 13689 } 13690 } 13691 13692 err = 0; 13693 13694 /* tg3_free_rings will unmap and free the rx_data */ 13695 out: 13696 return err; 13697 } 13698 13699 #define TG3_STD_LOOPBACK_FAILED 1 13700 #define TG3_JMB_LOOPBACK_FAILED 2 13701 #define TG3_TSO_LOOPBACK_FAILED 4 13702 #define TG3_LOOPBACK_FAILED \ 13703 (TG3_STD_LOOPBACK_FAILED | \ 13704 TG3_JMB_LOOPBACK_FAILED | \ 13705 TG3_TSO_LOOPBACK_FAILED) 13706 13707 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk) 13708 { 13709 int err = -EIO; 13710 u32 eee_cap; 13711 u32 jmb_pkt_sz = 9000; 13712 13713 if (tp->dma_limit) 13714 jmb_pkt_sz = tp->dma_limit - ETH_HLEN; 13715 13716 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP; 13717 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; 13718 13719 if (!netif_running(tp->dev)) { 13720 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13721 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13722 if (do_extlpbk) 13723 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13724 goto done; 13725 } 13726 13727 err = tg3_reset_hw(tp, true); 13728 if (err) { 13729 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13730 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13731 if (do_extlpbk) 13732 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13733 goto done; 13734 } 13735 13736 if (tg3_flag(tp, ENABLE_RSS)) { 13737 int 
i; 13738 13739 /* Reroute all rx packets to the 1st queue */ 13740 for (i = MAC_RSS_INDIR_TBL_0; 13741 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4) 13742 tw32(i, 0x0); 13743 } 13744 13745 /* HW errata - mac loopback fails in some cases on 5780. 13746 * Normal traffic and PHY loopback are not affected by 13747 * errata. Also, the MAC loopback test is deprecated for 13748 * all newer ASIC revisions. 13749 */ 13750 if (tg3_asic_rev(tp) != ASIC_REV_5780 && 13751 !tg3_flag(tp, CPMU_PRESENT)) { 13752 tg3_mac_loopback(tp, true); 13753 13754 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13755 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED; 13756 13757 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13758 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13759 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED; 13760 13761 tg3_mac_loopback(tp, false); 13762 } 13763 13764 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 13765 !tg3_flag(tp, USE_PHYLIB)) { 13766 int i; 13767 13768 tg3_phy_lpbk_set(tp, 0, false); 13769 13770 /* Wait for link */ 13771 for (i = 0; i < 100; i++) { 13772 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 13773 break; 13774 mdelay(1); 13775 } 13776 13777 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13778 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED; 13779 if (tg3_flag(tp, TSO_CAPABLE) && 13780 tg3_run_loopback(tp, ETH_FRAME_LEN, true)) 13781 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED; 13782 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13783 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13784 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED; 13785 13786 if (do_extlpbk) { 13787 tg3_phy_lpbk_set(tp, 0, true); 13788 13789 /* All link indications report up, but the hardware 13790 * isn't really ready for about 20 msec. Double it 13791 * to be sure. 13792 */ 13793 mdelay(40); 13794 13795 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13796 data[TG3_EXT_LOOPB_TEST] |= 13797 TG3_STD_LOOPBACK_FAILED; 13798 if (tg3_flag(tp, TSO_CAPABLE) && 13799 tg3_run_loopback(tp, ETH_FRAME_LEN, true)) 13800 data[TG3_EXT_LOOPB_TEST] |= 13801 TG3_TSO_LOOPBACK_FAILED; 13802 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13803 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13804 data[TG3_EXT_LOOPB_TEST] |= 13805 TG3_JMB_LOOPBACK_FAILED; 13806 } 13807 13808 /* Re-enable gphy autopowerdown. */ 13809 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 13810 tg3_phy_toggle_apd(tp, true); 13811 } 13812 13813 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] | 13814 data[TG3_EXT_LOOPB_TEST]) ? 
-EIO : 0; 13815 13816 done: 13817 tp->phy_flags |= eee_cap; 13818 13819 return err; 13820 } 13821 13822 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, 13823 u64 *data) 13824 { 13825 struct tg3 *tp = netdev_priv(dev); 13826 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB; 13827 13828 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { 13829 if (tg3_power_up(tp)) { 13830 etest->flags |= ETH_TEST_FL_FAILED; 13831 memset(data, 1, sizeof(u64) * TG3_NUM_TEST); 13832 return; 13833 } 13834 tg3_ape_driver_state_change(tp, RESET_KIND_INIT); 13835 } 13836 13837 memset(data, 0, sizeof(u64) * TG3_NUM_TEST); 13838 13839 if (tg3_test_nvram(tp) != 0) { 13840 etest->flags |= ETH_TEST_FL_FAILED; 13841 data[TG3_NVRAM_TEST] = 1; 13842 } 13843 if (!doextlpbk && tg3_test_link(tp)) { 13844 etest->flags |= ETH_TEST_FL_FAILED; 13845 data[TG3_LINK_TEST] = 1; 13846 } 13847 if (etest->flags & ETH_TEST_FL_OFFLINE) { 13848 int err, err2 = 0, irq_sync = 0; 13849 13850 if (netif_running(dev)) { 13851 tg3_phy_stop(tp); 13852 tg3_netif_stop(tp); 13853 irq_sync = 1; 13854 } 13855 13856 tg3_full_lock(tp, irq_sync); 13857 tg3_halt(tp, RESET_KIND_SUSPEND, 1); 13858 err = tg3_nvram_lock(tp); 13859 tg3_halt_cpu(tp, RX_CPU_BASE); 13860 if (!tg3_flag(tp, 5705_PLUS)) 13861 tg3_halt_cpu(tp, TX_CPU_BASE); 13862 if (!err) 13863 tg3_nvram_unlock(tp); 13864 13865 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 13866 tg3_phy_reset(tp); 13867 13868 if (tg3_test_registers(tp) != 0) { 13869 etest->flags |= ETH_TEST_FL_FAILED; 13870 data[TG3_REGISTER_TEST] = 1; 13871 } 13872 13873 if (tg3_test_memory(tp) != 0) { 13874 etest->flags |= ETH_TEST_FL_FAILED; 13875 data[TG3_MEMORY_TEST] = 1; 13876 } 13877 13878 if (doextlpbk) 13879 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; 13880 13881 if (tg3_test_loopback(tp, data, doextlpbk)) 13882 etest->flags |= ETH_TEST_FL_FAILED; 13883 13884 tg3_full_unlock(tp); 13885 13886 if (tg3_test_interrupt(tp) != 0) { 13887 etest->flags |= ETH_TEST_FL_FAILED; 13888 data[TG3_INTERRUPT_TEST] = 1; 13889 } 13890 13891 tg3_full_lock(tp, 0); 13892 13893 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 13894 if (netif_running(dev)) { 13895 tg3_flag_set(tp, INIT_COMPLETE); 13896 err2 = tg3_restart_hw(tp, true); 13897 if (!err2) 13898 tg3_netif_start(tp); 13899 } 13900 13901 tg3_full_unlock(tp); 13902 13903 if (irq_sync && !err2) 13904 tg3_phy_start(tp); 13905 } 13906 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 13907 tg3_power_down_prepare(tp); 13908 13909 } 13910 13911 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) 13912 { 13913 struct tg3 *tp = netdev_priv(dev); 13914 struct hwtstamp_config stmpconf; 13915 13916 if (!tg3_flag(tp, PTP_CAPABLE)) 13917 return -EOPNOTSUPP; 13918 13919 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf))) 13920 return -EFAULT; 13921 13922 if (stmpconf.tx_type != HWTSTAMP_TX_ON && 13923 stmpconf.tx_type != HWTSTAMP_TX_OFF) 13924 return -ERANGE; 13925 13926 switch (stmpconf.rx_filter) { 13927 case HWTSTAMP_FILTER_NONE: 13928 tp->rxptpctl = 0; 13929 break; 13930 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 13931 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13932 TG3_RX_PTP_CTL_ALL_V1_EVENTS; 13933 break; 13934 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 13935 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13936 TG3_RX_PTP_CTL_SYNC_EVNT; 13937 break; 13938 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 13939 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13940 TG3_RX_PTP_CTL_DELAY_REQ; 13941 break; 13942 case HWTSTAMP_FILTER_PTP_V2_EVENT: 13943 tp->rxptpctl = 
TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13944 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13945 break; 13946 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 13947 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13948 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13949 break; 13950 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 13951 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13952 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13953 break; 13954 case HWTSTAMP_FILTER_PTP_V2_SYNC: 13955 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13956 TG3_RX_PTP_CTL_SYNC_EVNT; 13957 break; 13958 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 13959 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13960 TG3_RX_PTP_CTL_SYNC_EVNT; 13961 break; 13962 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 13963 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13964 TG3_RX_PTP_CTL_SYNC_EVNT; 13965 break; 13966 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 13967 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13968 TG3_RX_PTP_CTL_DELAY_REQ; 13969 break; 13970 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 13971 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13972 TG3_RX_PTP_CTL_DELAY_REQ; 13973 break; 13974 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 13975 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13976 TG3_RX_PTP_CTL_DELAY_REQ; 13977 break; 13978 default: 13979 return -ERANGE; 13980 } 13981 13982 if (netif_running(dev) && tp->rxptpctl) 13983 tw32(TG3_RX_PTP_CTL, 13984 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); 13985 13986 if (stmpconf.tx_type == HWTSTAMP_TX_ON) 13987 tg3_flag_set(tp, TX_TSTAMP_EN); 13988 else 13989 tg3_flag_clear(tp, TX_TSTAMP_EN); 13990 13991 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 13992 -EFAULT : 0; 13993 } 13994 13995 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) 13996 { 13997 struct tg3 *tp = netdev_priv(dev); 13998 struct hwtstamp_config stmpconf; 13999 14000 if (!tg3_flag(tp, PTP_CAPABLE)) 14001 return -EOPNOTSUPP; 14002 14003 stmpconf.flags = 0; 14004 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ? 
14005 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF); 14006 14007 switch (tp->rxptpctl) { 14008 case 0: 14009 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE; 14010 break; 14011 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS: 14012 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 14013 break; 14014 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 14015 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; 14016 break; 14017 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ: 14018 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; 14019 break; 14020 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 14021 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 14022 break; 14023 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 14024 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; 14025 break; 14026 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 14027 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; 14028 break; 14029 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 14030 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; 14031 break; 14032 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 14033 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC; 14034 break; 14035 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 14036 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; 14037 break; 14038 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ: 14039 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; 14040 break; 14041 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ: 14042 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ; 14043 break; 14044 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ: 14045 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; 14046 break; 14047 default: 14048 WARN_ON_ONCE(1); 14049 return -ERANGE; 14050 } 14051 14052 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 
14053 -EFAULT : 0; 14054 } 14055 14056 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 14057 { 14058 struct mii_ioctl_data *data = if_mii(ifr); 14059 struct tg3 *tp = netdev_priv(dev); 14060 int err; 14061 14062 if (tg3_flag(tp, USE_PHYLIB)) { 14063 struct phy_device *phydev; 14064 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 14065 return -EAGAIN; 14066 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 14067 return phy_mii_ioctl(phydev, ifr, cmd); 14068 } 14069 14070 switch (cmd) { 14071 case SIOCGMIIPHY: 14072 data->phy_id = tp->phy_addr; 14073 14074 fallthrough; 14075 case SIOCGMIIREG: { 14076 u32 mii_regval; 14077 14078 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 14079 break; /* We have no PHY */ 14080 14081 if (!netif_running(dev)) 14082 return -EAGAIN; 14083 14084 spin_lock_bh(&tp->lock); 14085 err = __tg3_readphy(tp, data->phy_id & 0x1f, 14086 data->reg_num & 0x1f, &mii_regval); 14087 spin_unlock_bh(&tp->lock); 14088 14089 data->val_out = mii_regval; 14090 14091 return err; 14092 } 14093 14094 case SIOCSMIIREG: 14095 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 14096 break; /* We have no PHY */ 14097 14098 if (!netif_running(dev)) 14099 return -EAGAIN; 14100 14101 spin_lock_bh(&tp->lock); 14102 err = __tg3_writephy(tp, data->phy_id & 0x1f, 14103 data->reg_num & 0x1f, data->val_in); 14104 spin_unlock_bh(&tp->lock); 14105 14106 return err; 14107 14108 case SIOCSHWTSTAMP: 14109 return tg3_hwtstamp_set(dev, ifr); 14110 14111 case SIOCGHWTSTAMP: 14112 return tg3_hwtstamp_get(dev, ifr); 14113 14114 default: 14115 /* do nothing */ 14116 break; 14117 } 14118 return -EOPNOTSUPP; 14119 } 14120 14121 static int tg3_get_coalesce(struct net_device *dev, 14122 struct ethtool_coalesce *ec, 14123 struct kernel_ethtool_coalesce *kernel_coal, 14124 struct netlink_ext_ack *extack) 14125 { 14126 struct tg3 *tp = netdev_priv(dev); 14127 14128 memcpy(ec, &tp->coal, sizeof(*ec)); 14129 return 0; 14130 } 14131 14132 static int tg3_set_coalesce(struct net_device *dev, 14133 struct ethtool_coalesce *ec, 14134 struct kernel_ethtool_coalesce *kernel_coal, 14135 struct netlink_ext_ack *extack) 14136 { 14137 struct tg3 *tp = netdev_priv(dev); 14138 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; 14139 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0; 14140 14141 if (!tg3_flag(tp, 5705_PLUS)) { 14142 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT; 14143 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT; 14144 max_stat_coal_ticks = MAX_STAT_COAL_TICKS; 14145 min_stat_coal_ticks = MIN_STAT_COAL_TICKS; 14146 } 14147 14148 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || 14149 (!ec->rx_coalesce_usecs) || 14150 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || 14151 (!ec->tx_coalesce_usecs) || 14152 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || 14153 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || 14154 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) || 14155 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) || 14156 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) || 14157 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) || 14158 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) || 14159 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks)) 14160 return -EINVAL; 14161 14162 /* Only copy relevant parameters, ignore all others. 
*/ 14163 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs; 14164 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs; 14165 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames; 14166 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames; 14167 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq; 14168 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq; 14169 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq; 14170 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq; 14171 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs; 14172 14173 if (netif_running(dev)) { 14174 tg3_full_lock(tp, 0); 14175 __tg3_set_coalesce(tp, &tp->coal); 14176 tg3_full_unlock(tp); 14177 } 14178 return 0; 14179 } 14180 14181 static int tg3_set_eee(struct net_device *dev, struct ethtool_keee *edata) 14182 { 14183 struct tg3 *tp = netdev_priv(dev); 14184 14185 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { 14186 netdev_warn(tp->dev, "Board does not support EEE!\n"); 14187 return -EOPNOTSUPP; 14188 } 14189 14190 if (!linkmode_equal(edata->advertised, tp->eee.advertised)) { 14191 netdev_warn(tp->dev, 14192 "Direct manipulation of EEE advertisement is not supported\n"); 14193 return -EINVAL; 14194 } 14195 14196 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) { 14197 netdev_warn(tp->dev, 14198 "Maximal Tx Lpi timer supported is %#x(u)\n", 14199 TG3_CPMU_DBTMR1_LNKIDLE_MAX); 14200 return -EINVAL; 14201 } 14202 14203 tp->eee = *edata; 14204 14205 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 14206 tg3_warn_mgmt_link_flap(tp); 14207 14208 if (netif_running(tp->dev)) { 14209 tg3_full_lock(tp, 0); 14210 tg3_setup_eee(tp); 14211 tg3_phy_reset(tp); 14212 tg3_full_unlock(tp); 14213 } 14214 14215 return 0; 14216 } 14217 14218 static int tg3_get_eee(struct net_device *dev, struct ethtool_keee *edata) 14219 { 14220 struct tg3 *tp = netdev_priv(dev); 14221 14222 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { 14223 netdev_warn(tp->dev, 14224 "Board does not support EEE!\n"); 14225 return -EOPNOTSUPP; 14226 } 14227 14228 *edata = tp->eee; 14229 return 0; 14230 } 14231 14232 static const struct ethtool_ops tg3_ethtool_ops = { 14233 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 14234 ETHTOOL_COALESCE_MAX_FRAMES | 14235 ETHTOOL_COALESCE_USECS_IRQ | 14236 ETHTOOL_COALESCE_MAX_FRAMES_IRQ | 14237 ETHTOOL_COALESCE_STATS_BLOCK_USECS, 14238 .get_drvinfo = tg3_get_drvinfo, 14239 .get_regs_len = tg3_get_regs_len, 14240 .get_regs = tg3_get_regs, 14241 .get_wol = tg3_get_wol, 14242 .set_wol = tg3_set_wol, 14243 .get_msglevel = tg3_get_msglevel, 14244 .set_msglevel = tg3_set_msglevel, 14245 .nway_reset = tg3_nway_reset, 14246 .get_link = ethtool_op_get_link, 14247 .get_eeprom_len = tg3_get_eeprom_len, 14248 .get_eeprom = tg3_get_eeprom, 14249 .set_eeprom = tg3_set_eeprom, 14250 .get_ringparam = tg3_get_ringparam, 14251 .set_ringparam = tg3_set_ringparam, 14252 .get_pauseparam = tg3_get_pauseparam, 14253 .set_pauseparam = tg3_set_pauseparam, 14254 .self_test = tg3_self_test, 14255 .get_strings = tg3_get_strings, 14256 .set_phys_id = tg3_set_phys_id, 14257 .get_ethtool_stats = tg3_get_ethtool_stats, 14258 .get_coalesce = tg3_get_coalesce, 14259 .set_coalesce = tg3_set_coalesce, 14260 .get_sset_count = tg3_get_sset_count, 14261 .get_rxnfc = tg3_get_rxnfc, 14262 .get_rxfh_indir_size = tg3_get_rxfh_indir_size, 14263 .get_rxfh = tg3_get_rxfh, 14264 .set_rxfh = tg3_set_rxfh, 14265 .get_channels = tg3_get_channels, 14266 .set_channels = tg3_set_channels, 
	.get_ts_info		= tg3_get_ts_info,
	.get_eee		= tg3_get_eee,
	.set_eee		= tg3_set_eee,
	.get_link_ksettings	= tg3_get_link_ksettings,
	.set_link_ksettings	= tg3_set_link_ksettings,
};

static void tg3_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
		*stats = tp->net_stats_prev;
		spin_unlock_bh(&tp->lock);
		return;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);
}

static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}

static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}

static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;
	bool reset_phy = false;

	if (!netif_running(dev)) {
		/* The new MTU is simply recorded here; it takes
		 * effect later, when the device is brought up.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_set_mtu(dev, tp, new_mtu);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	/* Reset the PHY, otherwise the read DMA engine will be left in a
	 * mode that splits all read requests into 256-byte chunks.
	 */
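	/* A minimal sketch of the runtime MTU path, assuming a 5719 with
	 * the interface up (device name illustrative):
	 *
	 *   ip link set dev eth0 mtu 9000
	 *     -> tg3_change_mtu()
	 *          tg3_netif_stop() / tg3_halt()
	 *          tg3_restart_hw(tp, reset_phy)
	 *          tg3_netif_start() / tg3_phy_start()
	 *
	 * The chips listed below are the ones that need the PHY reset
	 * described above.
	 */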
	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		reset_phy = true;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_eth_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};

static void tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}

static void tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing. We want to operate on the
			 * 16-bit value at offset 0xf2. The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses. This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU. The 16-bit
			 * byteswap then brings the data to CPU endianness.
14446 */ 14447 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024; 14448 return; 14449 } 14450 } 14451 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14452 } 14453 14454 static void tg3_get_nvram_info(struct tg3 *tp) 14455 { 14456 u32 nvcfg1; 14457 14458 nvcfg1 = tr32(NVRAM_CFG1); 14459 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) { 14460 tg3_flag_set(tp, FLASH); 14461 } else { 14462 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14463 tw32(NVRAM_CFG1, nvcfg1); 14464 } 14465 14466 if (tg3_asic_rev(tp) == ASIC_REV_5750 || 14467 tg3_flag(tp, 5780_CLASS)) { 14468 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { 14469 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: 14470 tp->nvram_jedecnum = JEDEC_ATMEL; 14471 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; 14472 tg3_flag_set(tp, NVRAM_BUFFERED); 14473 break; 14474 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: 14475 tp->nvram_jedecnum = JEDEC_ATMEL; 14476 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE; 14477 break; 14478 case FLASH_VENDOR_ATMEL_EEPROM: 14479 tp->nvram_jedecnum = JEDEC_ATMEL; 14480 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14481 tg3_flag_set(tp, NVRAM_BUFFERED); 14482 break; 14483 case FLASH_VENDOR_ST: 14484 tp->nvram_jedecnum = JEDEC_ST; 14485 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; 14486 tg3_flag_set(tp, NVRAM_BUFFERED); 14487 break; 14488 case FLASH_VENDOR_SAIFUN: 14489 tp->nvram_jedecnum = JEDEC_SAIFUN; 14490 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE; 14491 break; 14492 case FLASH_VENDOR_SST_SMALL: 14493 case FLASH_VENDOR_SST_LARGE: 14494 tp->nvram_jedecnum = JEDEC_SST; 14495 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE; 14496 break; 14497 } 14498 } else { 14499 tp->nvram_jedecnum = JEDEC_ATMEL; 14500 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; 14501 tg3_flag_set(tp, NVRAM_BUFFERED); 14502 } 14503 } 14504 14505 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1) 14506 { 14507 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) { 14508 case FLASH_5752PAGE_SIZE_256: 14509 tp->nvram_pagesize = 256; 14510 break; 14511 case FLASH_5752PAGE_SIZE_512: 14512 tp->nvram_pagesize = 512; 14513 break; 14514 case FLASH_5752PAGE_SIZE_1K: 14515 tp->nvram_pagesize = 1024; 14516 break; 14517 case FLASH_5752PAGE_SIZE_2K: 14518 tp->nvram_pagesize = 2048; 14519 break; 14520 case FLASH_5752PAGE_SIZE_4K: 14521 tp->nvram_pagesize = 4096; 14522 break; 14523 case FLASH_5752PAGE_SIZE_264: 14524 tp->nvram_pagesize = 264; 14525 break; 14526 case FLASH_5752PAGE_SIZE_528: 14527 tp->nvram_pagesize = 528; 14528 break; 14529 } 14530 } 14531 14532 static void tg3_get_5752_nvram_info(struct tg3 *tp) 14533 { 14534 u32 nvcfg1; 14535 14536 nvcfg1 = tr32(NVRAM_CFG1); 14537 14538 /* NVRAM protection for TPM */ 14539 if (nvcfg1 & (1 << 27)) 14540 tg3_flag_set(tp, PROTECTED_NVRAM); 14541 14542 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14543 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: 14544 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: 14545 tp->nvram_jedecnum = JEDEC_ATMEL; 14546 tg3_flag_set(tp, NVRAM_BUFFERED); 14547 break; 14548 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 14549 tp->nvram_jedecnum = JEDEC_ATMEL; 14550 tg3_flag_set(tp, NVRAM_BUFFERED); 14551 tg3_flag_set(tp, FLASH); 14552 break; 14553 case FLASH_5752VENDOR_ST_M45PE10: 14554 case FLASH_5752VENDOR_ST_M45PE20: 14555 case FLASH_5752VENDOR_ST_M45PE40: 14556 tp->nvram_jedecnum = JEDEC_ST; 14557 tg3_flag_set(tp, NVRAM_BUFFERED); 14558 tg3_flag_set(tp, FLASH); 14559 break; 14560 } 14561 14562 if (tg3_flag(tp, FLASH)) { 14563 tg3_nvram_get_pagesize(tp, nvcfg1); 14564 } else { 14565 /* For eeprom, set pagesize to maximum 
eeprom size */ 14566 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14567 14568 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14569 tw32(NVRAM_CFG1, nvcfg1); 14570 } 14571 } 14572 14573 static void tg3_get_5755_nvram_info(struct tg3 *tp) 14574 { 14575 u32 nvcfg1, protect = 0; 14576 14577 nvcfg1 = tr32(NVRAM_CFG1); 14578 14579 /* NVRAM protection for TPM */ 14580 if (nvcfg1 & (1 << 27)) { 14581 tg3_flag_set(tp, PROTECTED_NVRAM); 14582 protect = 1; 14583 } 14584 14585 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; 14586 switch (nvcfg1) { 14587 case FLASH_5755VENDOR_ATMEL_FLASH_1: 14588 case FLASH_5755VENDOR_ATMEL_FLASH_2: 14589 case FLASH_5755VENDOR_ATMEL_FLASH_3: 14590 case FLASH_5755VENDOR_ATMEL_FLASH_5: 14591 tp->nvram_jedecnum = JEDEC_ATMEL; 14592 tg3_flag_set(tp, NVRAM_BUFFERED); 14593 tg3_flag_set(tp, FLASH); 14594 tp->nvram_pagesize = 264; 14595 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || 14596 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) 14597 tp->nvram_size = (protect ? 0x3e200 : 14598 TG3_NVRAM_SIZE_512KB); 14599 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) 14600 tp->nvram_size = (protect ? 0x1f200 : 14601 TG3_NVRAM_SIZE_256KB); 14602 else 14603 tp->nvram_size = (protect ? 0x1f200 : 14604 TG3_NVRAM_SIZE_128KB); 14605 break; 14606 case FLASH_5752VENDOR_ST_M45PE10: 14607 case FLASH_5752VENDOR_ST_M45PE20: 14608 case FLASH_5752VENDOR_ST_M45PE40: 14609 tp->nvram_jedecnum = JEDEC_ST; 14610 tg3_flag_set(tp, NVRAM_BUFFERED); 14611 tg3_flag_set(tp, FLASH); 14612 tp->nvram_pagesize = 256; 14613 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) 14614 tp->nvram_size = (protect ? 14615 TG3_NVRAM_SIZE_64KB : 14616 TG3_NVRAM_SIZE_128KB); 14617 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20) 14618 tp->nvram_size = (protect ? 14619 TG3_NVRAM_SIZE_64KB : 14620 TG3_NVRAM_SIZE_256KB); 14621 else 14622 tp->nvram_size = (protect ? 
14623 TG3_NVRAM_SIZE_128KB : 14624 TG3_NVRAM_SIZE_512KB); 14625 break; 14626 } 14627 } 14628 14629 static void tg3_get_5787_nvram_info(struct tg3 *tp) 14630 { 14631 u32 nvcfg1; 14632 14633 nvcfg1 = tr32(NVRAM_CFG1); 14634 14635 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14636 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ: 14637 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: 14638 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: 14639 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: 14640 tp->nvram_jedecnum = JEDEC_ATMEL; 14641 tg3_flag_set(tp, NVRAM_BUFFERED); 14642 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14643 14644 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14645 tw32(NVRAM_CFG1, nvcfg1); 14646 break; 14647 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 14648 case FLASH_5755VENDOR_ATMEL_FLASH_1: 14649 case FLASH_5755VENDOR_ATMEL_FLASH_2: 14650 case FLASH_5755VENDOR_ATMEL_FLASH_3: 14651 tp->nvram_jedecnum = JEDEC_ATMEL; 14652 tg3_flag_set(tp, NVRAM_BUFFERED); 14653 tg3_flag_set(tp, FLASH); 14654 tp->nvram_pagesize = 264; 14655 break; 14656 case FLASH_5752VENDOR_ST_M45PE10: 14657 case FLASH_5752VENDOR_ST_M45PE20: 14658 case FLASH_5752VENDOR_ST_M45PE40: 14659 tp->nvram_jedecnum = JEDEC_ST; 14660 tg3_flag_set(tp, NVRAM_BUFFERED); 14661 tg3_flag_set(tp, FLASH); 14662 tp->nvram_pagesize = 256; 14663 break; 14664 } 14665 } 14666 14667 static void tg3_get_5761_nvram_info(struct tg3 *tp) 14668 { 14669 u32 nvcfg1, protect = 0; 14670 14671 nvcfg1 = tr32(NVRAM_CFG1); 14672 14673 /* NVRAM protection for TPM */ 14674 if (nvcfg1 & (1 << 27)) { 14675 tg3_flag_set(tp, PROTECTED_NVRAM); 14676 protect = 1; 14677 } 14678 14679 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; 14680 switch (nvcfg1) { 14681 case FLASH_5761VENDOR_ATMEL_ADB021D: 14682 case FLASH_5761VENDOR_ATMEL_ADB041D: 14683 case FLASH_5761VENDOR_ATMEL_ADB081D: 14684 case FLASH_5761VENDOR_ATMEL_ADB161D: 14685 case FLASH_5761VENDOR_ATMEL_MDB021D: 14686 case FLASH_5761VENDOR_ATMEL_MDB041D: 14687 case FLASH_5761VENDOR_ATMEL_MDB081D: 14688 case FLASH_5761VENDOR_ATMEL_MDB161D: 14689 tp->nvram_jedecnum = JEDEC_ATMEL; 14690 tg3_flag_set(tp, NVRAM_BUFFERED); 14691 tg3_flag_set(tp, FLASH); 14692 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14693 tp->nvram_pagesize = 256; 14694 break; 14695 case FLASH_5761VENDOR_ST_A_M45PE20: 14696 case FLASH_5761VENDOR_ST_A_M45PE40: 14697 case FLASH_5761VENDOR_ST_A_M45PE80: 14698 case FLASH_5761VENDOR_ST_A_M45PE16: 14699 case FLASH_5761VENDOR_ST_M_M45PE20: 14700 case FLASH_5761VENDOR_ST_M_M45PE40: 14701 case FLASH_5761VENDOR_ST_M_M45PE80: 14702 case FLASH_5761VENDOR_ST_M_M45PE16: 14703 tp->nvram_jedecnum = JEDEC_ST; 14704 tg3_flag_set(tp, NVRAM_BUFFERED); 14705 tg3_flag_set(tp, FLASH); 14706 tp->nvram_pagesize = 256; 14707 break; 14708 } 14709 14710 if (protect) { 14711 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT); 14712 } else { 14713 switch (nvcfg1) { 14714 case FLASH_5761VENDOR_ATMEL_ADB161D: 14715 case FLASH_5761VENDOR_ATMEL_MDB161D: 14716 case FLASH_5761VENDOR_ST_A_M45PE16: 14717 case FLASH_5761VENDOR_ST_M_M45PE16: 14718 tp->nvram_size = TG3_NVRAM_SIZE_2MB; 14719 break; 14720 case FLASH_5761VENDOR_ATMEL_ADB081D: 14721 case FLASH_5761VENDOR_ATMEL_MDB081D: 14722 case FLASH_5761VENDOR_ST_A_M45PE80: 14723 case FLASH_5761VENDOR_ST_M_M45PE80: 14724 tp->nvram_size = TG3_NVRAM_SIZE_1MB; 14725 break; 14726 case FLASH_5761VENDOR_ATMEL_ADB041D: 14727 case FLASH_5761VENDOR_ATMEL_MDB041D: 14728 case FLASH_5761VENDOR_ST_A_M45PE40: 14729 case FLASH_5761VENDOR_ST_M_M45PE40: 14730 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14731 break; 14732 case 
FLASH_5761VENDOR_ATMEL_ADB021D: 14733 case FLASH_5761VENDOR_ATMEL_MDB021D: 14734 case FLASH_5761VENDOR_ST_A_M45PE20: 14735 case FLASH_5761VENDOR_ST_M_M45PE20: 14736 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14737 break; 14738 } 14739 } 14740 } 14741 14742 static void tg3_get_5906_nvram_info(struct tg3 *tp) 14743 { 14744 tp->nvram_jedecnum = JEDEC_ATMEL; 14745 tg3_flag_set(tp, NVRAM_BUFFERED); 14746 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14747 } 14748 14749 static void tg3_get_57780_nvram_info(struct tg3 *tp) 14750 { 14751 u32 nvcfg1; 14752 14753 nvcfg1 = tr32(NVRAM_CFG1); 14754 14755 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14756 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: 14757 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: 14758 tp->nvram_jedecnum = JEDEC_ATMEL; 14759 tg3_flag_set(tp, NVRAM_BUFFERED); 14760 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14761 14762 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14763 tw32(NVRAM_CFG1, nvcfg1); 14764 return; 14765 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 14766 case FLASH_57780VENDOR_ATMEL_AT45DB011D: 14767 case FLASH_57780VENDOR_ATMEL_AT45DB011B: 14768 case FLASH_57780VENDOR_ATMEL_AT45DB021D: 14769 case FLASH_57780VENDOR_ATMEL_AT45DB021B: 14770 case FLASH_57780VENDOR_ATMEL_AT45DB041D: 14771 case FLASH_57780VENDOR_ATMEL_AT45DB041B: 14772 tp->nvram_jedecnum = JEDEC_ATMEL; 14773 tg3_flag_set(tp, NVRAM_BUFFERED); 14774 tg3_flag_set(tp, FLASH); 14775 14776 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14777 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 14778 case FLASH_57780VENDOR_ATMEL_AT45DB011D: 14779 case FLASH_57780VENDOR_ATMEL_AT45DB011B: 14780 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14781 break; 14782 case FLASH_57780VENDOR_ATMEL_AT45DB021D: 14783 case FLASH_57780VENDOR_ATMEL_AT45DB021B: 14784 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14785 break; 14786 case FLASH_57780VENDOR_ATMEL_AT45DB041D: 14787 case FLASH_57780VENDOR_ATMEL_AT45DB041B: 14788 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14789 break; 14790 } 14791 break; 14792 case FLASH_5752VENDOR_ST_M45PE10: 14793 case FLASH_5752VENDOR_ST_M45PE20: 14794 case FLASH_5752VENDOR_ST_M45PE40: 14795 tp->nvram_jedecnum = JEDEC_ST; 14796 tg3_flag_set(tp, NVRAM_BUFFERED); 14797 tg3_flag_set(tp, FLASH); 14798 14799 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14800 case FLASH_5752VENDOR_ST_M45PE10: 14801 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14802 break; 14803 case FLASH_5752VENDOR_ST_M45PE20: 14804 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14805 break; 14806 case FLASH_5752VENDOR_ST_M45PE40: 14807 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14808 break; 14809 } 14810 break; 14811 default: 14812 tg3_flag_set(tp, NO_NVRAM); 14813 return; 14814 } 14815 14816 tg3_nvram_get_pagesize(tp, nvcfg1); 14817 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) 14818 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14819 } 14820 14821 14822 static void tg3_get_5717_nvram_info(struct tg3 *tp) 14823 { 14824 u32 nvcfg1; 14825 14826 nvcfg1 = tr32(NVRAM_CFG1); 14827 14828 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14829 case FLASH_5717VENDOR_ATMEL_EEPROM: 14830 case FLASH_5717VENDOR_MICRO_EEPROM: 14831 tp->nvram_jedecnum = JEDEC_ATMEL; 14832 tg3_flag_set(tp, NVRAM_BUFFERED); 14833 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14834 14835 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14836 tw32(NVRAM_CFG1, nvcfg1); 14837 return; 14838 case FLASH_5717VENDOR_ATMEL_MDB011D: 14839 case FLASH_5717VENDOR_ATMEL_ADB011B: 14840 case FLASH_5717VENDOR_ATMEL_ADB011D: 14841 case FLASH_5717VENDOR_ATMEL_MDB021D: 14842 case 
FLASH_5717VENDOR_ATMEL_ADB021B: 14843 case FLASH_5717VENDOR_ATMEL_ADB021D: 14844 case FLASH_5717VENDOR_ATMEL_45USPT: 14845 tp->nvram_jedecnum = JEDEC_ATMEL; 14846 tg3_flag_set(tp, NVRAM_BUFFERED); 14847 tg3_flag_set(tp, FLASH); 14848 14849 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14850 case FLASH_5717VENDOR_ATMEL_MDB021D: 14851 /* Detect size with tg3_nvram_get_size() */ 14852 break; 14853 case FLASH_5717VENDOR_ATMEL_ADB021B: 14854 case FLASH_5717VENDOR_ATMEL_ADB021D: 14855 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14856 break; 14857 default: 14858 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14859 break; 14860 } 14861 break; 14862 case FLASH_5717VENDOR_ST_M_M25PE10: 14863 case FLASH_5717VENDOR_ST_A_M25PE10: 14864 case FLASH_5717VENDOR_ST_M_M45PE10: 14865 case FLASH_5717VENDOR_ST_A_M45PE10: 14866 case FLASH_5717VENDOR_ST_M_M25PE20: 14867 case FLASH_5717VENDOR_ST_A_M25PE20: 14868 case FLASH_5717VENDOR_ST_M_M45PE20: 14869 case FLASH_5717VENDOR_ST_A_M45PE20: 14870 case FLASH_5717VENDOR_ST_25USPT: 14871 case FLASH_5717VENDOR_ST_45USPT: 14872 tp->nvram_jedecnum = JEDEC_ST; 14873 tg3_flag_set(tp, NVRAM_BUFFERED); 14874 tg3_flag_set(tp, FLASH); 14875 14876 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 14877 case FLASH_5717VENDOR_ST_M_M25PE20: 14878 case FLASH_5717VENDOR_ST_M_M45PE20: 14879 /* Detect size with tg3_nvram_get_size() */ 14880 break; 14881 case FLASH_5717VENDOR_ST_A_M25PE20: 14882 case FLASH_5717VENDOR_ST_A_M45PE20: 14883 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14884 break; 14885 default: 14886 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14887 break; 14888 } 14889 break; 14890 default: 14891 tg3_flag_set(tp, NO_NVRAM); 14892 return; 14893 } 14894 14895 tg3_nvram_get_pagesize(tp, nvcfg1); 14896 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) 14897 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14898 } 14899 14900 static void tg3_get_5720_nvram_info(struct tg3 *tp) 14901 { 14902 u32 nvcfg1, nvmpinstrp, nv_status; 14903 14904 nvcfg1 = tr32(NVRAM_CFG1); 14905 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK; 14906 14907 if (tg3_asic_rev(tp) == ASIC_REV_5762) { 14908 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) { 14909 tg3_flag_set(tp, NO_NVRAM); 14910 return; 14911 } 14912 14913 switch (nvmpinstrp) { 14914 case FLASH_5762_MX25L_100: 14915 case FLASH_5762_MX25L_200: 14916 case FLASH_5762_MX25L_400: 14917 case FLASH_5762_MX25L_800: 14918 case FLASH_5762_MX25L_160_320: 14919 tp->nvram_pagesize = 4096; 14920 tp->nvram_jedecnum = JEDEC_MACRONIX; 14921 tg3_flag_set(tp, NVRAM_BUFFERED); 14922 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 14923 tg3_flag_set(tp, FLASH); 14924 nv_status = tr32(NVRAM_AUTOSENSE_STATUS); 14925 tp->nvram_size = 14926 (1 << (nv_status >> AUTOSENSE_DEVID & 14927 AUTOSENSE_DEVID_MASK) 14928 << AUTOSENSE_SIZE_IN_MB); 14929 return; 14930 14931 case FLASH_5762_EEPROM_HD: 14932 nvmpinstrp = FLASH_5720_EEPROM_HD; 14933 break; 14934 case FLASH_5762_EEPROM_LD: 14935 nvmpinstrp = FLASH_5720_EEPROM_LD; 14936 break; 14937 case FLASH_5720VENDOR_M_ST_M45PE20: 14938 /* This pinstrap supports multiple sizes, so force it 14939 * to read the actual size from location 0xf0. 
14940 */ 14941 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT; 14942 break; 14943 } 14944 } 14945 14946 switch (nvmpinstrp) { 14947 case FLASH_5720_EEPROM_HD: 14948 case FLASH_5720_EEPROM_LD: 14949 tp->nvram_jedecnum = JEDEC_ATMEL; 14950 tg3_flag_set(tp, NVRAM_BUFFERED); 14951 14952 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 14953 tw32(NVRAM_CFG1, nvcfg1); 14954 if (nvmpinstrp == FLASH_5720_EEPROM_HD) 14955 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 14956 else 14957 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE; 14958 return; 14959 case FLASH_5720VENDOR_M_ATMEL_DB011D: 14960 case FLASH_5720VENDOR_A_ATMEL_DB011B: 14961 case FLASH_5720VENDOR_A_ATMEL_DB011D: 14962 case FLASH_5720VENDOR_M_ATMEL_DB021D: 14963 case FLASH_5720VENDOR_A_ATMEL_DB021B: 14964 case FLASH_5720VENDOR_A_ATMEL_DB021D: 14965 case FLASH_5720VENDOR_M_ATMEL_DB041D: 14966 case FLASH_5720VENDOR_A_ATMEL_DB041B: 14967 case FLASH_5720VENDOR_A_ATMEL_DB041D: 14968 case FLASH_5720VENDOR_M_ATMEL_DB081D: 14969 case FLASH_5720VENDOR_A_ATMEL_DB081D: 14970 case FLASH_5720VENDOR_ATMEL_45USPT: 14971 tp->nvram_jedecnum = JEDEC_ATMEL; 14972 tg3_flag_set(tp, NVRAM_BUFFERED); 14973 tg3_flag_set(tp, FLASH); 14974 14975 switch (nvmpinstrp) { 14976 case FLASH_5720VENDOR_M_ATMEL_DB021D: 14977 case FLASH_5720VENDOR_A_ATMEL_DB021B: 14978 case FLASH_5720VENDOR_A_ATMEL_DB021D: 14979 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 14980 break; 14981 case FLASH_5720VENDOR_M_ATMEL_DB041D: 14982 case FLASH_5720VENDOR_A_ATMEL_DB041B: 14983 case FLASH_5720VENDOR_A_ATMEL_DB041D: 14984 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 14985 break; 14986 case FLASH_5720VENDOR_M_ATMEL_DB081D: 14987 case FLASH_5720VENDOR_A_ATMEL_DB081D: 14988 tp->nvram_size = TG3_NVRAM_SIZE_1MB; 14989 break; 14990 default: 14991 if (tg3_asic_rev(tp) != ASIC_REV_5762) 14992 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 14993 break; 14994 } 14995 break; 14996 case FLASH_5720VENDOR_M_ST_M25PE10: 14997 case FLASH_5720VENDOR_M_ST_M45PE10: 14998 case FLASH_5720VENDOR_A_ST_M25PE10: 14999 case FLASH_5720VENDOR_A_ST_M45PE10: 15000 case FLASH_5720VENDOR_M_ST_M25PE20: 15001 case FLASH_5720VENDOR_M_ST_M45PE20: 15002 case FLASH_5720VENDOR_A_ST_M25PE20: 15003 case FLASH_5720VENDOR_A_ST_M45PE20: 15004 case FLASH_5720VENDOR_M_ST_M25PE40: 15005 case FLASH_5720VENDOR_M_ST_M45PE40: 15006 case FLASH_5720VENDOR_A_ST_M25PE40: 15007 case FLASH_5720VENDOR_A_ST_M45PE40: 15008 case FLASH_5720VENDOR_M_ST_M25PE80: 15009 case FLASH_5720VENDOR_M_ST_M45PE80: 15010 case FLASH_5720VENDOR_A_ST_M25PE80: 15011 case FLASH_5720VENDOR_A_ST_M45PE80: 15012 case FLASH_5720VENDOR_ST_25USPT: 15013 case FLASH_5720VENDOR_ST_45USPT: 15014 tp->nvram_jedecnum = JEDEC_ST; 15015 tg3_flag_set(tp, NVRAM_BUFFERED); 15016 tg3_flag_set(tp, FLASH); 15017 15018 switch (nvmpinstrp) { 15019 case FLASH_5720VENDOR_M_ST_M25PE20: 15020 case FLASH_5720VENDOR_M_ST_M45PE20: 15021 case FLASH_5720VENDOR_A_ST_M25PE20: 15022 case FLASH_5720VENDOR_A_ST_M45PE20: 15023 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 15024 break; 15025 case FLASH_5720VENDOR_M_ST_M25PE40: 15026 case FLASH_5720VENDOR_M_ST_M45PE40: 15027 case FLASH_5720VENDOR_A_ST_M25PE40: 15028 case FLASH_5720VENDOR_A_ST_M45PE40: 15029 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 15030 break; 15031 case FLASH_5720VENDOR_M_ST_M25PE80: 15032 case FLASH_5720VENDOR_M_ST_M45PE80: 15033 case FLASH_5720VENDOR_A_ST_M25PE80: 15034 case FLASH_5720VENDOR_A_ST_M45PE80: 15035 tp->nvram_size = TG3_NVRAM_SIZE_1MB; 15036 break; 15037 default: 15038 if (tg3_asic_rev(tp) != ASIC_REV_5762) 15039 tp->nvram_size = TG3_NVRAM_SIZE_128KB; 15040 break; 15041 } 
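		/* Parts whose size is not set above are left at zero here;
		 * tg3_nvram_init() then falls back to tg3_get_nvram_size(),
		 * which recovers the capacity the bootcode records at NVRAM
		 * offset 0xf0 (see the 5762 pinstrap note above).
		 */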
15042 break; 15043 default: 15044 tg3_flag_set(tp, NO_NVRAM); 15045 return; 15046 } 15047 15048 tg3_nvram_get_pagesize(tp, nvcfg1); 15049 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) 15050 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 15051 15052 if (tg3_asic_rev(tp) == ASIC_REV_5762) { 15053 u32 val; 15054 15055 if (tg3_nvram_read(tp, 0, &val)) 15056 return; 15057 15058 if (val != TG3_EEPROM_MAGIC && 15059 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) 15060 tg3_flag_set(tp, NO_NVRAM); 15061 } 15062 } 15063 15064 /* Chips other than 5700/5701 use the NVRAM for fetching info. */ 15065 static void tg3_nvram_init(struct tg3 *tp) 15066 { 15067 if (tg3_flag(tp, IS_SSB_CORE)) { 15068 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */ 15069 tg3_flag_clear(tp, NVRAM); 15070 tg3_flag_clear(tp, NVRAM_BUFFERED); 15071 tg3_flag_set(tp, NO_NVRAM); 15072 return; 15073 } 15074 15075 tw32_f(GRC_EEPROM_ADDR, 15076 (EEPROM_ADDR_FSM_RESET | 15077 (EEPROM_DEFAULT_CLOCK_PERIOD << 15078 EEPROM_ADDR_CLKPERD_SHIFT))); 15079 15080 msleep(1); 15081 15082 /* Enable seeprom accesses. */ 15083 tw32_f(GRC_LOCAL_CTRL, 15084 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM); 15085 udelay(100); 15086 15087 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 15088 tg3_asic_rev(tp) != ASIC_REV_5701) { 15089 tg3_flag_set(tp, NVRAM); 15090 15091 if (tg3_nvram_lock(tp)) { 15092 netdev_warn(tp->dev, 15093 "Cannot get nvram lock, %s failed\n", 15094 __func__); 15095 return; 15096 } 15097 tg3_enable_nvram_access(tp); 15098 15099 tp->nvram_size = 0; 15100 15101 if (tg3_asic_rev(tp) == ASIC_REV_5752) 15102 tg3_get_5752_nvram_info(tp); 15103 else if (tg3_asic_rev(tp) == ASIC_REV_5755) 15104 tg3_get_5755_nvram_info(tp); 15105 else if (tg3_asic_rev(tp) == ASIC_REV_5787 || 15106 tg3_asic_rev(tp) == ASIC_REV_5784 || 15107 tg3_asic_rev(tp) == ASIC_REV_5785) 15108 tg3_get_5787_nvram_info(tp); 15109 else if (tg3_asic_rev(tp) == ASIC_REV_5761) 15110 tg3_get_5761_nvram_info(tp); 15111 else if (tg3_asic_rev(tp) == ASIC_REV_5906) 15112 tg3_get_5906_nvram_info(tp); 15113 else if (tg3_asic_rev(tp) == ASIC_REV_57780 || 15114 tg3_flag(tp, 57765_CLASS)) 15115 tg3_get_57780_nvram_info(tp); 15116 else if (tg3_asic_rev(tp) == ASIC_REV_5717 || 15117 tg3_asic_rev(tp) == ASIC_REV_5719) 15118 tg3_get_5717_nvram_info(tp); 15119 else if (tg3_asic_rev(tp) == ASIC_REV_5720 || 15120 tg3_asic_rev(tp) == ASIC_REV_5762) 15121 tg3_get_5720_nvram_info(tp); 15122 else 15123 tg3_get_nvram_info(tp); 15124 15125 if (tp->nvram_size == 0) 15126 tg3_get_nvram_size(tp); 15127 15128 tg3_disable_nvram_access(tp); 15129 tg3_nvram_unlock(tp); 15130 15131 } else { 15132 tg3_flag_clear(tp, NVRAM); 15133 tg3_flag_clear(tp, NVRAM_BUFFERED); 15134 15135 tg3_get_eeprom_size(tp); 15136 } 15137 } 15138 15139 struct subsys_tbl_ent { 15140 u16 subsys_vendor, subsys_devid; 15141 u32 phy_id; 15142 }; 15143 15144 static struct subsys_tbl_ent subsys_id_to_phy_id[] = { 15145 /* Broadcom boards. 
*/ 15146 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15147 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 }, 15148 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15149 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 }, 15150 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15151 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 }, 15152 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15153 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 }, 15154 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15155 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 }, 15156 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15157 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 }, 15158 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15159 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 }, 15160 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15161 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 }, 15162 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15163 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 }, 15164 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15165 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 }, 15166 { TG3PCI_SUBVENDOR_ID_BROADCOM, 15167 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 }, 15168 15169 /* 3com boards. */ 15170 { TG3PCI_SUBVENDOR_ID_3COM, 15171 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 }, 15172 { TG3PCI_SUBVENDOR_ID_3COM, 15173 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 }, 15174 { TG3PCI_SUBVENDOR_ID_3COM, 15175 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 }, 15176 { TG3PCI_SUBVENDOR_ID_3COM, 15177 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 }, 15178 { TG3PCI_SUBVENDOR_ID_3COM, 15179 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 }, 15180 15181 /* DELL boards. */ 15182 { TG3PCI_SUBVENDOR_ID_DELL, 15183 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 }, 15184 { TG3PCI_SUBVENDOR_ID_DELL, 15185 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 }, 15186 { TG3PCI_SUBVENDOR_ID_DELL, 15187 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 }, 15188 { TG3PCI_SUBVENDOR_ID_DELL, 15189 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 }, 15190 15191 /* Compaq boards. */ 15192 { TG3PCI_SUBVENDOR_ID_COMPAQ, 15193 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 }, 15194 { TG3PCI_SUBVENDOR_ID_COMPAQ, 15195 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 }, 15196 { TG3PCI_SUBVENDOR_ID_COMPAQ, 15197 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 }, 15198 { TG3PCI_SUBVENDOR_ID_COMPAQ, 15199 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 }, 15200 { TG3PCI_SUBVENDOR_ID_COMPAQ, 15201 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 }, 15202 15203 /* IBM boards. */ 15204 { TG3PCI_SUBVENDOR_ID_IBM, 15205 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 } 15206 }; 15207 15208 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp) 15209 { 15210 int i; 15211 15212 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) { 15213 if ((subsys_id_to_phy_id[i].subsys_vendor == 15214 tp->pdev->subsystem_vendor) && 15215 (subsys_id_to_phy_id[i].subsys_devid == 15216 tp->pdev->subsystem_device)) 15217 return &subsys_id_to_phy_id[i]; 15218 } 15219 return NULL; 15220 } 15221 15222 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp) 15223 { 15224 u32 val; 15225 15226 tp->phy_id = TG3_PHY_ID_INVALID; 15227 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 15228 15229 /* Assume an onboard device and WOL capable by default. 
*/ 15230 tg3_flag_set(tp, EEPROM_WRITE_PROT); 15231 tg3_flag_set(tp, WOL_CAP); 15232 15233 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 15234 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) { 15235 tg3_flag_clear(tp, EEPROM_WRITE_PROT); 15236 tg3_flag_set(tp, IS_NIC); 15237 } 15238 val = tr32(VCPU_CFGSHDW); 15239 if (val & VCPU_CFGSHDW_ASPM_DBNC) 15240 tg3_flag_set(tp, ASPM_WORKAROUND); 15241 if ((val & VCPU_CFGSHDW_WOL_ENABLE) && 15242 (val & VCPU_CFGSHDW_WOL_MAGPKT)) { 15243 tg3_flag_set(tp, WOL_ENABLE); 15244 device_set_wakeup_enable(&tp->pdev->dev, true); 15245 } 15246 goto done; 15247 } 15248 15249 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); 15250 if (val == NIC_SRAM_DATA_SIG_MAGIC) { 15251 u32 nic_cfg, led_cfg; 15252 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0; 15253 u32 nic_phy_id, ver, eeprom_phy_id; 15254 int eeprom_phy_serdes = 0; 15255 15256 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); 15257 tp->nic_sram_data_cfg = nic_cfg; 15258 15259 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver); 15260 ver >>= NIC_SRAM_DATA_VER_SHIFT; 15261 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 15262 tg3_asic_rev(tp) != ASIC_REV_5701 && 15263 tg3_asic_rev(tp) != ASIC_REV_5703 && 15264 (ver > 0) && (ver < 0x100)) 15265 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2); 15266 15267 if (tg3_asic_rev(tp) == ASIC_REV_5785) 15268 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4); 15269 15270 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 15271 tg3_asic_rev(tp) == ASIC_REV_5719 || 15272 tg3_asic_rev(tp) == ASIC_REV_5720) 15273 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5); 15274 15275 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) == 15276 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) 15277 eeprom_phy_serdes = 1; 15278 15279 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id); 15280 if (nic_phy_id != 0) { 15281 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK; 15282 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK; 15283 15284 eeprom_phy_id = (id1 >> 16) << 10; 15285 eeprom_phy_id |= (id2 & 0xfc00) << 16; 15286 eeprom_phy_id |= (id2 & 0x03ff) << 0; 15287 } else 15288 eeprom_phy_id = 0; 15289 15290 tp->phy_id = eeprom_phy_id; 15291 if (eeprom_phy_serdes) { 15292 if (!tg3_flag(tp, 5705_PLUS)) 15293 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; 15294 else 15295 tp->phy_flags |= TG3_PHYFLG_MII_SERDES; 15296 } 15297 15298 if (tg3_flag(tp, 5750_PLUS)) 15299 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK | 15300 SHASTA_EXT_LED_MODE_MASK); 15301 else 15302 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK; 15303 15304 switch (led_cfg) { 15305 default: 15306 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1: 15307 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 15308 break; 15309 15310 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2: 15311 tp->led_ctrl = LED_CTRL_MODE_PHY_2; 15312 break; 15313 15314 case NIC_SRAM_DATA_CFG_LED_MODE_MAC: 15315 tp->led_ctrl = LED_CTRL_MODE_MAC; 15316 15317 /* Default to PHY_1_MODE if 0 (MAC_MODE) is 15318 * read on some older 5700/5701 bootcode. 
15319 */ 15320 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 15321 tg3_asic_rev(tp) == ASIC_REV_5701) 15322 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 15323 15324 break; 15325 15326 case SHASTA_EXT_LED_SHARED: 15327 tp->led_ctrl = LED_CTRL_MODE_SHARED; 15328 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 && 15329 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1) 15330 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | 15331 LED_CTRL_MODE_PHY_2); 15332 15333 if (tg3_flag(tp, 5717_PLUS) || 15334 tg3_asic_rev(tp) == ASIC_REV_5762) 15335 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE | 15336 LED_CTRL_BLINK_RATE_MASK; 15337 15338 break; 15339 15340 case SHASTA_EXT_LED_MAC: 15341 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC; 15342 break; 15343 15344 case SHASTA_EXT_LED_COMBO: 15345 tp->led_ctrl = LED_CTRL_MODE_COMBO; 15346 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) 15347 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | 15348 LED_CTRL_MODE_PHY_2); 15349 break; 15350 15351 } 15352 15353 if ((tg3_asic_rev(tp) == ASIC_REV_5700 || 15354 tg3_asic_rev(tp) == ASIC_REV_5701) && 15355 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) 15356 tp->led_ctrl = LED_CTRL_MODE_PHY_2; 15357 15358 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) 15359 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 15360 15361 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) { 15362 tg3_flag_set(tp, EEPROM_WRITE_PROT); 15363 if ((tp->pdev->subsystem_vendor == 15364 PCI_VENDOR_ID_ARIMA) && 15365 (tp->pdev->subsystem_device == 0x205a || 15366 tp->pdev->subsystem_device == 0x2063)) 15367 tg3_flag_clear(tp, EEPROM_WRITE_PROT); 15368 } else { 15369 tg3_flag_clear(tp, EEPROM_WRITE_PROT); 15370 tg3_flag_set(tp, IS_NIC); 15371 } 15372 15373 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { 15374 tg3_flag_set(tp, ENABLE_ASF); 15375 if (tg3_flag(tp, 5750_PLUS)) 15376 tg3_flag_set(tp, ASF_NEW_HANDSHAKE); 15377 } 15378 15379 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) && 15380 tg3_flag(tp, 5750_PLUS)) 15381 tg3_flag_set(tp, ENABLE_APE); 15382 15383 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES && 15384 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) 15385 tg3_flag_clear(tp, WOL_CAP); 15386 15387 if (tg3_flag(tp, WOL_CAP) && 15388 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) { 15389 tg3_flag_set(tp, WOL_ENABLE); 15390 device_set_wakeup_enable(&tp->pdev->dev, true); 15391 } 15392 15393 if (cfg2 & (1 << 17)) 15394 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING; 15395 15396 /* serdes signal pre-emphasis in register 0x590 set by */ 15397 /* bootcode if bit 18 is set */ 15398 if (cfg2 & (1 << 18)) 15399 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS; 15400 15401 if ((tg3_flag(tp, 57765_PLUS) || 15402 (tg3_asic_rev(tp) == ASIC_REV_5784 && 15403 tg3_chip_rev(tp) != CHIPREV_5784_AX)) && 15404 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) 15405 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD; 15406 15407 if (tg3_flag(tp, PCI_EXPRESS)) { 15408 u32 cfg3; 15409 15410 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3); 15411 if (tg3_asic_rev(tp) != ASIC_REV_5785 && 15412 !tg3_flag(tp, 57765_PLUS) && 15413 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)) 15414 tg3_flag_set(tp, ASPM_WORKAROUND); 15415 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID) 15416 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN; 15417 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK) 15418 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK; 15419 } 15420 15421 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE) 15422 tg3_flag_set(tp, RGMII_INBAND_DISABLE); 15423 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN) 15424 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN); 15425 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN) 15426 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN); 15427 15428 if 
(cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV) 15429 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV; 15430 } 15431 done: 15432 if (tg3_flag(tp, WOL_CAP)) 15433 device_set_wakeup_enable(&tp->pdev->dev, 15434 tg3_flag(tp, WOL_ENABLE)); 15435 else 15436 device_set_wakeup_capable(&tp->pdev->dev, false); 15437 } 15438 15439 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val) 15440 { 15441 int i, err; 15442 u32 val2, off = offset * 8; 15443 15444 err = tg3_nvram_lock(tp); 15445 if (err) 15446 return err; 15447 15448 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE); 15449 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN | 15450 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START); 15451 tg3_ape_read32(tp, TG3_APE_OTP_CTRL); 15452 udelay(10); 15453 15454 for (i = 0; i < 100; i++) { 15455 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS); 15456 if (val2 & APE_OTP_STATUS_CMD_DONE) { 15457 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA); 15458 break; 15459 } 15460 udelay(10); 15461 } 15462 15463 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0); 15464 15465 tg3_nvram_unlock(tp); 15466 if (val2 & APE_OTP_STATUS_CMD_DONE) 15467 return 0; 15468 15469 return -EBUSY; 15470 } 15471 15472 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd) 15473 { 15474 int i; 15475 u32 val; 15476 15477 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START); 15478 tw32(OTP_CTRL, cmd); 15479 15480 /* Wait for up to 1 ms for command to execute. */ 15481 for (i = 0; i < 100; i++) { 15482 val = tr32(OTP_STATUS); 15483 if (val & OTP_STATUS_CMD_DONE) 15484 break; 15485 udelay(10); 15486 } 15487 15488 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY; 15489 } 15490 15491 /* Read the gphy configuration from the OTP region of the chip. The gphy 15492 * configuration is a 32-bit value that straddles the alignment boundary. 15493 * We do two 32-bit reads and then shift and merge the results. 
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}

static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
			adv |= ADVERTISED_1000baseT_Half;
		adv |= ADVERTISED_1000baseT_Full;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}

static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane. If it doesn't look good, fall back to
		 * the PHY_ID recorded in the eeprom area or, failing that,
		 * to the hard-coded subsystem-ID table.
		 */
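		/* A worked example of the packing below, using illustrative
		 * register values MII_PHYSID1 = 0x0020, MII_PHYSID2 = 0x60b0:
		 *
		 *   (0x0020 & 0xffff) << 10 = 0x00008000
		 *   (0x60b0 & 0xfc00) << 16 = 0x60000000
		 *   (0x60b0 & 0x03ff) <<  0 = 0x000000b0
		 *
		 * giving hw_phy_id = 0x600080b0. This is the same encoding
		 * the TG3_PHY_ID_* constants use and the same packing applied
		 * to NIC_SRAM_DATA_PHY_ID in tg3_get_eeprom_hw_cfg() above.
		 */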
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature? Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* So far the IDs 0xbc050cd0, 0xbc050f80 and
				 * 0xbc050c30 have been seen on devices
				 * connected to a BCM4785, and there are
				 * probably more. For now, just assume that
				 * the phy is supported when it is connected
				 * to an SSB core.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		linkmode_zero(tp->eee.supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 tp->eee.supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 tp->eee.supported);
		linkmode_copy(tp->eee.advertised, tp->eee.supported);

		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}

static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int len, vpdlen;
	int i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto
out_no_vpd; 15717 15718 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen, 15719 PCI_VPD_RO_KEYWORD_MFR_ID, &len); 15720 if (i < 0) 15721 goto partno; 15722 15723 if (len != 4 || memcmp(vpd_data + i, "1028", 4)) 15724 goto partno; 15725 15726 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen, 15727 PCI_VPD_RO_KEYWORD_VENDOR0, &len); 15728 if (i < 0) 15729 goto partno; 15730 15731 memset(tp->fw_ver, 0, sizeof(tp->fw_ver)); 15732 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i); 15733 15734 partno: 15735 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen, 15736 PCI_VPD_RO_KEYWORD_PARTNO, &len); 15737 if (i < 0) 15738 goto out_not_found; 15739 15740 if (len > TG3_BPN_SIZE) 15741 goto out_not_found; 15742 15743 memcpy(tp->board_part_number, &vpd_data[i], len); 15744 15745 out_not_found: 15746 kfree(vpd_data); 15747 if (tp->board_part_number[0]) 15748 return; 15749 15750 out_no_vpd: 15751 if (tg3_asic_rev(tp) == ASIC_REV_5717) { 15752 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 15753 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C) 15754 strcpy(tp->board_part_number, "BCM5717"); 15755 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718) 15756 strcpy(tp->board_part_number, "BCM5718"); 15757 else 15758 goto nomatch; 15759 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) { 15760 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780) 15761 strcpy(tp->board_part_number, "BCM57780"); 15762 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760) 15763 strcpy(tp->board_part_number, "BCM57760"); 15764 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790) 15765 strcpy(tp->board_part_number, "BCM57790"); 15766 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) 15767 strcpy(tp->board_part_number, "BCM57788"); 15768 else 15769 goto nomatch; 15770 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) { 15771 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761) 15772 strcpy(tp->board_part_number, "BCM57761"); 15773 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765) 15774 strcpy(tp->board_part_number, "BCM57765"); 15775 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781) 15776 strcpy(tp->board_part_number, "BCM57781"); 15777 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785) 15778 strcpy(tp->board_part_number, "BCM57785"); 15779 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791) 15780 strcpy(tp->board_part_number, "BCM57791"); 15781 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) 15782 strcpy(tp->board_part_number, "BCM57795"); 15783 else 15784 goto nomatch; 15785 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) { 15786 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762) 15787 strcpy(tp->board_part_number, "BCM57762"); 15788 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766) 15789 strcpy(tp->board_part_number, "BCM57766"); 15790 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782) 15791 strcpy(tp->board_part_number, "BCM57782"); 15792 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786) 15793 strcpy(tp->board_part_number, "BCM57786"); 15794 else 15795 goto nomatch; 15796 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) { 15797 strcpy(tp->board_part_number, "BCM95906"); 15798 } else { 15799 nomatch: 15800 strcpy(tp->board_part_number, "none"); 15801 } 15802 } 15803 15804 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset) 15805 { 15806 u32 val; 15807 15808 if (tg3_nvram_read(tp, offset, &val) || 15809 (val & 0xfc000000) != 0x0c000000 || 15810 tg3_nvram_read(tp, offset + 4, &val) || 15811 val != 0) 15812 return 0; 15813 15814 return 
1; 15815 } 15816 15817 static void tg3_read_bc_ver(struct tg3 *tp) 15818 { 15819 u32 val, offset, start, ver_offset; 15820 int i, dst_off; 15821 bool newver = false; 15822 15823 if (tg3_nvram_read(tp, 0xc, &offset) || 15824 tg3_nvram_read(tp, 0x4, &start)) 15825 return; 15826 15827 offset = tg3_nvram_logical_addr(tp, offset); 15828 15829 if (tg3_nvram_read(tp, offset, &val)) 15830 return; 15831 15832 if ((val & 0xfc000000) == 0x0c000000) { 15833 if (tg3_nvram_read(tp, offset + 4, &val)) 15834 return; 15835 15836 if (val == 0) 15837 newver = true; 15838 } 15839 15840 dst_off = strlen(tp->fw_ver); 15841 15842 if (newver) { 15843 if (TG3_VER_SIZE - dst_off < 16 || 15844 tg3_nvram_read(tp, offset + 8, &ver_offset)) 15845 return; 15846 15847 offset = offset + ver_offset - start; 15848 for (i = 0; i < 16; i += 4) { 15849 __be32 v; 15850 if (tg3_nvram_read_be32(tp, offset + i, &v)) 15851 return; 15852 15853 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v)); 15854 } 15855 } else { 15856 u32 major, minor; 15857 15858 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset)) 15859 return; 15860 15861 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >> 15862 TG3_NVM_BCVER_MAJSFT; 15863 minor = ver_offset & TG3_NVM_BCVER_MINMSK; 15864 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off, 15865 "v%d.%02d", major, minor); 15866 } 15867 } 15868 15869 static void tg3_read_hwsb_ver(struct tg3 *tp) 15870 { 15871 u32 val, major, minor; 15872 15873 /* Use native endian representation */ 15874 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val)) 15875 return; 15876 15877 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >> 15878 TG3_NVM_HWSB_CFG1_MAJSFT; 15879 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >> 15880 TG3_NVM_HWSB_CFG1_MINSFT; 15881 15882 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor); 15883 } 15884 15885 static void tg3_read_sb_ver(struct tg3 *tp, u32 val) 15886 { 15887 u32 offset, major, minor, build; 15888 15889 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1); 15890 15891 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1) 15892 return; 15893 15894 switch (val & TG3_EEPROM_SB_REVISION_MASK) { 15895 case TG3_EEPROM_SB_REVISION_0: 15896 offset = TG3_EEPROM_SB_F1R0_EDH_OFF; 15897 break; 15898 case TG3_EEPROM_SB_REVISION_2: 15899 offset = TG3_EEPROM_SB_F1R2_EDH_OFF; 15900 break; 15901 case TG3_EEPROM_SB_REVISION_3: 15902 offset = TG3_EEPROM_SB_F1R3_EDH_OFF; 15903 break; 15904 case TG3_EEPROM_SB_REVISION_4: 15905 offset = TG3_EEPROM_SB_F1R4_EDH_OFF; 15906 break; 15907 case TG3_EEPROM_SB_REVISION_5: 15908 offset = TG3_EEPROM_SB_F1R5_EDH_OFF; 15909 break; 15910 case TG3_EEPROM_SB_REVISION_6: 15911 offset = TG3_EEPROM_SB_F1R6_EDH_OFF; 15912 break; 15913 default: 15914 return; 15915 } 15916 15917 if (tg3_nvram_read(tp, offset, &val)) 15918 return; 15919 15920 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >> 15921 TG3_EEPROM_SB_EDH_BLD_SHFT; 15922 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >> 15923 TG3_EEPROM_SB_EDH_MAJ_SHFT; 15924 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK; 15925 15926 if (minor > 99 || build > 26) 15927 return; 15928 15929 offset = strlen(tp->fw_ver); 15930 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset, 15931 " v%d.%02d", major, minor); 15932 15933 if (build > 0) { 15934 offset = strlen(tp->fw_ver); 15935 if (offset < TG3_VER_SIZE - 1) 15936 tp->fw_ver[offset] = 'a' + build - 1; 15937 } 15938 } 15939 15940 static void tg3_read_mgmtfw_ver(struct tg3 *tp) 15941 { 15942 u32 val, offset, start; 15943 int i, vlen; 15944 15945 for (offset = TG3_NVM_DIR_START; 15946 offset < 
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}

static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}

static void tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
		fwtype = "SMASH";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}

static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}

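/* NVRAM word 0 acts as a format tag for everything above: the full
 * bootcode magic (TG3_EEPROM_MAGIC) selects tg3_read_bc_ver(), while
 * selfboot and hardware-selfboot images, matched under their masks,
 * select the "sb" readers.  tg3_read_fw_ver() below dispatches on it
 * and then appends any ASF/APE management firmware version.
 */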
static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}

static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}

static const struct pci_device_id tg3_write_reorder_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};

static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}

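/* Later chips no longer encode their revision in MISC_HOST_CTRL; they
 * report ASIC_REV_USE_PROD_ID_REG there instead, and the real chip
 * revision must be read from a generation-specific product ID config
 * register chosen by PCI device ID below.
 */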
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}

static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}

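/* tg3_get_invariants() derives everything about the device that never
 * changes at runtime: PCI/PCI-X/PCIe bus type, chip family flags,
 * register access methods, TSO capabilities, ring geometry and GPIO
 * wiring.  Nearly every workaround flag consulted elsewhere in the
 * driver is decided here.
 */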
static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off. If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size. The Broadcom driver has this
	 * workaround but turns MWI off at all times, so it never uses
	 * it. This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly. Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tg3_detect_asic_rev(tp, misc_ctrl_reg);

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}

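	/* The ICH scan above matches only devices directly on the
	 * bridge's secondary bus, while the PXH scan accepts any bus
	 * the bridge forwards to (subordinate->number through
	 * busn_res.end), since the 5701 DMA bug applies anywhere
	 * below such a bridge.
	 */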
	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (tg3_flag(tp, 5780_CLASS)) {
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = tp->pdev->msi_cap;
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);

	/* Determine TSO capabilities */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
		; /* Do nothing. HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, FW_TSO);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

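	/* Summarizing the chain above: 57765_PLUS parts use HW_TSO_3,
	 * 5755_PLUS and the 5906 use HW_TSO_2, other 5750_PLUS parts
	 * use HW_TSO_1, and older chips (except the 5700, 5701 and
	 * 5705 A0) fall back to firmware TSO loaded from
	 * FIRMWARE_TG3TSO/FIRMWARE_TG3TSO5.  5719 A0 gets no TSO at
	 * all because of a hardware bug.
	 */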
	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    tg3_flag(tp, FW_TSO)) {
		/* For firmware TSO, assume ASF is disabled.
		 * We'll disable TSO later if we discover ASF
		 * is enabled in tg3_get_eeprom_hw_cfg().
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		tp->fw_needed = FIRMWARE_TG357766;

	tp->irq_max = 1;

	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    tg3_asic_rev(tp) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}

	tp->txq_max = 1;
	tp->rxq_max = 1;
	if (tp->irq_max > 1) {
		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);

		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tp->txq_max = tp->irq_max - 1;
	}

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles. We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pdev->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pdev->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	      tg3_asic_rev(tp) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);

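	/* For reference, roughly how the hooks chosen above are
	 * consumed (the real macro definitions live in tg3.h):
	 *
	 *	#define tw32(reg, val)	tp->write32(tp, reg, val)
	 *	#define tr32(reg)	tp->read32(tp, reg)
	 *
	 * so every register access in the driver automatically follows
	 * whichever workaround path was selected for this chip.
	 */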
	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed. Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
		   tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
			val = tr32(TG3_CPMU_STATUS);

		if (tg3_asic_rev(tp) == ASIC_REV_5717)
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
		else
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
	}

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		tp->write32_tx_mbox = tg3_write_flush_reg32;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
		tp->ape_hb_interval =
			msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
	}

	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_CLASS))
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->grc_local_ctrl |=
			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
		    tg3_asic_rev(tp) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);

	err = tg3_mdio_init(tp);
	if (err)
		return err;

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
	tw32(TG3PCI_REG_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly. If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	udelay(50);
	tg3_nvram_init(tp);

	/* If the device has an NVRAM, no need to load patch firmware */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
	    !tg3_flag(tp, NO_NVRAM))
		tp->fw_needed = NULL;

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;

	if (tg3_10_100_only_device(tp, ent))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
		tg3_mdio_fini(tp);
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
		tg3_flag_set(tp, POLL_CPMU_LINK);

	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}

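/* MAC address discovery below tries sources in decreasing order of
 * trust: a platform/firmware-provided address, the SSB core on
 * embedded parts, the bootcode mailbox in SRAM (tagged 0x484b,
 * ASCII "HK"), NVRAM at a function-dependent offset, and finally
 * whatever is already latched in the MAC_ADDR_0 registers.
 */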
static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
{
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

	if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, addr);
		if (!err && is_valid_ether_addr(addr))
			return 0;
	}

	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		addr[0] = (hi >> 8) & 0xff;
		addr[1] = (hi >> 0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		addr[2] = (lo >> 24) & 0xff;
		addr[3] = (lo >> 16) & 0xff;
		addr[4] = (lo >> 8) & 0xff;
		addr[5] = (lo >> 0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(addr);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&addr[0], ((char *)&hi) + 2, 2);
			memcpy(&addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			addr[5] = lo & 0xff;
			addr[4] = (lo >> 8) & 0xff;
			addr[3] = (lo >> 16) & 0xff;
			addr[2] = (lo >> 24) & 0xff;
			addr[1] = hi & 0xff;
			addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(addr))
		return -EINVAL;
	return 0;
}

#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

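/* tg3_calc_dma_bndry() folds the host cacheline size and a per-arch
 * burst preference (single vs. multiple cachelines) into the
 * DMA_RWCTRL boundary fields.  The bits only matter on the 5700, the
 * 5701 and PCIe devices (with a special case for 57765_PLUS);
 * everywhere else the value passes through untouched.
 */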
static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects. We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			fallthrough;
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			fallthrough;
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			fallthrough;
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			fallthrough;
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			fallthrough;
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}

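/* tg3_do_test_dma() drives one host<->NIC transfer by hand: it pokes
 * a buffer descriptor into NIC SRAM through the config-space memory
 * window, kicks the read or write DMA engine via the corresponding
 * FTQ enqueue register, and polls the completion FIFO until the
 * descriptor address comes back (or roughly 4ms elapse).
 */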
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, bool to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}

#define TEST_BUFFER_SIZE	0x2000

static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};

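/* The test below pushes a counting pattern through the chip and reads
 * it back at the largest write burst size.  On a mismatch it retries
 * with the 16-byte write boundary workaround; only corruption at that
 * strictest setting is treated as fatal.
 */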
static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}
	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on. This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning. In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}

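/* Buffer manager watermarks: each chip family gets fixed mbuf-pool
 * read-DMA, MAC-RX and high watermarks for both standard and jumbo
 * frames.  They are only recorded here; the hardware init path writes
 * them into the BUFMGR registers when the device is brought up.
 */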
static void tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}

static char *tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM5762:	return "5762C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:				return "serdes";
	default:			return "unknown";
	}
}

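/* Example outputs from tg3_bus_string() below: "PCI Express"
 * (returned as-is), "PCIX:133MHz:64-bit", or "PCI:66MHz:32-bit".
 * The caller supplies the buffer; tg3_init_one() uses a 40-byte
 * str[] for this.
 */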
static char *tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}

static void tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}

static int tg3_init_one(struct pci_dev *pdev,
			const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;
	u8 addr[ETH_ALEN] __aligned(2);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}


	pci_set_master(pdev);

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->irq_sync = 1;
	tp->pcierr_recovery = false;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	if (pdev_is_ssb_gige_core(pdev)) {
		tg3_flag_set(tp, IS_SSB_CORE);
		if (ssb_gige_must_flush_posted_writes(pdev))
			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
		if (ssb_gige_one_dma_at_once(pdev))
			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
		if (ssb_gige_have_roboswitch(pdev)) {
			tg3_flag_set(tp, USE_PHYLIB);
			tg3_flag_set(tp, ROBOSWITCH);
		}
		if (ssb_gige_is_rgmii(pdev))
			tg3_flag_set(tp, RGMII_MODE);
	}

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;
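
	/*
	 * Worked example of the swap configuration above (illustrative):
	 * on a little-endian host tp->grc_mode carries only WSWAP_DATA |
	 * BSWAP_DATA | WSWAP_NONFRM_DATA, while a big-endian build also
	 * ORs in GRC_MODE_BSWAP_NONFRM_DATA so that descriptor (non-frame)
	 * words are byte-swapped into host order.
	 */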

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in __tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = dma_set_mask(&pdev->dev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = dma_set_coherent_mask(&pdev->dev,
						    persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;
	dev->vlan_features |= features;
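
	/*
	 * Worked example of the feature selection above (illustrative): a
	 * chip with 5755_PLUS and HW_TSO_2 set, no HW_TSO_3, an ASIC rev
	 * outside the TSO_ECN list, and not a 5700 B0, ends up advertising
	 *
	 *	NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
	 *	NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6
	 *
	 * plus NETIF_F_HIGHDMA if a >32-bit DMA mask was accepted, plus the
	 * VLAN tag offloads OR'ed in unconditionally above.
	 */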

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 60 - 9000 or 1500, depending on hardware */
	dev->min_mtu = TG3_MIN_MTU;
	dev->max_mtu = TG3_MAX_MTU(tp);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp, addr);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}
	eth_hw_addr_set(dev, addr);

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		intmbx += 0x8;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we set
		 * up above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}
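
	/*
	 * Worked example of the mailbox arithmetic above (illustrative,
	 * assuming TG3_64BIT_REG_LOW == 0x4 as defined in tg3.h): with
	 * MSI-X, vector 1 reuses vector 0's consmbox/prodmbox values; each
	 * later vector advances consmbox by 0x8, while prodmbox alternates
	 * between the two 4-byte halves of successive 8-byte mailbox slots,
	 * i.e. offsets +0x4, +0x4, +0x0, +0xc, +0x8, ... from
	 * MAILBOX_SNDHOST_PROD_IDX_0 for vectors 0, 1, 2, 3, 4, ...
	 */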

	/*
	 * Reset the chip in case a UNDI or EFI driver did not shut it down.
	 * The DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tg3_full_lock(tp, 0);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tg3_ptp_init(tp);
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
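
	/*
	 * Example of the report above (illustrative): a device subject to
	 * the EPB 40-bit limitation described earlier, on a build without
	 * CONFIG_HIGHMEM, keeps pdev->dma_mask == DMA_BIT_MASK(40) and so
	 * logs "dma_mask[40-bit]".
	 */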

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	return err;
}

static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		tg3_ptp_fini(tp);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}

static int tg3_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
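
/*
 * For reference, SIMPLE_DEV_PM_OPS() above expands to roughly the
 * open-coded equivalent below (a sketch only, not used by the driver):
 *
 *	static const struct dev_pm_ops tg3_pm_ops = {
 *		.suspend	= tg3_suspend,
 *		.resume		= tg3_resume,
 *		.freeze		= tg3_suspend,
 *		.thaw		= tg3_resume,
 *		.poweroff	= tg3_suspend,
 *		.restore	= tg3_resume,
 *	};
 */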

static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	tg3_reset_task_cancel(tp);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);

	rtnl_unlock();

	pci_disable_device(pdev);
}

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	/* Make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	rtnl_lock();

	/* Could be a second call, or we may not have a netdev yet */
	if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
		goto done;

	/* We needn't recover from a permanent error */
	if (state == pci_channel_io_frozen)
		tp->pcierr_recovery = true;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}
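
/*
 * Recovery flow for reference (a sketch of how the PCI core drives the
 * pci_error_handlers registered below): after an uncorrectable error the
 * core invokes ->error_detected(); a PCI_ERS_RESULT_NEED_RESET return asks
 * for a slot reset, after which ->slot_reset() runs; once that reports
 * PCI_ERS_RESULT_RECOVERED, the core calls ->resume() and traffic may
 * start flowing again.
 */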

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	tp->pcierr_recovery = false;
	rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

module_pci_driver(tg3_driver);
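
/*
 * For reference, module_pci_driver() above generates boilerplate roughly
 * equivalent to the sketch below:
 *
 *	static int __init tg3_driver_init(void)
 *	{
 *		return pci_register_driver(&tg3_driver);
 *	}
 *	module_init(tg3_driver_init);
 *
 *	static void __exit tg3_driver_exit(void)
 *	{
 *		pci_unregister_driver(&tg3_driver);
 *	}
 *	module_exit(tg3_driver_exit);
 */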