/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>
#include <net/gso.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
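/* Note: the accessors above simply wrap the kernel's atomic bitops, e.g.
 * tg3_flag(tp, ENABLE_APE) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags) and therefore to
 * test_bit(TG3_FLAG_ENABLE_APE, tp->tg3_flags).
 */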
#define DRV_MODULE_NAME		"tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			137

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
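/* Illustration of the point above: TG3_TX_RING_SIZE is a power of two
 * (512), so NEXT_TX() can wrap the ring index with a single AND:
 * NEXT_TX(511) == (512 & 511) == 0, which equals
 * (511 + 1) % TG3_TX_RING_SIZE without a hardware divide.
 */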
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG357766);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
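/* Usage note (illustrative): the debug bitmap takes NETIF_MSG_* values,
 * e.g. 'modprobe tg3 tg3_debug=0x3' enables NETIF_MSG_DRV | NETIF_MSG_PROBE;
 * the default of -1 selects TG3_DEF_MSG_ENABLE above.
 */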
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
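/* Note: the .driver_data flags in the table above are not interpreted by
 * the PCI core; they are read back in the probe path (via the matched
 * pci_device_id entry) to mark 10/100-only parts.
 */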
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
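/* The string order above is significant: ethtool -S output pairs these
 * keys positionally with the driver's statistics block, so entries must
 * not be reordered independently of that structure.
 */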
"tx_deferred" }, 400 { "tx_excessive_collisions" }, 401 { "tx_late_collisions" }, 402 { "tx_collide_2times" }, 403 { "tx_collide_3times" }, 404 { "tx_collide_4times" }, 405 { "tx_collide_5times" }, 406 { "tx_collide_6times" }, 407 { "tx_collide_7times" }, 408 { "tx_collide_8times" }, 409 { "tx_collide_9times" }, 410 { "tx_collide_10times" }, 411 { "tx_collide_11times" }, 412 { "tx_collide_12times" }, 413 { "tx_collide_13times" }, 414 { "tx_collide_14times" }, 415 { "tx_collide_15times" }, 416 { "tx_ucast_packets" }, 417 { "tx_mcast_packets" }, 418 { "tx_bcast_packets" }, 419 { "tx_carrier_sense_errors" }, 420 { "tx_discards" }, 421 { "tx_errors" }, 422 423 { "dma_writeq_full" }, 424 { "dma_write_prioq_full" }, 425 { "rxbds_empty" }, 426 { "rx_discards" }, 427 { "rx_errors" }, 428 { "rx_threshold_hit" }, 429 430 { "dma_readq_full" }, 431 { "dma_read_prioq_full" }, 432 { "tx_comp_queue_full" }, 433 434 { "ring_set_send_prod_index" }, 435 { "ring_status_update" }, 436 { "nic_irqs" }, 437 { "nic_avoided_irqs" }, 438 { "nic_tx_threshold_hit" }, 439 440 { "mbuf_lwm_thresh_hit" }, 441 }; 442 443 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys) 444 #define TG3_NVRAM_TEST 0 445 #define TG3_LINK_TEST 1 446 #define TG3_REGISTER_TEST 2 447 #define TG3_MEMORY_TEST 3 448 #define TG3_MAC_LOOPB_TEST 4 449 #define TG3_PHY_LOOPB_TEST 5 450 #define TG3_EXT_LOOPB_TEST 6 451 #define TG3_INTERRUPT_TEST 7 452 453 454 static const struct { 455 const char string[ETH_GSTRING_LEN]; 456 } ethtool_test_keys[] = { 457 [TG3_NVRAM_TEST] = { "nvram test (online) " }, 458 [TG3_LINK_TEST] = { "link test (online) " }, 459 [TG3_REGISTER_TEST] = { "register test (offline)" }, 460 [TG3_MEMORY_TEST] = { "memory test (offline)" }, 461 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" }, 462 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" }, 463 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" }, 464 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" }, 465 }; 466 467 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys) 468 469 470 static void tg3_write32(struct tg3 *tp, u32 off, u32 val) 471 { 472 writel(val, tp->regs + off); 473 } 474 475 static u32 tg3_read32(struct tg3 *tp, u32 off) 476 { 477 return readl(tp->regs + off); 478 } 479 480 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val) 481 { 482 writel(val, tp->aperegs + off); 483 } 484 485 static u32 tg3_ape_read32(struct tg3 *tp, u32 off) 486 { 487 return readl(tp->aperegs + off); 488 } 489 490 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) 491 { 492 unsigned long flags; 493 494 spin_lock_irqsave(&tp->indirect_lock, flags); 495 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); 496 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); 497 spin_unlock_irqrestore(&tp->indirect_lock, flags); 498 } 499 500 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val) 501 { 502 writel(val, tp->regs + off); 503 readl(tp->regs + off); 504 } 505 506 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off) 507 { 508 unsigned long flags; 509 u32 val; 510 511 spin_lock_irqsave(&tp->indirect_lock, flags); 512 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); 513 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); 514 spin_unlock_irqrestore(&tp->indirect_lock, flags); 515 return val; 516 } 517 518 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val) 519 { 520 unsigned long flags; 521 522 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) { 523 
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
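/* Illustration: tw32_wait_f(TG3PCI_CLOCK_CTRL, val, 40) (macro defined
 * below) resolves to _tw32_flush(tp, TG3PCI_CLOCK_CTRL, val, 40): the
 * value is written, the posted write is flushed with a read-back, and at
 * least 40 usec elapse before the caller proceeds.
 */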
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't have any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
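/* Usage pattern: callers bracket access to shared APE resources with
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	... touch APE shared memory ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *
 * which is exactly what tg3_ape_event_lock() above does on each polling
 * iteration; on success it returns still holding the MEM lock.
 */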
#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 milliseconds for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Check if the heartbeat interval has elapsed */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
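/* tg3_has_work() below is the NAPI work predicate: it checks the status
 * block for a link-change event, a new TX completion index, and new RX
 * return-ring entries. It is used when deciding whether interrupts can be
 * re-enabled without another poll pass.
 */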
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
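/* Poll budget note: PHY_BUSY_LOOPS iterations at 10 usec apiece give the
 * MI_COM state machine up to 5000 * 10 us = 50 ms to clear MI_COM_BUSY
 * before an MDIO access (read above, write below) is abandoned with
 * -EBUSY.
 */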
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
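/* The cl45 helpers above implement the standard indirect MMD access
 * sequence over clause-22 MDIO: select the devad, latch the register
 * address, then switch the same register pair into no-post-increment
 * data mode. The DSP and AUXCTL helpers that follow use Broadcom's
 * shadow-register convention instead: an address/select write followed
 * by a data access on a companion register.
 */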
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
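/* tg3_mdio_start() below disables the MAC's MDIO auto-polling so that
 * software, via the accessors above, owns the MII management interface,
 * and then reapplies the 5785 MAC/PHY glue configuration if the mdio bus
 * has already been registered.
 */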
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev));
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers. A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		fallthrough;
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
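/* Driver/firmware event handshake: tg3_generate_fw_event() sets
 * GRC_RX_CPU_DRIVER_EVENT to signal the RX CPU firmware, which clears the
 * bit once it has serviced the event. tg3_wait_for_event_ack() polls that
 * bit (bounded by TG3_FW_EVENT_TIMEOUT_USEC) so that a new command is
 * never posted over an unserviced one.
 */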
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}
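/* The legacy variant below covers firmware that predates the
 * ASF_NEW_HANDSHAKE protocol: it posts the same START/UNLOAD/SUSPEND
 * driver states before a reset, but there are no *_DONE acknowledgements
 * afterwards.
 */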
/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware. Some Sun onboard
	 * parts are configured like that. So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}
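/* The flow-control helpers below implement standard IEEE 802.3 pause
 * resolution. Worked example with tg3_resolve_flowctrl_1000X(): if both
 * link partners advertise ADVERTISE_1000XPAUSE the result is
 * FLOW_CTRL_TX | FLOW_CTRL_RX; if only the asymmetric-pause bits match,
 * the direction is chosen by which side also set the symmetric bit.
 */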
"enabled" : "disabled"); 1893 1894 tg3_ump_link_report(tp); 1895 } 1896 1897 tp->link_up = netif_carrier_ok(tp->dev); 1898 } 1899 1900 static u32 tg3_decode_flowctrl_1000T(u32 adv) 1901 { 1902 u32 flowctrl = 0; 1903 1904 if (adv & ADVERTISE_PAUSE_CAP) { 1905 flowctrl |= FLOW_CTRL_RX; 1906 if (!(adv & ADVERTISE_PAUSE_ASYM)) 1907 flowctrl |= FLOW_CTRL_TX; 1908 } else if (adv & ADVERTISE_PAUSE_ASYM) 1909 flowctrl |= FLOW_CTRL_TX; 1910 1911 return flowctrl; 1912 } 1913 1914 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) 1915 { 1916 u16 miireg; 1917 1918 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) 1919 miireg = ADVERTISE_1000XPAUSE; 1920 else if (flow_ctrl & FLOW_CTRL_TX) 1921 miireg = ADVERTISE_1000XPSE_ASYM; 1922 else if (flow_ctrl & FLOW_CTRL_RX) 1923 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM; 1924 else 1925 miireg = 0; 1926 1927 return miireg; 1928 } 1929 1930 static u32 tg3_decode_flowctrl_1000X(u32 adv) 1931 { 1932 u32 flowctrl = 0; 1933 1934 if (adv & ADVERTISE_1000XPAUSE) { 1935 flowctrl |= FLOW_CTRL_RX; 1936 if (!(adv & ADVERTISE_1000XPSE_ASYM)) 1937 flowctrl |= FLOW_CTRL_TX; 1938 } else if (adv & ADVERTISE_1000XPSE_ASYM) 1939 flowctrl |= FLOW_CTRL_TX; 1940 1941 return flowctrl; 1942 } 1943 1944 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv) 1945 { 1946 u8 cap = 0; 1947 1948 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) { 1949 cap = FLOW_CTRL_TX | FLOW_CTRL_RX; 1950 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) { 1951 if (lcladv & ADVERTISE_1000XPAUSE) 1952 cap = FLOW_CTRL_RX; 1953 if (rmtadv & ADVERTISE_1000XPAUSE) 1954 cap = FLOW_CTRL_TX; 1955 } 1956 1957 return cap; 1958 } 1959 1960 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) 1961 { 1962 u8 autoneg; 1963 u8 flowctrl = 0; 1964 u32 old_rx_mode = tp->rx_mode; 1965 u32 old_tx_mode = tp->tx_mode; 1966 1967 if (tg3_flag(tp, USE_PHYLIB)) 1968 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg; 1969 else 1970 autoneg = tp->link_config.autoneg; 1971 1972 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) { 1973 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 1974 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv); 1975 else 1976 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv); 1977 } else 1978 flowctrl = tp->link_config.flowctrl; 1979 1980 tp->link_config.active_flowctrl = flowctrl; 1981 1982 if (flowctrl & FLOW_CTRL_RX) 1983 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE; 1984 else 1985 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE; 1986 1987 if (old_rx_mode != tp->rx_mode) 1988 tw32_f(MAC_RX_MODE, tp->rx_mode); 1989 1990 if (flowctrl & FLOW_CTRL_TX) 1991 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE; 1992 else 1993 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE; 1994 1995 if (old_tx_mode != tp->tx_mode) 1996 tw32_f(MAC_TX_MODE, tp->tx_mode); 1997 } 1998 1999 static void tg3_adjust_link(struct net_device *dev) 2000 { 2001 u8 oldflowctrl, linkmesg = 0; 2002 u32 mac_mode, lcl_adv, rmt_adv; 2003 struct tg3 *tp = netdev_priv(dev); 2004 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 2005 2006 spin_lock_bh(&tp->lock); 2007 2008 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | 2009 MAC_MODE_HALF_DUPLEX); 2010 2011 oldflowctrl = tp->link_config.active_flowctrl; 2012 2013 if (phydev->link) { 2014 lcl_adv = 0; 2015 rmt_adv = 0; 2016 2017 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) 2018 mac_mode |= MAC_MODE_PORT_MODE_MII; 2019 else if (phydev->speed == SPEED_1000 || 2020 tg3_asic_rev(tp) != ASIC_REV_5785) 2021 
mac_mode |= MAC_MODE_PORT_MODE_GMII; 2022 else 2023 mac_mode |= MAC_MODE_PORT_MODE_MII; 2024 2025 if (phydev->duplex == DUPLEX_HALF) 2026 mac_mode |= MAC_MODE_HALF_DUPLEX; 2027 else { 2028 lcl_adv = mii_advertise_flowctrl( 2029 tp->link_config.flowctrl); 2030 2031 if (phydev->pause) 2032 rmt_adv = LPA_PAUSE_CAP; 2033 if (phydev->asym_pause) 2034 rmt_adv |= LPA_PAUSE_ASYM; 2035 } 2036 2037 tg3_setup_flow_control(tp, lcl_adv, rmt_adv); 2038 } else 2039 mac_mode |= MAC_MODE_PORT_MODE_GMII; 2040 2041 if (mac_mode != tp->mac_mode) { 2042 tp->mac_mode = mac_mode; 2043 tw32_f(MAC_MODE, tp->mac_mode); 2044 udelay(40); 2045 } 2046 2047 if (tg3_asic_rev(tp) == ASIC_REV_5785) { 2048 if (phydev->speed == SPEED_10) 2049 tw32(MAC_MI_STAT, 2050 MAC_MI_STAT_10MBPS_MODE | 2051 MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 2052 else 2053 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 2054 } 2055 2056 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF) 2057 tw32(MAC_TX_LENGTHS, 2058 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | 2059 (6 << TX_LENGTHS_IPG_SHIFT) | 2060 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))); 2061 else 2062 tw32(MAC_TX_LENGTHS, 2063 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | 2064 (6 << TX_LENGTHS_IPG_SHIFT) | 2065 (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); 2066 2067 if (phydev->link != tp->old_link || 2068 phydev->speed != tp->link_config.active_speed || 2069 phydev->duplex != tp->link_config.active_duplex || 2070 oldflowctrl != tp->link_config.active_flowctrl) 2071 linkmesg = 1; 2072 2073 tp->old_link = phydev->link; 2074 tp->link_config.active_speed = phydev->speed; 2075 tp->link_config.active_duplex = phydev->duplex; 2076 2077 spin_unlock_bh(&tp->lock); 2078 2079 if (linkmesg) 2080 tg3_link_report(tp); 2081 } 2082 2083 static int tg3_phy_init(struct tg3 *tp) 2084 { 2085 struct phy_device *phydev; 2086 2087 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) 2088 return 0; 2089 2090 /* Bring the PHY back to a known state. */ 2091 tg3_bmcr_reset(tp); 2092 2093 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 2094 2095 /* Attach the MAC to the PHY. */ 2096 phydev = phy_connect(tp->dev, phydev_name(phydev), 2097 tg3_adjust_link, phydev->interface); 2098 if (IS_ERR(phydev)) { 2099 dev_err(&tp->pdev->dev, "Could not attach to PHY\n"); 2100 return PTR_ERR(phydev); 2101 } 2102 2103 /* Mask with MAC supported features. 
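 */

/* What the switch below amounts to, in phylib terms: gigabit-capable
 * interface modes keep SPEED_1000 unless the PHY is flagged 10/100-only,
 * everything else is clamped to SPEED_100, and asymmetric pause is
 * advertised either way.  A sketch (max_speed is illustrative only):
 *
 *    phy_set_max_speed(phydev, max_speed);   // SPEED_1000 or SPEED_100
 *    phy_support_asym_pause(phydev);
 */

/* Clamp the advertised modes to what this MAC/PHY pairing supports.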
*/ 2104 switch (phydev->interface) { 2105 case PHY_INTERFACE_MODE_GMII: 2106 case PHY_INTERFACE_MODE_RGMII: 2107 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 2108 phy_set_max_speed(phydev, SPEED_1000); 2109 phy_support_asym_pause(phydev); 2110 break; 2111 } 2112 fallthrough; 2113 case PHY_INTERFACE_MODE_MII: 2114 phy_set_max_speed(phydev, SPEED_100); 2115 phy_support_asym_pause(phydev); 2116 break; 2117 default: 2118 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 2119 return -EINVAL; 2120 } 2121 2122 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED; 2123 2124 phy_attached_info(phydev); 2125 2126 return 0; 2127 } 2128 2129 static void tg3_phy_start(struct tg3 *tp) 2130 { 2131 struct phy_device *phydev; 2132 2133 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 2134 return; 2135 2136 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 2137 2138 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { 2139 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; 2140 phydev->speed = tp->link_config.speed; 2141 phydev->duplex = tp->link_config.duplex; 2142 phydev->autoneg = tp->link_config.autoneg; 2143 ethtool_convert_legacy_u32_to_link_mode( 2144 phydev->advertising, tp->link_config.advertising); 2145 } 2146 2147 phy_start(phydev); 2148 2149 phy_start_aneg(phydev); 2150 } 2151 2152 static void tg3_phy_stop(struct tg3 *tp) 2153 { 2154 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 2155 return; 2156 2157 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 2158 } 2159 2160 static void tg3_phy_fini(struct tg3 *tp) 2161 { 2162 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { 2163 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 2164 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED; 2165 } 2166 } 2167 2168 static int tg3_phy_set_extloopbk(struct tg3 *tp) 2169 { 2170 int err; 2171 u32 val; 2172 2173 if (tp->phy_flags & TG3_PHYFLG_IS_FET) 2174 return 0; 2175 2176 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 2177 /* Cannot do read-modify-write on 5401 */ 2178 err = tg3_phy_auxctl_write(tp, 2179 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 2180 MII_TG3_AUXCTL_ACTL_EXTLOOPBK | 2181 0x4c20); 2182 goto done; 2183 } 2184 2185 err = tg3_phy_auxctl_read(tp, 2186 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); 2187 if (err) 2188 return err; 2189 2190 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK; 2191 err = tg3_phy_auxctl_write(tp, 2192 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val); 2193 2194 done: 2195 return err; 2196 } 2197 2198 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable) 2199 { 2200 u32 phytest; 2201 2202 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { 2203 u32 phy; 2204 2205 tg3_writephy(tp, MII_TG3_FET_TEST, 2206 phytest | MII_TG3_FET_SHADOW_EN); 2207 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) { 2208 if (enable) 2209 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD; 2210 else 2211 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD; 2212 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy); 2213 } 2214 tg3_writephy(tp, MII_TG3_FET_TEST, phytest); 2215 } 2216 } 2217 2218 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) 2219 { 2220 u32 reg; 2221 2222 if (!tg3_flag(tp, 5705_PLUS) || 2223 (tg3_flag(tp, 5717_PLUS) && 2224 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) 2225 return; 2226 2227 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 2228 tg3_phy_fet_toggle_apd(tp, enable); 2229 return; 2230 } 2231 2232 reg = MII_TG3_MISC_SHDW_SCR5_LPED | 2233 MII_TG3_MISC_SHDW_SCR5_DLPTLM | 2234 MII_TG3_MISC_SHDW_SCR5_SDTL | 2235 MII_TG3_MISC_SHDW_SCR5_C125OE; 2236 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable) 2237 reg |= 
MII_TG3_MISC_SHDW_SCR5_DLLAPD; 2238 2239 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg); 2240 2241 2242 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS; 2243 if (enable) 2244 reg |= MII_TG3_MISC_SHDW_APD_ENABLE; 2245 2246 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg); 2247 } 2248 2249 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable) 2250 { 2251 u32 phy; 2252 2253 if (!tg3_flag(tp, 5705_PLUS) || 2254 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 2255 return; 2256 2257 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 2258 u32 ephy; 2259 2260 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) { 2261 u32 reg = MII_TG3_FET_SHDW_MISCCTRL; 2262 2263 tg3_writephy(tp, MII_TG3_FET_TEST, 2264 ephy | MII_TG3_FET_SHADOW_EN); 2265 if (!tg3_readphy(tp, reg, &phy)) { 2266 if (enable) 2267 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX; 2268 else 2269 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX; 2270 tg3_writephy(tp, reg, phy); 2271 } 2272 tg3_writephy(tp, MII_TG3_FET_TEST, ephy); 2273 } 2274 } else { 2275 int ret; 2276 2277 ret = tg3_phy_auxctl_read(tp, 2278 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy); 2279 if (!ret) { 2280 if (enable) 2281 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX; 2282 else 2283 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX; 2284 tg3_phy_auxctl_write(tp, 2285 MII_TG3_AUXCTL_SHDWSEL_MISC, phy); 2286 } 2287 } 2288 } 2289 2290 static void tg3_phy_set_wirespeed(struct tg3 *tp) 2291 { 2292 int ret; 2293 u32 val; 2294 2295 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) 2296 return; 2297 2298 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val); 2299 if (!ret) 2300 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, 2301 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN); 2302 } 2303 2304 static void tg3_phy_apply_otp(struct tg3 *tp) 2305 { 2306 u32 otp, phy; 2307 2308 if (!tp->phy_otp) 2309 return; 2310 2311 otp = tp->phy_otp; 2312 2313 if (tg3_phy_toggle_auxctl_smdsp(tp, true)) 2314 return; 2315 2316 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); 2317 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT; 2318 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy); 2319 2320 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) | 2321 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT); 2322 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy); 2323 2324 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT); 2325 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ; 2326 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy); 2327 2328 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT); 2329 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy); 2330 2331 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT); 2332 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy); 2333 2334 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) | 2335 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT); 2336 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); 2337 2338 tg3_phy_toggle_auxctl_smdsp(tp, false); 2339 } 2340 2341 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee) 2342 { 2343 u32 val; 2344 struct ethtool_eee *dest = &tp->eee; 2345 2346 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 2347 return; 2348 2349 if (eee) 2350 dest = eee; 2351 2352 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val)) 2353 return; 2354 2355 /* Pull eee_active */ 2356 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || 2357 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) { 2358 dest->eee_active = 1; 2359 } else 2360 dest->eee_active = 0; 2361 2362 /* Pull lp advertised settings */ 2363 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val)) 
2364 return; 2365 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val); 2366 2367 /* Pull advertised and eee_enabled settings */ 2368 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val)) 2369 return; 2370 dest->eee_enabled = !!val; 2371 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val); 2372 2373 /* Pull tx_lpi_enabled */ 2374 val = tr32(TG3_CPMU_EEE_MODE); 2375 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX); 2376 2377 /* Pull lpi timer value */ 2378 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff; 2379 } 2380 2381 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up) 2382 { 2383 u32 val; 2384 2385 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 2386 return; 2387 2388 tp->setlpicnt = 0; 2389 2390 if (tp->link_config.autoneg == AUTONEG_ENABLE && 2391 current_link_up && 2392 tp->link_config.active_duplex == DUPLEX_FULL && 2393 (tp->link_config.active_speed == SPEED_100 || 2394 tp->link_config.active_speed == SPEED_1000)) { 2395 u32 eeectl; 2396 2397 if (tp->link_config.active_speed == SPEED_1000) 2398 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US; 2399 else 2400 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US; 2401 2402 tw32(TG3_CPMU_EEE_CTRL, eeectl); 2403 2404 tg3_eee_pull_config(tp, NULL); 2405 if (tp->eee.eee_active) 2406 tp->setlpicnt = 2; 2407 } 2408 2409 if (!tp->setlpicnt) { 2410 if (current_link_up && 2411 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2412 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000); 2413 tg3_phy_toggle_auxctl_smdsp(tp, false); 2414 } 2415 2416 val = tr32(TG3_CPMU_EEE_MODE); 2417 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE); 2418 } 2419 } 2420 2421 static void tg3_phy_eee_enable(struct tg3 *tp) 2422 { 2423 u32 val; 2424 2425 if (tp->link_config.active_speed == SPEED_1000 && 2426 (tg3_asic_rev(tp) == ASIC_REV_5717 || 2427 tg3_asic_rev(tp) == ASIC_REV_5719 || 2428 tg3_flag(tp, 57765_CLASS)) && 2429 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2430 val = MII_TG3_DSP_TAP26_ALNOKO | 2431 MII_TG3_DSP_TAP26_RMRXSTO; 2432 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); 2433 tg3_phy_toggle_auxctl_smdsp(tp, false); 2434 } 2435 2436 val = tr32(TG3_CPMU_EEE_MODE); 2437 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE); 2438 } 2439 2440 static int tg3_wait_macro_done(struct tg3 *tp) 2441 { 2442 int limit = 100; 2443 2444 while (limit--) { 2445 u32 tmp32; 2446 2447 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) { 2448 if ((tmp32 & 0x1000) == 0) 2449 break; 2450 } 2451 } 2452 if (limit < 0) 2453 return -EBUSY; 2454 2455 return 0; 2456 } 2457 2458 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp) 2459 { 2460 static const u32 test_pat[4][6] = { 2461 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 }, 2462 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 }, 2463 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 }, 2464 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 } 2465 }; 2466 int chan; 2467 2468 for (chan = 0; chan < 4; chan++) { 2469 int i; 2470 2471 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 2472 (chan * 0x2000) | 0x0200); 2473 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); 2474 2475 for (i = 0; i < 6; i++) 2476 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 2477 test_pat[chan][i]); 2478 2479 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); 2480 if (tg3_wait_macro_done(tp)) { 2481 *resetp = 1; 2482 return -EBUSY; 2483 } 2484 2485 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 2486 (chan * 0x2000) | 0x0200); 2487 tg3_writephy(tp, 
MII_TG3_DSP_CONTROL, 0x0082);
        if (tg3_wait_macro_done(tp)) {
            *resetp = 1;
            return -EBUSY;
        }

        tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
        if (tg3_wait_macro_done(tp)) {
            *resetp = 1;
            return -EBUSY;
        }

        for (i = 0; i < 6; i += 2) {
            u32 low, high;

            if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
                tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
                tg3_wait_macro_done(tp)) {
                *resetp = 1;
                return -EBUSY;
            }
            low &= 0x7fff;
            high &= 0x000f;
            if (low != test_pat[chan][i] ||
                high != test_pat[chan][i+1]) {
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                return -EBUSY;
            }
        }
    }

    return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
    int chan;

    for (chan = 0; chan < 4; chan++) {
        int i;

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                     (chan * 0x2000) | 0x0200);
        tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
        for (i = 0; i < 6; i++)
            tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
        tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
        if (tg3_wait_macro_done(tp))
            return -EBUSY;
    }

    return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
    u32 reg32, phy9_orig;
    int retries, do_phy_reset, err;

    retries = 10;
    do_phy_reset = 1;
    do {
        if (do_phy_reset) {
            err = tg3_bmcr_reset(tp);
            if (err)
                return err;
            do_phy_reset = 0;
        }

        /* Disable transmitter and interrupt. */
        if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
            continue;

        reg32 |= 0x3000;
        tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

        /* Set full-duplex, 1000 Mbps. */
        tg3_writephy(tp, MII_BMCR,
                     BMCR_FULLDPLX | BMCR_SPEED1000);

        /* Set to master mode. */
        if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
            continue;

        tg3_writephy(tp, MII_CTRL1000,
                     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

        err = tg3_phy_toggle_auxctl_smdsp(tp, true);
        if (err)
            return err;

        /* Block the PHY control access. */
        tg3_phydsp_write(tp, 0x8005, 0x0800);

        err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
        if (!err)
            break;
    } while (--retries);

    err = tg3_phy_reset_chanpat(tp);
    if (err)
        return err;

    tg3_phydsp_write(tp, 0x8005, 0x0000);

    tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
    tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

    tg3_phy_toggle_auxctl_smdsp(tp, false);

    tg3_writephy(tp, MII_CTRL1000, phy9_orig);

    err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
    if (err)
        return err;

    reg32 &= ~0x3000;
    tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

    return 0;
}

static void tg3_carrier_off(struct tg3 *tp)
{
    netif_carrier_off(tp->dev);
    tp->link_up = false;
}

static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
{
    if (tg3_flag(tp, ENABLE_ASF))
        netdev_warn(tp->dev,
                    "Management side-band traffic will be interrupted during phy settings change\n");
}

/* This will reset the tigon3 PHY.
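 */

/* Outline of the 5703/4/5 workaround above: each of up to ten attempts
 * disables the transmitter and interrupt (EXT_CTRL |= 0x3000), forces
 * 1000-full master mode, then writes and reads back the four DSP channel
 * test patterns.  After the retry loop, the channels are cleared via
 * tg3_phy_reset_chanpat() and the original CTRL1000 and EXT_CTRL values
 * are restored.
 */

/* Full PHY reset entry point.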
2627 */ 2628 static int tg3_phy_reset(struct tg3 *tp) 2629 { 2630 u32 val, cpmuctrl; 2631 int err; 2632 2633 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 2634 val = tr32(GRC_MISC_CFG); 2635 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ); 2636 udelay(40); 2637 } 2638 err = tg3_readphy(tp, MII_BMSR, &val); 2639 err |= tg3_readphy(tp, MII_BMSR, &val); 2640 if (err != 0) 2641 return -EBUSY; 2642 2643 if (netif_running(tp->dev) && tp->link_up) { 2644 netif_carrier_off(tp->dev); 2645 tg3_link_report(tp); 2646 } 2647 2648 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 2649 tg3_asic_rev(tp) == ASIC_REV_5704 || 2650 tg3_asic_rev(tp) == ASIC_REV_5705) { 2651 err = tg3_phy_reset_5703_4_5(tp); 2652 if (err) 2653 return err; 2654 goto out; 2655 } 2656 2657 cpmuctrl = 0; 2658 if (tg3_asic_rev(tp) == ASIC_REV_5784 && 2659 tg3_chip_rev(tp) != CHIPREV_5784_AX) { 2660 cpmuctrl = tr32(TG3_CPMU_CTRL); 2661 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) 2662 tw32(TG3_CPMU_CTRL, 2663 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY); 2664 } 2665 2666 err = tg3_bmcr_reset(tp); 2667 if (err) 2668 return err; 2669 2670 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) { 2671 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz; 2672 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val); 2673 2674 tw32(TG3_CPMU_CTRL, cpmuctrl); 2675 } 2676 2677 if (tg3_chip_rev(tp) == CHIPREV_5784_AX || 2678 tg3_chip_rev(tp) == CHIPREV_5761_AX) { 2679 val = tr32(TG3_CPMU_LSPD_1000MB_CLK); 2680 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) == 2681 CPMU_LSPD_1000MB_MACCLK_12_5) { 2682 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; 2683 udelay(40); 2684 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); 2685 } 2686 } 2687 2688 if (tg3_flag(tp, 5717_PLUS) && 2689 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) 2690 return 0; 2691 2692 tg3_phy_apply_otp(tp); 2693 2694 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 2695 tg3_phy_toggle_apd(tp, true); 2696 else 2697 tg3_phy_toggle_apd(tp, false); 2698 2699 out: 2700 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) && 2701 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2702 tg3_phydsp_write(tp, 0x201f, 0x2aaa); 2703 tg3_phydsp_write(tp, 0x000a, 0x0323); 2704 tg3_phy_toggle_auxctl_smdsp(tp, false); 2705 } 2706 2707 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) { 2708 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); 2709 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); 2710 } 2711 2712 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) { 2713 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2714 tg3_phydsp_write(tp, 0x000a, 0x310b); 2715 tg3_phydsp_write(tp, 0x201f, 0x9506); 2716 tg3_phydsp_write(tp, 0x401f, 0x14e2); 2717 tg3_phy_toggle_auxctl_smdsp(tp, false); 2718 } 2719 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) { 2720 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2721 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); 2722 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) { 2723 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); 2724 tg3_writephy(tp, MII_TG3_TEST1, 2725 MII_TG3_TEST1_TRIM_EN | 0x4); 2726 } else 2727 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); 2728 2729 tg3_phy_toggle_auxctl_smdsp(tp, false); 2730 } 2731 } 2732 2733 /* Set Extended packet length bit (bit 14) on all chips that */ 2734 /* support jumbo frames */ 2735 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 2736 /* Cannot do read-modify-write on 5401 */ 2737 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); 2738 } else if (tg3_flag(tp, JUMBO_CAPABLE)) { 2739 /* Set bit 14 with read-modify-write to preserve other bits */ 2740 err = tg3_phy_auxctl_read(tp, 2741 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); 2742 
if (!err) 2743 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 2744 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN); 2745 } 2746 2747 /* Set phy register 0x10 bit 0 to high fifo elasticity to support 2748 * jumbo frames transmission. 2749 */ 2750 if (tg3_flag(tp, JUMBO_CAPABLE)) { 2751 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val)) 2752 tg3_writephy(tp, MII_TG3_EXT_CTRL, 2753 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC); 2754 } 2755 2756 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 2757 /* adjust output voltage */ 2758 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12); 2759 } 2760 2761 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0) 2762 tg3_phydsp_write(tp, 0xffb, 0x4000); 2763 2764 tg3_phy_toggle_automdix(tp, true); 2765 tg3_phy_set_wirespeed(tp); 2766 return 0; 2767 } 2768 2769 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001 2770 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002 2771 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \ 2772 TG3_GPIO_MSG_NEED_VAUX) 2773 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \ 2774 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \ 2775 (TG3_GPIO_MSG_DRVR_PRES << 4) | \ 2776 (TG3_GPIO_MSG_DRVR_PRES << 8) | \ 2777 (TG3_GPIO_MSG_DRVR_PRES << 12)) 2778 2779 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \ 2780 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \ 2781 (TG3_GPIO_MSG_NEED_VAUX << 4) | \ 2782 (TG3_GPIO_MSG_NEED_VAUX << 8) | \ 2783 (TG3_GPIO_MSG_NEED_VAUX << 12)) 2784 2785 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat) 2786 { 2787 u32 status, shift; 2788 2789 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2790 tg3_asic_rev(tp) == ASIC_REV_5719) 2791 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG); 2792 else 2793 status = tr32(TG3_CPMU_DRV_STATUS); 2794 2795 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn; 2796 status &= ~(TG3_GPIO_MSG_MASK << shift); 2797 status |= (newstat << shift); 2798 2799 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2800 tg3_asic_rev(tp) == ASIC_REV_5719) 2801 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status); 2802 else 2803 tw32(TG3_CPMU_DRV_STATUS, status); 2804 2805 return status >> TG3_APE_GPIO_MSG_SHIFT; 2806 } 2807 2808 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp) 2809 { 2810 if (!tg3_flag(tp, IS_NIC)) 2811 return 0; 2812 2813 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2814 tg3_asic_rev(tp) == ASIC_REV_5719 || 2815 tg3_asic_rev(tp) == ASIC_REV_5720) { 2816 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) 2817 return -EIO; 2818 2819 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES); 2820 2821 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 2822 TG3_GRC_LCLCTL_PWRSW_DELAY); 2823 2824 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); 2825 } else { 2826 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 2827 TG3_GRC_LCLCTL_PWRSW_DELAY); 2828 } 2829 2830 return 0; 2831 } 2832 2833 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp) 2834 { 2835 u32 grc_local_ctrl; 2836 2837 if (!tg3_flag(tp, IS_NIC) || 2838 tg3_asic_rev(tp) == ASIC_REV_5700 || 2839 tg3_asic_rev(tp) == ASIC_REV_5701) 2840 return; 2841 2842 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1; 2843 2844 tw32_wait_f(GRC_LOCAL_CTRL, 2845 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, 2846 TG3_GRC_LCLCTL_PWRSW_DELAY); 2847 2848 tw32_wait_f(GRC_LOCAL_CTRL, 2849 grc_local_ctrl, 2850 TG3_GRC_LCLCTL_PWRSW_DELAY); 2851 2852 tw32_wait_f(GRC_LOCAL_CTRL, 2853 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, 2854 TG3_GRC_LCLCTL_PWRSW_DELAY); 2855 } 2856 2857 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp) 2858 { 2859 if (!tg3_flag(tp, IS_NIC)) 2860 return; 2861 2862 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 2863 tg3_asic_rev(tp) == 
ASIC_REV_5701) { 2864 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2865 (GRC_LCLCTRL_GPIO_OE0 | 2866 GRC_LCLCTRL_GPIO_OE1 | 2867 GRC_LCLCTRL_GPIO_OE2 | 2868 GRC_LCLCTRL_GPIO_OUTPUT0 | 2869 GRC_LCLCTRL_GPIO_OUTPUT1), 2870 TG3_GRC_LCLCTL_PWRSW_DELAY); 2871 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 2872 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { 2873 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */ 2874 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | 2875 GRC_LCLCTRL_GPIO_OE1 | 2876 GRC_LCLCTRL_GPIO_OE2 | 2877 GRC_LCLCTRL_GPIO_OUTPUT0 | 2878 GRC_LCLCTRL_GPIO_OUTPUT1 | 2879 tp->grc_local_ctrl; 2880 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 2881 TG3_GRC_LCLCTL_PWRSW_DELAY); 2882 2883 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2; 2884 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 2885 TG3_GRC_LCLCTL_PWRSW_DELAY); 2886 2887 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0; 2888 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 2889 TG3_GRC_LCLCTL_PWRSW_DELAY); 2890 } else { 2891 u32 no_gpio2; 2892 u32 grc_local_ctrl = 0; 2893 2894 /* Workaround to prevent overdrawing Amps. */ 2895 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 2896 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; 2897 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | 2898 grc_local_ctrl, 2899 TG3_GRC_LCLCTL_PWRSW_DELAY); 2900 } 2901 2902 /* On 5753 and variants, GPIO2 cannot be used. */ 2903 no_gpio2 = tp->nic_sram_data_cfg & 2904 NIC_SRAM_DATA_CFG_NO_GPIO2; 2905 2906 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | 2907 GRC_LCLCTRL_GPIO_OE1 | 2908 GRC_LCLCTRL_GPIO_OE2 | 2909 GRC_LCLCTRL_GPIO_OUTPUT1 | 2910 GRC_LCLCTRL_GPIO_OUTPUT2; 2911 if (no_gpio2) { 2912 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 | 2913 GRC_LCLCTRL_GPIO_OUTPUT2); 2914 } 2915 tw32_wait_f(GRC_LOCAL_CTRL, 2916 tp->grc_local_ctrl | grc_local_ctrl, 2917 TG3_GRC_LCLCTL_PWRSW_DELAY); 2918 2919 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0; 2920 2921 tw32_wait_f(GRC_LOCAL_CTRL, 2922 tp->grc_local_ctrl | grc_local_ctrl, 2923 TG3_GRC_LCLCTL_PWRSW_DELAY); 2924 2925 if (!no_gpio2) { 2926 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2; 2927 tw32_wait_f(GRC_LOCAL_CTRL, 2928 tp->grc_local_ctrl | grc_local_ctrl, 2929 TG3_GRC_LCLCTL_PWRSW_DELAY); 2930 } 2931 } 2932 } 2933 2934 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable) 2935 { 2936 u32 msg = 0; 2937 2938 /* Serialize power state transitions */ 2939 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) 2940 return; 2941 2942 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable) 2943 msg = TG3_GPIO_MSG_NEED_VAUX; 2944 2945 msg = tg3_set_function_status(tp, msg); 2946 2947 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK) 2948 goto done; 2949 2950 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK) 2951 tg3_pwrsrc_switch_to_vaux(tp); 2952 else 2953 tg3_pwrsrc_die_with_vmain(tp); 2954 2955 done: 2956 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); 2957 } 2958 2959 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol) 2960 { 2961 bool need_vaux = false; 2962 2963 /* The GPIOs do something completely different on 57765. */ 2964 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS)) 2965 return; 2966 2967 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 2968 tg3_asic_rev(tp) == ASIC_REV_5719 || 2969 tg3_asic_rev(tp) == ASIC_REV_5720) { 2970 tg3_frob_aux_power_5717(tp, include_wol ? 
2971 tg3_flag(tp, WOL_ENABLE) != 0 : 0); 2972 return; 2973 } 2974 2975 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) { 2976 struct net_device *dev_peer; 2977 2978 dev_peer = pci_get_drvdata(tp->pdev_peer); 2979 2980 /* remove_one() may have been run on the peer. */ 2981 if (dev_peer) { 2982 struct tg3 *tp_peer = netdev_priv(dev_peer); 2983 2984 if (tg3_flag(tp_peer, INIT_COMPLETE)) 2985 return; 2986 2987 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) || 2988 tg3_flag(tp_peer, ENABLE_ASF)) 2989 need_vaux = true; 2990 } 2991 } 2992 2993 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) || 2994 tg3_flag(tp, ENABLE_ASF)) 2995 need_vaux = true; 2996 2997 if (need_vaux) 2998 tg3_pwrsrc_switch_to_vaux(tp); 2999 else 3000 tg3_pwrsrc_die_with_vmain(tp); 3001 } 3002 3003 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) 3004 { 3005 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2) 3006 return 1; 3007 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) { 3008 if (speed != SPEED_10) 3009 return 1; 3010 } else if (speed == SPEED_10) 3011 return 1; 3012 3013 return 0; 3014 } 3015 3016 static bool tg3_phy_power_bug(struct tg3 *tp) 3017 { 3018 switch (tg3_asic_rev(tp)) { 3019 case ASIC_REV_5700: 3020 case ASIC_REV_5704: 3021 return true; 3022 case ASIC_REV_5780: 3023 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 3024 return true; 3025 return false; 3026 case ASIC_REV_5717: 3027 if (!tp->pci_fn) 3028 return true; 3029 return false; 3030 case ASIC_REV_5719: 3031 case ASIC_REV_5720: 3032 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 3033 !tp->pci_fn) 3034 return true; 3035 return false; 3036 } 3037 3038 return false; 3039 } 3040 3041 static bool tg3_phy_led_bug(struct tg3 *tp) 3042 { 3043 switch (tg3_asic_rev(tp)) { 3044 case ASIC_REV_5719: 3045 case ASIC_REV_5720: 3046 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 3047 !tp->pci_fn) 3048 return true; 3049 return false; 3050 } 3051 3052 return false; 3053 } 3054 3055 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) 3056 { 3057 u32 val; 3058 3059 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) 3060 return; 3061 3062 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 3063 if (tg3_asic_rev(tp) == ASIC_REV_5704) { 3064 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL); 3065 u32 serdes_cfg = tr32(MAC_SERDES_CFG); 3066 3067 sg_dig_ctrl |= 3068 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET; 3069 tw32(SG_DIG_CTRL, sg_dig_ctrl); 3070 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15)); 3071 } 3072 return; 3073 } 3074 3075 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 3076 tg3_bmcr_reset(tp); 3077 val = tr32(GRC_MISC_CFG); 3078 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); 3079 udelay(40); 3080 return; 3081 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 3082 u32 phytest; 3083 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { 3084 u32 phy; 3085 3086 tg3_writephy(tp, MII_ADVERTISE, 0); 3087 tg3_writephy(tp, MII_BMCR, 3088 BMCR_ANENABLE | BMCR_ANRESTART); 3089 3090 tg3_writephy(tp, MII_TG3_FET_TEST, 3091 phytest | MII_TG3_FET_SHADOW_EN); 3092 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) { 3093 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD; 3094 tg3_writephy(tp, 3095 MII_TG3_FET_SHDW_AUXMODE4, 3096 phy); 3097 } 3098 tg3_writephy(tp, MII_TG3_FET_TEST, phytest); 3099 } 3100 return; 3101 } else if (do_low_power) { 3102 if (!tg3_phy_led_bug(tp)) 3103 tg3_writephy(tp, MII_TG3_EXT_CTRL, 3104 MII_TG3_EXT_CTRL_FORCE_LED_OFF); 3105 3106 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR | 3107 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | 3108 MII_TG3_AUXCTL_PCTL_VREG_11V; 3109 
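/* Park the PHY in its low-power state via the power-control shadow
 * register (PWRCTL), using the bits assembled above
 * (100TX_LPWR | SPR_ISOLATE | VREG_11V).
 */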
tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val); 3110 } 3111 3112 /* The PHY should not be powered down on some chips because 3113 * of bugs. 3114 */ 3115 if (tg3_phy_power_bug(tp)) 3116 return; 3117 3118 if (tg3_chip_rev(tp) == CHIPREV_5784_AX || 3119 tg3_chip_rev(tp) == CHIPREV_5761_AX) { 3120 val = tr32(TG3_CPMU_LSPD_1000MB_CLK); 3121 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; 3122 val |= CPMU_LSPD_1000MB_MACCLK_12_5; 3123 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); 3124 } 3125 3126 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN); 3127 } 3128 3129 /* tp->lock is held. */ 3130 static int tg3_nvram_lock(struct tg3 *tp) 3131 { 3132 if (tg3_flag(tp, NVRAM)) { 3133 int i; 3134 3135 if (tp->nvram_lock_cnt == 0) { 3136 tw32(NVRAM_SWARB, SWARB_REQ_SET1); 3137 for (i = 0; i < 8000; i++) { 3138 if (tr32(NVRAM_SWARB) & SWARB_GNT1) 3139 break; 3140 udelay(20); 3141 } 3142 if (i == 8000) { 3143 tw32(NVRAM_SWARB, SWARB_REQ_CLR1); 3144 return -ENODEV; 3145 } 3146 } 3147 tp->nvram_lock_cnt++; 3148 } 3149 return 0; 3150 } 3151 3152 /* tp->lock is held. */ 3153 static void tg3_nvram_unlock(struct tg3 *tp) 3154 { 3155 if (tg3_flag(tp, NVRAM)) { 3156 if (tp->nvram_lock_cnt > 0) 3157 tp->nvram_lock_cnt--; 3158 if (tp->nvram_lock_cnt == 0) 3159 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); 3160 } 3161 } 3162 3163 /* tp->lock is held. */ 3164 static void tg3_enable_nvram_access(struct tg3 *tp) 3165 { 3166 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { 3167 u32 nvaccess = tr32(NVRAM_ACCESS); 3168 3169 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); 3170 } 3171 } 3172 3173 /* tp->lock is held. */ 3174 static void tg3_disable_nvram_access(struct tg3 *tp) 3175 { 3176 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { 3177 u32 nvaccess = tr32(NVRAM_ACCESS); 3178 3179 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); 3180 } 3181 } 3182 3183 static int tg3_nvram_read_using_eeprom(struct tg3 *tp, 3184 u32 offset, u32 *val) 3185 { 3186 u32 tmp; 3187 int i; 3188 3189 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0) 3190 return -EINVAL; 3191 3192 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK | 3193 EEPROM_ADDR_DEVID_MASK | 3194 EEPROM_ADDR_READ); 3195 tw32(GRC_EEPROM_ADDR, 3196 tmp | 3197 (0 << EEPROM_ADDR_DEVID_SHIFT) | 3198 ((offset << EEPROM_ADDR_ADDR_SHIFT) & 3199 EEPROM_ADDR_ADDR_MASK) | 3200 EEPROM_ADDR_READ | EEPROM_ADDR_START); 3201 3202 for (i = 0; i < 1000; i++) { 3203 tmp = tr32(GRC_EEPROM_ADDR); 3204 3205 if (tmp & EEPROM_ADDR_COMPLETE) 3206 break; 3207 msleep(1); 3208 } 3209 if (!(tmp & EEPROM_ADDR_COMPLETE)) 3210 return -EBUSY; 3211 3212 tmp = tr32(GRC_EEPROM_DATA); 3213 3214 /* 3215 * The data will always be opposite the native endian 3216 * format. Perform a blind byteswap to compensate. 
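 */

/* Worked example of that blind byteswap: if GRC_EEPROM_DATA latches
 * 0x12345678, then
 *
 *    *val = swab32(0x12345678);    // == 0x78563412
 *
 * which tg3_nvram_read_be32() below then feeds through cpu_to_be32() to
 * recover a bytestream-ordered value on either endianness.
 */

/* Compensate with an unconditional byteswap.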
3217 */ 3218 *val = swab32(tmp); 3219 3220 return 0; 3221 } 3222 3223 #define NVRAM_CMD_TIMEOUT 10000 3224 3225 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) 3226 { 3227 int i; 3228 3229 tw32(NVRAM_CMD, nvram_cmd); 3230 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) { 3231 usleep_range(10, 40); 3232 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) { 3233 udelay(10); 3234 break; 3235 } 3236 } 3237 3238 if (i == NVRAM_CMD_TIMEOUT) 3239 return -EBUSY; 3240 3241 return 0; 3242 } 3243 3244 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) 3245 { 3246 if (tg3_flag(tp, NVRAM) && 3247 tg3_flag(tp, NVRAM_BUFFERED) && 3248 tg3_flag(tp, FLASH) && 3249 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && 3250 (tp->nvram_jedecnum == JEDEC_ATMEL)) 3251 3252 addr = ((addr / tp->nvram_pagesize) << 3253 ATMEL_AT45DB0X1B_PAGE_POS) + 3254 (addr % tp->nvram_pagesize); 3255 3256 return addr; 3257 } 3258 3259 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) 3260 { 3261 if (tg3_flag(tp, NVRAM) && 3262 tg3_flag(tp, NVRAM_BUFFERED) && 3263 tg3_flag(tp, FLASH) && 3264 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && 3265 (tp->nvram_jedecnum == JEDEC_ATMEL)) 3266 3267 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * 3268 tp->nvram_pagesize) + 3269 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1)); 3270 3271 return addr; 3272 } 3273 3274 /* NOTE: Data read in from NVRAM is byteswapped according to 3275 * the byteswapping settings for all other register accesses. 3276 * tg3 devices are BE devices, so on a BE machine, the data 3277 * returned will be exactly as it is seen in NVRAM. On a LE 3278 * machine, the 32-bit value will be byteswapped. 3279 */ 3280 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) 3281 { 3282 int ret; 3283 3284 if (!tg3_flag(tp, NVRAM)) 3285 return tg3_nvram_read_using_eeprom(tp, offset, val); 3286 3287 offset = tg3_nvram_phys_addr(tp, offset); 3288 3289 if (offset > NVRAM_ADDR_MSK) 3290 return -EINVAL; 3291 3292 ret = tg3_nvram_lock(tp); 3293 if (ret) 3294 return ret; 3295 3296 tg3_enable_nvram_access(tp); 3297 3298 tw32(NVRAM_ADDR, offset); 3299 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO | 3300 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE); 3301 3302 if (ret == 0) 3303 *val = tr32(NVRAM_RDDATA); 3304 3305 tg3_disable_nvram_access(tp); 3306 3307 tg3_nvram_unlock(tp); 3308 3309 return ret; 3310 } 3311 3312 /* Ensures NVRAM data is in bytestream format. */ 3313 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val) 3314 { 3315 u32 v; 3316 int res = tg3_nvram_read(tp, offset, &v); 3317 if (!res) 3318 *val = cpu_to_be32(v); 3319 return res; 3320 } 3321 3322 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, 3323 u32 offset, u32 len, u8 *buf) 3324 { 3325 int i, j, rc = 0; 3326 u32 val; 3327 3328 for (i = 0; i < len; i += 4) { 3329 u32 addr; 3330 __be32 data; 3331 3332 addr = offset + i; 3333 3334 memcpy(&data, buf + i, 4); 3335 3336 /* 3337 * The SEEPROM interface expects the data to always be opposite 3338 * the native endian format. We accomplish this by reversing 3339 * all the operations that would have been performed on the 3340 * data from a call to tg3_nvram_read_be32(). 
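 */

/* The read and write paths compose to an identity, which is the point of
 * mirroring the byteswaps.  Under the definitions above:
 *
 *    write:  tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
 *    read:   *val = cpu_to_be32(swab32(tr32(GRC_EEPROM_DATA)));
 *
 * so reading back returns exactly the __be32 that was written.
 */

/* Reverse the tg3_nvram_read_be32() transformations.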
3341 */ 3342 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data))); 3343 3344 val = tr32(GRC_EEPROM_ADDR); 3345 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE); 3346 3347 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK | 3348 EEPROM_ADDR_READ); 3349 tw32(GRC_EEPROM_ADDR, val | 3350 (0 << EEPROM_ADDR_DEVID_SHIFT) | 3351 (addr & EEPROM_ADDR_ADDR_MASK) | 3352 EEPROM_ADDR_START | 3353 EEPROM_ADDR_WRITE); 3354 3355 for (j = 0; j < 1000; j++) { 3356 val = tr32(GRC_EEPROM_ADDR); 3357 3358 if (val & EEPROM_ADDR_COMPLETE) 3359 break; 3360 msleep(1); 3361 } 3362 if (!(val & EEPROM_ADDR_COMPLETE)) { 3363 rc = -EBUSY; 3364 break; 3365 } 3366 } 3367 3368 return rc; 3369 } 3370 3371 /* offset and length are dword aligned */ 3372 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, 3373 u8 *buf) 3374 { 3375 int ret = 0; 3376 u32 pagesize = tp->nvram_pagesize; 3377 u32 pagemask = pagesize - 1; 3378 u32 nvram_cmd; 3379 u8 *tmp; 3380 3381 tmp = kmalloc(pagesize, GFP_KERNEL); 3382 if (tmp == NULL) 3383 return -ENOMEM; 3384 3385 while (len) { 3386 int j; 3387 u32 phy_addr, page_off, size; 3388 3389 phy_addr = offset & ~pagemask; 3390 3391 for (j = 0; j < pagesize; j += 4) { 3392 ret = tg3_nvram_read_be32(tp, phy_addr + j, 3393 (__be32 *) (tmp + j)); 3394 if (ret) 3395 break; 3396 } 3397 if (ret) 3398 break; 3399 3400 page_off = offset & pagemask; 3401 size = pagesize; 3402 if (len < size) 3403 size = len; 3404 3405 len -= size; 3406 3407 memcpy(tmp + page_off, buf, size); 3408 3409 offset = offset + (pagesize - page_off); 3410 3411 tg3_enable_nvram_access(tp); 3412 3413 /* 3414 * Before we can erase the flash page, we need 3415 * to issue a special "write enable" command. 3416 */ 3417 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3418 3419 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 3420 break; 3421 3422 /* Erase the target page */ 3423 tw32(NVRAM_ADDR, phy_addr); 3424 3425 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR | 3426 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE; 3427 3428 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 3429 break; 3430 3431 /* Issue another write enable to start the write. 
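 */

/* Sequence of the unbuffered page update, as implemented above and below:
 *
 *    1. read the whole page into tmp (tg3_nvram_read_be32() loop)
 *    2. merge the caller's bytes at page_off (memcpy)
 *    3. WREN, then page erase at phy_addr
 *    4. WREN again, then program the page one dword at a time, with
 *       NVRAM_CMD_FIRST on the first word and NVRAM_CMD_LAST on the last
 */

/* Arm the actual page program with a second write enable.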
*/ 3432 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3433 3434 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 3435 break; 3436 3437 for (j = 0; j < pagesize; j += 4) { 3438 __be32 data; 3439 3440 data = *((__be32 *) (tmp + j)); 3441 3442 tw32(NVRAM_WRDATA, be32_to_cpu(data)); 3443 3444 tw32(NVRAM_ADDR, phy_addr + j); 3445 3446 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | 3447 NVRAM_CMD_WR; 3448 3449 if (j == 0) 3450 nvram_cmd |= NVRAM_CMD_FIRST; 3451 else if (j == (pagesize - 4)) 3452 nvram_cmd |= NVRAM_CMD_LAST; 3453 3454 ret = tg3_nvram_exec_cmd(tp, nvram_cmd); 3455 if (ret) 3456 break; 3457 } 3458 if (ret) 3459 break; 3460 } 3461 3462 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3463 tg3_nvram_exec_cmd(tp, nvram_cmd); 3464 3465 kfree(tmp); 3466 3467 return ret; 3468 } 3469 3470 /* offset and length are dword aligned */ 3471 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, 3472 u8 *buf) 3473 { 3474 int i, ret = 0; 3475 3476 for (i = 0; i < len; i += 4, offset += 4) { 3477 u32 page_off, phy_addr, nvram_cmd; 3478 __be32 data; 3479 3480 memcpy(&data, buf + i, 4); 3481 tw32(NVRAM_WRDATA, be32_to_cpu(data)); 3482 3483 page_off = offset % tp->nvram_pagesize; 3484 3485 phy_addr = tg3_nvram_phys_addr(tp, offset); 3486 3487 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR; 3488 3489 if (page_off == 0 || i == 0) 3490 nvram_cmd |= NVRAM_CMD_FIRST; 3491 if (page_off == (tp->nvram_pagesize - 4)) 3492 nvram_cmd |= NVRAM_CMD_LAST; 3493 3494 if (i == (len - 4)) 3495 nvram_cmd |= NVRAM_CMD_LAST; 3496 3497 if ((nvram_cmd & NVRAM_CMD_FIRST) || 3498 !tg3_flag(tp, FLASH) || 3499 !tg3_flag(tp, 57765_PLUS)) 3500 tw32(NVRAM_ADDR, phy_addr); 3501 3502 if (tg3_asic_rev(tp) != ASIC_REV_5752 && 3503 !tg3_flag(tp, 5755_PLUS) && 3504 (tp->nvram_jedecnum == JEDEC_ST) && 3505 (nvram_cmd & NVRAM_CMD_FIRST)) { 3506 u32 cmd; 3507 3508 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 3509 ret = tg3_nvram_exec_cmd(tp, cmd); 3510 if (ret) 3511 break; 3512 } 3513 if (!tg3_flag(tp, FLASH)) { 3514 /* We always do complete word writes to eeprom. 
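 */

/* Example of the FIRST/LAST selection above, assuming a hypothetical
 * flash part with a 256-byte page: a 12-byte write starting at offset 252
 * issues FIRST|LAST at 252 (i == 0 and page_off == pagesize - 4), FIRST
 * at 256 (page_off == 0, new page), and LAST at 260 (i == len - 4).
 */

/* EEPROM-style parts take FIRST|LAST on every dword.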
*/ 3515 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST); 3516 } 3517 3518 ret = tg3_nvram_exec_cmd(tp, nvram_cmd); 3519 if (ret) 3520 break; 3521 } 3522 return ret; 3523 } 3524 3525 /* offset and length are dword aligned */ 3526 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) 3527 { 3528 int ret; 3529 3530 if (tg3_flag(tp, EEPROM_WRITE_PROT)) { 3531 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & 3532 ~GRC_LCLCTRL_GPIO_OUTPUT1); 3533 udelay(40); 3534 } 3535 3536 if (!tg3_flag(tp, NVRAM)) { 3537 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); 3538 } else { 3539 u32 grc_mode; 3540 3541 ret = tg3_nvram_lock(tp); 3542 if (ret) 3543 return ret; 3544 3545 tg3_enable_nvram_access(tp); 3546 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) 3547 tw32(NVRAM_WRITE1, 0x406); 3548 3549 grc_mode = tr32(GRC_MODE); 3550 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); 3551 3552 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) { 3553 ret = tg3_nvram_write_block_buffered(tp, offset, len, 3554 buf); 3555 } else { 3556 ret = tg3_nvram_write_block_unbuffered(tp, offset, len, 3557 buf); 3558 } 3559 3560 grc_mode = tr32(GRC_MODE); 3561 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE); 3562 3563 tg3_disable_nvram_access(tp); 3564 tg3_nvram_unlock(tp); 3565 } 3566 3567 if (tg3_flag(tp, EEPROM_WRITE_PROT)) { 3568 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 3569 udelay(40); 3570 } 3571 3572 return ret; 3573 } 3574 3575 #define RX_CPU_SCRATCH_BASE 0x30000 3576 #define RX_CPU_SCRATCH_SIZE 0x04000 3577 #define TX_CPU_SCRATCH_BASE 0x34000 3578 #define TX_CPU_SCRATCH_SIZE 0x04000 3579 3580 /* tp->lock is held. */ 3581 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base) 3582 { 3583 int i; 3584 const int iters = 10000; 3585 3586 for (i = 0; i < iters; i++) { 3587 tw32(cpu_base + CPU_STATE, 0xffffffff); 3588 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); 3589 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT) 3590 break; 3591 if (pci_channel_offline(tp->pdev)) 3592 return -EBUSY; 3593 } 3594 3595 return (i == iters) ? -EBUSY : 0; 3596 } 3597 3598 /* tp->lock is held. */ 3599 static int tg3_rxcpu_pause(struct tg3 *tp) 3600 { 3601 int rc = tg3_pause_cpu(tp, RX_CPU_BASE); 3602 3603 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); 3604 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); 3605 udelay(10); 3606 3607 return rc; 3608 } 3609 3610 /* tp->lock is held. */ 3611 static int tg3_txcpu_pause(struct tg3 *tp) 3612 { 3613 return tg3_pause_cpu(tp, TX_CPU_BASE); 3614 } 3615 3616 /* tp->lock is held. */ 3617 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base) 3618 { 3619 tw32(cpu_base + CPU_STATE, 0xffffffff); 3620 tw32_f(cpu_base + CPU_MODE, 0x00000000); 3621 } 3622 3623 /* tp->lock is held. */ 3624 static void tg3_rxcpu_resume(struct tg3 *tp) 3625 { 3626 tg3_resume_cpu(tp, RX_CPU_BASE); 3627 } 3628 3629 /* tp->lock is held. */ 3630 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base) 3631 { 3632 int rc; 3633 3634 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); 3635 3636 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 3637 u32 val = tr32(GRC_VCPU_EXT_CTRL); 3638 3639 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); 3640 return 0; 3641 } 3642 if (cpu_base == RX_CPU_BASE) { 3643 rc = tg3_rxcpu_pause(tp); 3644 } else { 3645 /* 3646 * There is only an Rx CPU for the 5750 derivative in the 3647 * BCM4785. 
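 */

/* For reference, the scratchpad layout from the defines above:
 *
 *    RX CPU scratch:  0x30000 - 0x33fff (16 KiB)
 *    TX CPU scratch:  0x34000 - 0x37fff (16 KiB)
 *
 * tg3_halt_cpu() refuses TX_CPU_BASE on 5705-plus parts (BUG_ON above),
 * and the 5906 is halted through GRC_VCPU_EXT_CTRL instead of CPU_MODE.
 */

/* Such parts have no TX CPU to pause.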
3648 */ 3649 if (tg3_flag(tp, IS_SSB_CORE)) 3650 return 0; 3651 3652 rc = tg3_txcpu_pause(tp); 3653 } 3654 3655 if (rc) { 3656 netdev_err(tp->dev, "%s timed out, %s CPU\n", 3657 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX"); 3658 return -ENODEV; 3659 } 3660 3661 /* Clear firmware's nvram arbitration. */ 3662 if (tg3_flag(tp, NVRAM)) 3663 tw32(NVRAM_SWARB, SWARB_REQ_CLR0); 3664 return 0; 3665 } 3666 3667 static int tg3_fw_data_len(struct tg3 *tp, 3668 const struct tg3_firmware_hdr *fw_hdr) 3669 { 3670 int fw_len; 3671 3672 /* Non fragmented firmware have one firmware header followed by a 3673 * contiguous chunk of data to be written. The length field in that 3674 * header is not the length of data to be written but the complete 3675 * length of the bss. The data length is determined based on 3676 * tp->fw->size minus headers. 3677 * 3678 * Fragmented firmware have a main header followed by multiple 3679 * fragments. Each fragment is identical to non fragmented firmware 3680 * with a firmware header followed by a contiguous chunk of data. In 3681 * the main header, the length field is unused and set to 0xffffffff. 3682 * In each fragment header the length is the entire size of that 3683 * fragment i.e. fragment data + header length. Data length is 3684 * therefore length field in the header minus TG3_FW_HDR_LEN. 3685 */ 3686 if (tp->fw_len == 0xffffffff) 3687 fw_len = be32_to_cpu(fw_hdr->len); 3688 else 3689 fw_len = tp->fw->size; 3690 3691 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32); 3692 } 3693 3694 /* tp->lock is held. */ 3695 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, 3696 u32 cpu_scratch_base, int cpu_scratch_size, 3697 const struct tg3_firmware_hdr *fw_hdr) 3698 { 3699 int err, i; 3700 void (*write_op)(struct tg3 *, u32, u32); 3701 int total_len = tp->fw->size; 3702 3703 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { 3704 netdev_err(tp->dev, 3705 "%s: Trying to load TX cpu firmware which is 5705\n", 3706 __func__); 3707 return -EINVAL; 3708 } 3709 3710 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766) 3711 write_op = tg3_write_mem; 3712 else 3713 write_op = tg3_write_indirect_reg32; 3714 3715 if (tg3_asic_rev(tp) != ASIC_REV_57766) { 3716 /* It is possible that bootcode is still loading at this point. 3717 * Get the nvram lock first before halting the cpu. 3718 */ 3719 int lock_err = tg3_nvram_lock(tp); 3720 err = tg3_halt_cpu(tp, cpu_base); 3721 if (!lock_err) 3722 tg3_nvram_unlock(tp); 3723 if (err) 3724 goto out; 3725 3726 for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) 3727 write_op(tp, cpu_scratch_base + i, 0); 3728 tw32(cpu_base + CPU_STATE, 0xffffffff); 3729 tw32(cpu_base + CPU_MODE, 3730 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT); 3731 } else { 3732 /* Subtract additional main header for fragmented firmware and 3733 * advance to the first fragment 3734 */ 3735 total_len -= TG3_FW_HDR_LEN; 3736 fw_hdr++; 3737 } 3738 3739 do { 3740 u32 *fw_data = (u32 *)(fw_hdr + 1); 3741 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++) 3742 write_op(tp, cpu_scratch_base + 3743 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) + 3744 (i * sizeof(u32)), 3745 be32_to_cpu(fw_data[i])); 3746 3747 total_len -= be32_to_cpu(fw_hdr->len); 3748 3749 /* Advance to next fragment */ 3750 fw_hdr = (struct tg3_firmware_hdr *) 3751 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len)); 3752 } while (total_len > 0); 3753 3754 err = 0; 3755 3756 out: 3757 return err; 3758 } 3759 3760 /* tp->lock is held. 
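 */

/* Worked example of tg3_fw_data_len() above, assuming TG3_FW_HDR_LEN is
 * 12 bytes (illustration only).  Non-fragmented image of 2048 bytes:
 *
 *    fw_len = tp->fw->size = 2048  =>  (2048 - 12) / 4 = 509 data words
 *
 * Fragmented image (main header len == 0xffffffff): each fragment
 * header's len covers header plus data, so a 524-byte fragment yields
 * (524 - 12) / 4 = 128 data words.
 */

/* tp->lock is held.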
*/ 3761 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc) 3762 { 3763 int i; 3764 const int iters = 5; 3765 3766 tw32(cpu_base + CPU_STATE, 0xffffffff); 3767 tw32_f(cpu_base + CPU_PC, pc); 3768 3769 for (i = 0; i < iters; i++) { 3770 if (tr32(cpu_base + CPU_PC) == pc) 3771 break; 3772 tw32(cpu_base + CPU_STATE, 0xffffffff); 3773 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); 3774 tw32_f(cpu_base + CPU_PC, pc); 3775 udelay(1000); 3776 } 3777 3778 return (i == iters) ? -EBUSY : 0; 3779 } 3780 3781 /* tp->lock is held. */ 3782 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) 3783 { 3784 const struct tg3_firmware_hdr *fw_hdr; 3785 int err; 3786 3787 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3788 3789 /* Firmware blob starts with version numbers, followed by 3790 start address and length. We are setting complete length. 3791 length = end_address_of_bss - start_address_of_text. 3792 Remainder is the blob to be loaded contiguously 3793 from start address. */ 3794 3795 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, 3796 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, 3797 fw_hdr); 3798 if (err) 3799 return err; 3800 3801 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, 3802 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, 3803 fw_hdr); 3804 if (err) 3805 return err; 3806 3807 /* Now startup only the RX cpu. */ 3808 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE, 3809 be32_to_cpu(fw_hdr->base_addr)); 3810 if (err) { 3811 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " 3812 "should be %08x\n", __func__, 3813 tr32(RX_CPU_BASE + CPU_PC), 3814 be32_to_cpu(fw_hdr->base_addr)); 3815 return -ENODEV; 3816 } 3817 3818 tg3_rxcpu_resume(tp); 3819 3820 return 0; 3821 } 3822 3823 static int tg3_validate_rxcpu_state(struct tg3 *tp) 3824 { 3825 const int iters = 1000; 3826 int i; 3827 u32 val; 3828 3829 /* Wait for boot code to complete initialization and enter service 3830 * loop. It is then safe to download service patches 3831 */ 3832 for (i = 0; i < iters; i++) { 3833 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP) 3834 break; 3835 3836 udelay(10); 3837 } 3838 3839 if (i == iters) { 3840 netdev_err(tp->dev, "Boot code not ready for service patches\n"); 3841 return -EBUSY; 3842 } 3843 3844 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE); 3845 if (val & 0xff) { 3846 netdev_warn(tp->dev, 3847 "Other patches exist. Not downloading EEE patch\n"); 3848 return -EEXIST; 3849 } 3850 3851 return 0; 3852 } 3853 3854 /* tp->lock is held. */ 3855 static void tg3_load_57766_firmware(struct tg3 *tp) 3856 { 3857 struct tg3_firmware_hdr *fw_hdr; 3858 3859 if (!tg3_flag(tp, NO_NVRAM)) 3860 return; 3861 3862 if (tg3_validate_rxcpu_state(tp)) 3863 return; 3864 3865 if (!tp->fw) 3866 return; 3867 3868 /* This firmware blob has a different format than older firmware 3869 * releases as given below. The main difference is we have fragmented 3870 * data to be written to non-contiguous locations. 3871 * 3872 * In the beginning we have a firmware header identical to other 3873 * firmware which consists of version, base addr and length. The length 3874 * here is unused and set to 0xffffffff. 3875 * 3876 * This is followed by a series of firmware fragments which are 3877 * individually identical to previous firmware. i.e. they have the 3878 * firmware header and followed by data for that fragment. The version 3879 * field of the individual fragment header is unused. 
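 */

/* Layout sketch of the 57766 blob described above:
 *
 *    +------------------------------+
 *    | main header (len = ~0)       |
 *    +------------------------------+
 *    | frag hdr | fragment data     |  <- len = header + data size
 *    +------------------------------+
 *    | frag hdr | fragment data     |
 *    +------------------------------+
 *    | ...                          |
 *
 * tg3_load_firmware_cpu() skips the main header and walks the fragments
 * using each fragment header's len field.
 */

/* The main header's base address must match before anything is loaded.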
3880 */ 3881 3882 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3883 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR) 3884 return; 3885 3886 if (tg3_rxcpu_pause(tp)) 3887 return; 3888 3889 /* tg3_load_firmware_cpu() will always succeed for the 57766 */ 3890 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr); 3891 3892 tg3_rxcpu_resume(tp); 3893 } 3894 3895 /* tp->lock is held. */ 3896 static int tg3_load_tso_firmware(struct tg3 *tp) 3897 { 3898 const struct tg3_firmware_hdr *fw_hdr; 3899 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; 3900 int err; 3901 3902 if (!tg3_flag(tp, FW_TSO)) 3903 return 0; 3904 3905 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 3906 3907 /* Firmware blob starts with version numbers, followed by 3908 start address and length. We are setting complete length. 3909 length = end_address_of_bss - start_address_of_text. 3910 Remainder is the blob to be loaded contiguously 3911 from start address. */ 3912 3913 cpu_scratch_size = tp->fw_len; 3914 3915 if (tg3_asic_rev(tp) == ASIC_REV_5705) { 3916 cpu_base = RX_CPU_BASE; 3917 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; 3918 } else { 3919 cpu_base = TX_CPU_BASE; 3920 cpu_scratch_base = TX_CPU_SCRATCH_BASE; 3921 cpu_scratch_size = TX_CPU_SCRATCH_SIZE; 3922 } 3923 3924 err = tg3_load_firmware_cpu(tp, cpu_base, 3925 cpu_scratch_base, cpu_scratch_size, 3926 fw_hdr); 3927 if (err) 3928 return err; 3929 3930 /* Now startup the cpu. */ 3931 err = tg3_pause_cpu_and_set_pc(tp, cpu_base, 3932 be32_to_cpu(fw_hdr->base_addr)); 3933 if (err) { 3934 netdev_err(tp->dev, 3935 "%s fails to set CPU PC, is %08x should be %08x\n", 3936 __func__, tr32(cpu_base + CPU_PC), 3937 be32_to_cpu(fw_hdr->base_addr)); 3938 return -ENODEV; 3939 } 3940 3941 tg3_resume_cpu(tp, cpu_base); 3942 return 0; 3943 } 3944 3945 /* tp->lock is held. */ 3946 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr, 3947 int index) 3948 { 3949 u32 addr_high, addr_low; 3950 3951 addr_high = ((mac_addr[0] << 8) | mac_addr[1]); 3952 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) | 3953 (mac_addr[4] << 8) | mac_addr[5]); 3954 3955 if (index < 4) { 3956 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high); 3957 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low); 3958 } else { 3959 index -= 4; 3960 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high); 3961 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low); 3962 } 3963 } 3964 3965 /* tp->lock is held. */ 3966 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1) 3967 { 3968 u32 addr_high; 3969 int i; 3970 3971 for (i = 0; i < 4; i++) { 3972 if (i == 1 && skip_mac_1) 3973 continue; 3974 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i); 3975 } 3976 3977 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 3978 tg3_asic_rev(tp) == ASIC_REV_5704) { 3979 for (i = 4; i < 16; i++) 3980 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i); 3981 } 3982 3983 addr_high = (tp->dev->dev_addr[0] + 3984 tp->dev->dev_addr[1] + 3985 tp->dev->dev_addr[2] + 3986 tp->dev->dev_addr[3] + 3987 tp->dev->dev_addr[4] + 3988 tp->dev->dev_addr[5]) & 3989 TX_BACKOFF_SEED_MASK; 3990 tw32(MAC_TX_BACKOFF_SEED, addr_high); 3991 } 3992 3993 static void tg3_enable_register_access(struct tg3 *tp) 3994 { 3995 /* 3996 * Make sure register accesses (indirect or otherwise) will function 3997 * correctly. 
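 */

/* The write below goes through PCI configuration space rather than MMIO,
 * which (assumption) is why it is safe around power transitions, before
 * memory-mapped access is known to be good; tp->misc_host_ctrl holds the
 * value the driver cached at init time.
 */

/* Restore the cached MISC_HOST_CTRL setting.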
3998 */ 3999 pci_write_config_dword(tp->pdev, 4000 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); 4001 } 4002 4003 static int tg3_power_up(struct tg3 *tp) 4004 { 4005 int err; 4006 4007 tg3_enable_register_access(tp); 4008 4009 err = pci_set_power_state(tp->pdev, PCI_D0); 4010 if (!err) { 4011 /* Switch out of Vaux if it is a NIC */ 4012 tg3_pwrsrc_switch_to_vmain(tp); 4013 } else { 4014 netdev_err(tp->dev, "Transition to D0 failed\n"); 4015 } 4016 4017 return err; 4018 } 4019 4020 static int tg3_setup_phy(struct tg3 *, bool); 4021 4022 static int tg3_power_down_prepare(struct tg3 *tp) 4023 { 4024 u32 misc_host_ctrl; 4025 bool device_should_wake, do_low_power; 4026 4027 tg3_enable_register_access(tp); 4028 4029 /* Restore the CLKREQ setting. */ 4030 if (tg3_flag(tp, CLKREQ_BUG)) 4031 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, 4032 PCI_EXP_LNKCTL_CLKREQ_EN); 4033 4034 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 4035 tw32(TG3PCI_MISC_HOST_CTRL, 4036 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); 4037 4038 device_should_wake = device_may_wakeup(&tp->pdev->dev) && 4039 tg3_flag(tp, WOL_ENABLE); 4040 4041 if (tg3_flag(tp, USE_PHYLIB)) { 4042 do_low_power = false; 4043 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && 4044 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4045 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, }; 4046 struct phy_device *phydev; 4047 u32 phyid; 4048 4049 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 4050 4051 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; 4052 4053 tp->link_config.speed = phydev->speed; 4054 tp->link_config.duplex = phydev->duplex; 4055 tp->link_config.autoneg = phydev->autoneg; 4056 ethtool_convert_link_mode_to_legacy_u32( 4057 &tp->link_config.advertising, 4058 phydev->advertising); 4059 4060 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising); 4061 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, 4062 advertising); 4063 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, 4064 advertising); 4065 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, 4066 advertising); 4067 4068 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) { 4069 if (tg3_flag(tp, WOL_SPEED_100MB)) { 4070 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, 4071 advertising); 4072 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, 4073 advertising); 4074 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, 4075 advertising); 4076 } else { 4077 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, 4078 advertising); 4079 } 4080 } 4081 4082 linkmode_copy(phydev->advertising, advertising); 4083 phy_start_aneg(phydev); 4084 4085 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; 4086 if (phyid != PHY_ID_BCMAC131) { 4087 phyid &= PHY_BCM_OUI_MASK; 4088 if (phyid == PHY_BCM_OUI_1 || 4089 phyid == PHY_BCM_OUI_2 || 4090 phyid == PHY_BCM_OUI_3) 4091 do_low_power = true; 4092 } 4093 } 4094 } else { 4095 do_low_power = true; 4096 4097 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) 4098 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; 4099 4100 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 4101 tg3_setup_phy(tp, false); 4102 } 4103 4104 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 4105 u32 val; 4106 4107 val = tr32(GRC_VCPU_EXT_CTRL); 4108 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); 4109 } else if (!tg3_flag(tp, ENABLE_ASF)) { 4110 int i; 4111 u32 val; 4112 4113 for (i = 0; i < 200; i++) { 4114 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val); 4115 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) 4116 break; 4117 msleep(1); 4118 } 4119 } 4120 if (tg3_flag(tp, WOL_CAP)) 
4121 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | 4122 WOL_DRV_STATE_SHUTDOWN | 4123 WOL_DRV_WOL | 4124 WOL_SET_MAGIC_PKT); 4125 4126 if (device_should_wake) { 4127 u32 mac_mode; 4128 4129 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 4130 if (do_low_power && 4131 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 4132 tg3_phy_auxctl_write(tp, 4133 MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 4134 MII_TG3_AUXCTL_PCTL_WOL_EN | 4135 MII_TG3_AUXCTL_PCTL_100TX_LPWR | 4136 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC); 4137 udelay(40); 4138 } 4139 4140 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 4141 mac_mode = MAC_MODE_PORT_MODE_GMII; 4142 else if (tp->phy_flags & 4143 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) { 4144 if (tp->link_config.active_speed == SPEED_1000) 4145 mac_mode = MAC_MODE_PORT_MODE_GMII; 4146 else 4147 mac_mode = MAC_MODE_PORT_MODE_MII; 4148 } else 4149 mac_mode = MAC_MODE_PORT_MODE_MII; 4150 4151 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; 4152 if (tg3_asic_rev(tp) == ASIC_REV_5700) { 4153 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ? 4154 SPEED_100 : SPEED_10; 4155 if (tg3_5700_link_polarity(tp, speed)) 4156 mac_mode |= MAC_MODE_LINK_POLARITY; 4157 else 4158 mac_mode &= ~MAC_MODE_LINK_POLARITY; 4159 } 4160 } else { 4161 mac_mode = MAC_MODE_PORT_MODE_TBI; 4162 } 4163 4164 if (!tg3_flag(tp, 5750_PLUS)) 4165 tw32(MAC_LED_CTRL, tp->led_ctrl); 4166 4167 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; 4168 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) && 4169 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE))) 4170 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; 4171 4172 if (tg3_flag(tp, ENABLE_APE)) 4173 mac_mode |= MAC_MODE_APE_TX_EN | 4174 MAC_MODE_APE_RX_EN | 4175 MAC_MODE_TDE_ENABLE; 4176 4177 tw32_f(MAC_MODE, mac_mode); 4178 udelay(100); 4179 4180 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE); 4181 udelay(10); 4182 } 4183 4184 if (!tg3_flag(tp, WOL_SPEED_100MB) && 4185 (tg3_asic_rev(tp) == ASIC_REV_5700 || 4186 tg3_asic_rev(tp) == ASIC_REV_5701)) { 4187 u32 base_val; 4188 4189 base_val = tp->pci_clock_ctrl; 4190 base_val |= (CLOCK_CTRL_RXCLK_DISABLE | 4191 CLOCK_CTRL_TXCLK_DISABLE); 4192 4193 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | 4194 CLOCK_CTRL_PWRDOWN_PLL133, 40); 4195 } else if (tg3_flag(tp, 5780_CLASS) || 4196 tg3_flag(tp, CPMU_PRESENT) || 4197 tg3_asic_rev(tp) == ASIC_REV_5906) { 4198 /* do nothing */ 4199 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) { 4200 u32 newbits1, newbits2; 4201 4202 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 4203 tg3_asic_rev(tp) == ASIC_REV_5701) { 4204 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE | 4205 CLOCK_CTRL_TXCLK_DISABLE | 4206 CLOCK_CTRL_ALTCLK); 4207 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; 4208 } else if (tg3_flag(tp, 5705_PLUS)) { 4209 newbits1 = CLOCK_CTRL_625_CORE; 4210 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; 4211 } else { 4212 newbits1 = CLOCK_CTRL_ALTCLK; 4213 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; 4214 } 4215 4216 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1, 4217 40); 4218 4219 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, 4220 40); 4221 4222 if (!tg3_flag(tp, 5705_PLUS)) { 4223 u32 newbits3; 4224 4225 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 4226 tg3_asic_rev(tp) == ASIC_REV_5701) { 4227 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE | 4228 CLOCK_CTRL_TXCLK_DISABLE | 4229 CLOCK_CTRL_44MHZ_CORE); 4230 } else { 4231 newbits3 = CLOCK_CTRL_44MHZ_CORE; 4232 } 4233 4234 tw32_wait_f(TG3PCI_CLOCK_CTRL, 4235 tp->pci_clock_ctrl | newbits3, 40); 4236 } 4237 } 4238 4239 if (!(device_should_wake) && !tg3_flag(tp, 
ENABLE_ASF)) 4240 tg3_power_down_phy(tp, do_low_power); 4241 4242 tg3_frob_aux_power(tp, true); 4243 4244 /* Workaround for unstable PLL clock */ 4245 if ((!tg3_flag(tp, IS_SSB_CORE)) && 4246 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) || 4247 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) { 4248 u32 val = tr32(0x7d00); 4249 4250 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); 4251 tw32(0x7d00, val); 4252 if (!tg3_flag(tp, ENABLE_ASF)) { 4253 int err; 4254 4255 err = tg3_nvram_lock(tp); 4256 tg3_halt_cpu(tp, RX_CPU_BASE); 4257 if (!err) 4258 tg3_nvram_unlock(tp); 4259 } 4260 } 4261 4262 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); 4263 4264 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN); 4265 4266 return 0; 4267 } 4268 4269 static void tg3_power_down(struct tg3 *tp) 4270 { 4271 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE)); 4272 pci_set_power_state(tp->pdev, PCI_D3hot); 4273 } 4274 4275 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex) 4276 { 4277 switch (val & MII_TG3_AUX_STAT_SPDMASK) { 4278 case MII_TG3_AUX_STAT_10HALF: 4279 *speed = SPEED_10; 4280 *duplex = DUPLEX_HALF; 4281 break; 4282 4283 case MII_TG3_AUX_STAT_10FULL: 4284 *speed = SPEED_10; 4285 *duplex = DUPLEX_FULL; 4286 break; 4287 4288 case MII_TG3_AUX_STAT_100HALF: 4289 *speed = SPEED_100; 4290 *duplex = DUPLEX_HALF; 4291 break; 4292 4293 case MII_TG3_AUX_STAT_100FULL: 4294 *speed = SPEED_100; 4295 *duplex = DUPLEX_FULL; 4296 break; 4297 4298 case MII_TG3_AUX_STAT_1000HALF: 4299 *speed = SPEED_1000; 4300 *duplex = DUPLEX_HALF; 4301 break; 4302 4303 case MII_TG3_AUX_STAT_1000FULL: 4304 *speed = SPEED_1000; 4305 *duplex = DUPLEX_FULL; 4306 break; 4307 4308 default: 4309 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 4310 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : 4311 SPEED_10; 4312 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? 
DUPLEX_FULL : 4313 DUPLEX_HALF; 4314 break; 4315 } 4316 *speed = SPEED_UNKNOWN; 4317 *duplex = DUPLEX_UNKNOWN; 4318 break; 4319 } 4320 } 4321 4322 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) 4323 { 4324 int err = 0; 4325 u32 val, new_adv; 4326 4327 new_adv = ADVERTISE_CSMA; 4328 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL; 4329 new_adv |= mii_advertise_flowctrl(flowctrl); 4330 4331 err = tg3_writephy(tp, MII_ADVERTISE, new_adv); 4332 if (err) 4333 goto done; 4334 4335 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4336 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise); 4337 4338 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 4339 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) 4340 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; 4341 4342 err = tg3_writephy(tp, MII_CTRL1000, new_adv); 4343 if (err) 4344 goto done; 4345 } 4346 4347 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 4348 goto done; 4349 4350 tw32(TG3_CPMU_EEE_MODE, 4351 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); 4352 4353 err = tg3_phy_toggle_auxctl_smdsp(tp, true); 4354 if (!err) { 4355 u32 err2; 4356 4357 val = 0; 4358 /* Advertise 100-BaseTX EEE ability */ 4359 if (advertise & ADVERTISED_100baseT_Full) 4360 val |= MDIO_AN_EEE_ADV_100TX; 4361 /* Advertise 1000-BaseT EEE ability */ 4362 if (advertise & ADVERTISED_1000baseT_Full) 4363 val |= MDIO_AN_EEE_ADV_1000T; 4364 4365 if (!tp->eee.eee_enabled) { 4366 val = 0; 4367 tp->eee.advertised = 0; 4368 } else { 4369 tp->eee.advertised = advertise & 4370 (ADVERTISED_100baseT_Full | 4371 ADVERTISED_1000baseT_Full); 4372 } 4373 4374 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); 4375 if (err) 4376 val = 0; 4377 4378 switch (tg3_asic_rev(tp)) { 4379 case ASIC_REV_5717: 4380 case ASIC_REV_57765: 4381 case ASIC_REV_57766: 4382 case ASIC_REV_5719: 4383 /* If we advertised any eee advertisements above... 
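 * then the TAP26 DSP bits (ALNOKO/RMRXSTO/OPCSINPT) get set as well
 * on these revs.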
*/ 4384 if (val) 4385 val = MII_TG3_DSP_TAP26_ALNOKO | 4386 MII_TG3_DSP_TAP26_RMRXSTO | 4387 MII_TG3_DSP_TAP26_OPCSINPT; 4388 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); 4389 fallthrough; 4390 case ASIC_REV_5720: 4391 case ASIC_REV_5762: 4392 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) 4393 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val | 4394 MII_TG3_DSP_CH34TP2_HIBW01); 4395 } 4396 4397 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false); 4398 if (!err) 4399 err = err2; 4400 } 4401 4402 done: 4403 return err; 4404 } 4405 4406 static void tg3_phy_copper_begin(struct tg3 *tp) 4407 { 4408 if (tp->link_config.autoneg == AUTONEG_ENABLE || 4409 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4410 u32 adv, fc; 4411 4412 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && 4413 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) { 4414 adv = ADVERTISED_10baseT_Half | 4415 ADVERTISED_10baseT_Full; 4416 if (tg3_flag(tp, WOL_SPEED_100MB)) 4417 adv |= ADVERTISED_100baseT_Half | 4418 ADVERTISED_100baseT_Full; 4419 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) { 4420 if (!(tp->phy_flags & 4421 TG3_PHYFLG_DISABLE_1G_HD_ADV)) 4422 adv |= ADVERTISED_1000baseT_Half; 4423 adv |= ADVERTISED_1000baseT_Full; 4424 } 4425 4426 fc = FLOW_CTRL_TX | FLOW_CTRL_RX; 4427 } else { 4428 adv = tp->link_config.advertising; 4429 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) 4430 adv &= ~(ADVERTISED_1000baseT_Half | 4431 ADVERTISED_1000baseT_Full); 4432 4433 fc = tp->link_config.flowctrl; 4434 } 4435 4436 tg3_phy_autoneg_cfg(tp, adv, fc); 4437 4438 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && 4439 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) { 4440 /* Normally during power down we want to autonegotiate 4441 * the lowest possible speed for WOL. However, to avoid 4442 * link flap, we leave it untouched. 4443 */ 4444 return; 4445 } 4446 4447 tg3_writephy(tp, MII_BMCR, 4448 BMCR_ANENABLE | BMCR_ANRESTART); 4449 } else { 4450 int i; 4451 u32 bmcr, orig_bmcr; 4452 4453 tp->link_config.active_speed = tp->link_config.speed; 4454 tp->link_config.active_duplex = tp->link_config.duplex; 4455 4456 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 4457 /* With autoneg disabled, 5715 only links up when the 4458 * advertisement register has the configured speed 4459 * enabled. 
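 * Writing ADVERTISE_ALL below keeps every speed advertised so that
 * the forced speed/duplex configured next can bring the link up.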
4460 */ 4461 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL); 4462 } 4463 4464 bmcr = 0; 4465 switch (tp->link_config.speed) { 4466 default: 4467 case SPEED_10: 4468 break; 4469 4470 case SPEED_100: 4471 bmcr |= BMCR_SPEED100; 4472 break; 4473 4474 case SPEED_1000: 4475 bmcr |= BMCR_SPEED1000; 4476 break; 4477 } 4478 4479 if (tp->link_config.duplex == DUPLEX_FULL) 4480 bmcr |= BMCR_FULLDPLX; 4481 4482 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) && 4483 (bmcr != orig_bmcr)) { 4484 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK); 4485 for (i = 0; i < 1500; i++) { 4486 u32 tmp; 4487 4488 udelay(10); 4489 if (tg3_readphy(tp, MII_BMSR, &tmp) || 4490 tg3_readphy(tp, MII_BMSR, &tmp)) 4491 continue; 4492 if (!(tmp & BMSR_LSTATUS)) { 4493 udelay(40); 4494 break; 4495 } 4496 } 4497 tg3_writephy(tp, MII_BMCR, bmcr); 4498 udelay(40); 4499 } 4500 } 4501 } 4502 4503 static int tg3_phy_pull_config(struct tg3 *tp) 4504 { 4505 int err; 4506 u32 val; 4507 4508 err = tg3_readphy(tp, MII_BMCR, &val); 4509 if (err) 4510 goto done; 4511 4512 if (!(val & BMCR_ANENABLE)) { 4513 tp->link_config.autoneg = AUTONEG_DISABLE; 4514 tp->link_config.advertising = 0; 4515 tg3_flag_clear(tp, PAUSE_AUTONEG); 4516 4517 err = -EIO; 4518 4519 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) { 4520 case 0: 4521 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 4522 goto done; 4523 4524 tp->link_config.speed = SPEED_10; 4525 break; 4526 case BMCR_SPEED100: 4527 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 4528 goto done; 4529 4530 tp->link_config.speed = SPEED_100; 4531 break; 4532 case BMCR_SPEED1000: 4533 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4534 tp->link_config.speed = SPEED_1000; 4535 break; 4536 } 4537 fallthrough; 4538 default: 4539 goto done; 4540 } 4541 4542 if (val & BMCR_FULLDPLX) 4543 tp->link_config.duplex = DUPLEX_FULL; 4544 else 4545 tp->link_config.duplex = DUPLEX_HALF; 4546 4547 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; 4548 4549 err = 0; 4550 goto done; 4551 } 4552 4553 tp->link_config.autoneg = AUTONEG_ENABLE; 4554 tp->link_config.advertising = ADVERTISED_Autoneg; 4555 tg3_flag_set(tp, PAUSE_AUTONEG); 4556 4557 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 4558 u32 adv; 4559 4560 err = tg3_readphy(tp, MII_ADVERTISE, &val); 4561 if (err) 4562 goto done; 4563 4564 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL); 4565 tp->link_config.advertising |= adv | ADVERTISED_TP; 4566 4567 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val); 4568 } else { 4569 tp->link_config.advertising |= ADVERTISED_FIBRE; 4570 } 4571 4572 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4573 u32 adv; 4574 4575 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 4576 err = tg3_readphy(tp, MII_CTRL1000, &val); 4577 if (err) 4578 goto done; 4579 4580 adv = mii_ctrl1000_to_ethtool_adv_t(val); 4581 } else { 4582 err = tg3_readphy(tp, MII_ADVERTISE, &val); 4583 if (err) 4584 goto done; 4585 4586 adv = tg3_decode_flowctrl_1000X(val); 4587 tp->link_config.flowctrl = adv; 4588 4589 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL); 4590 adv = mii_adv_to_ethtool_adv_x(val); 4591 } 4592 4593 tp->link_config.advertising |= adv; 4594 } 4595 4596 done: 4597 return err; 4598 } 4599 4600 static int tg3_init_5401phy_dsp(struct tg3 *tp) 4601 { 4602 int err; 4603 4604 /* Turn off tap power management. 
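 * The AUXCTL/DSP writes that follow are an opaque sequence of magic
 * values for the BCM5401 PHY.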
*/ 4605 /* Set Extended packet length bit */ 4606 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); 4607 4608 err |= tg3_phydsp_write(tp, 0x0012, 0x1804); 4609 err |= tg3_phydsp_write(tp, 0x0013, 0x1204); 4610 err |= tg3_phydsp_write(tp, 0x8006, 0x0132); 4611 err |= tg3_phydsp_write(tp, 0x8006, 0x0232); 4612 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20); 4613 4614 udelay(40); 4615 4616 return err; 4617 } 4618 4619 static bool tg3_phy_eee_config_ok(struct tg3 *tp) 4620 { 4621 struct ethtool_eee eee; 4622 4623 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) 4624 return true; 4625 4626 tg3_eee_pull_config(tp, &eee); 4627 4628 if (tp->eee.eee_enabled) { 4629 if (tp->eee.advertised != eee.advertised || 4630 tp->eee.tx_lpi_timer != eee.tx_lpi_timer || 4631 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled) 4632 return false; 4633 } else { 4634 /* EEE is disabled but we're advertising */ 4635 if (eee.advertised) 4636 return false; 4637 } 4638 4639 return true; 4640 } 4641 4642 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv) 4643 { 4644 u32 advmsk, tgtadv, advertising; 4645 4646 advertising = tp->link_config.advertising; 4647 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL; 4648 4649 advmsk = ADVERTISE_ALL; 4650 if (tp->link_config.active_duplex == DUPLEX_FULL) { 4651 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl); 4652 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4653 } 4654 4655 if (tg3_readphy(tp, MII_ADVERTISE, lcladv)) 4656 return false; 4657 4658 if ((*lcladv & advmsk) != tgtadv) 4659 return false; 4660 4661 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4662 u32 tg3_ctrl; 4663 4664 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising); 4665 4666 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl)) 4667 return false; 4668 4669 if (tgtadv && 4670 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 4671 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) { 4672 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; 4673 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL | 4674 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); 4675 } else { 4676 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL); 4677 } 4678 4679 if (tg3_ctrl != tgtadv) 4680 return false; 4681 } 4682 4683 return true; 4684 } 4685 4686 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv) 4687 { 4688 u32 lpeth = 0; 4689 4690 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { 4691 u32 val; 4692 4693 if (tg3_readphy(tp, MII_STAT1000, &val)) 4694 return false; 4695 4696 lpeth = mii_stat1000_to_ethtool_lpa_t(val); 4697 } 4698 4699 if (tg3_readphy(tp, MII_LPA, rmtadv)) 4700 return false; 4701 4702 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv); 4703 tp->link_config.rmt_adv = lpeth; 4704 4705 return true; 4706 } 4707 4708 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up) 4709 { 4710 if (curr_link_up != tp->link_up) { 4711 if (curr_link_up) { 4712 netif_carrier_on(tp->dev); 4713 } else { 4714 netif_carrier_off(tp->dev); 4715 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 4716 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 4717 } 4718 4719 tg3_link_report(tp); 4720 return true; 4721 } 4722 4723 return false; 4724 } 4725 4726 static void tg3_clear_mac_status(struct tg3 *tp) 4727 { 4728 tw32(MAC_EVENT, 0); 4729 4730 tw32_f(MAC_STATUS, 4731 MAC_STATUS_SYNC_CHANGED | 4732 MAC_STATUS_CFG_CHANGED | 4733 MAC_STATUS_MI_COMPLETION | 4734 MAC_STATUS_LNKSTATE_CHANGED); 4735 udelay(40); 4736 } 4737 4738 static void tg3_setup_eee(struct tg3 *tp) 4739 { 4740 u32 val; 4741 4742 
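/* EEE bring-up, in the order programmed below: LNKIDL_CTRL selects
 * which link-idle sources may trigger LPI, EEE_CTRL sets the LPI exit
 * timer, EEE_MODE carries the enable/mode bits (written as 0 when EEE
 * is disabled), and DBTMR1/DBTMR2 set the debounce timers.
 */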
val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | 4743 TG3_CPMU_EEE_LNKIDL_UART_IDL; 4744 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) 4745 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT; 4746 4747 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val); 4748 4749 tw32_f(TG3_CPMU_EEE_CTRL, 4750 TG3_CPMU_EEE_CTRL_EXIT_20_1_US); 4751 4752 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET | 4753 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) | 4754 TG3_CPMU_EEEMD_LPI_IN_RX | 4755 TG3_CPMU_EEEMD_EEE_ENABLE; 4756 4757 if (tg3_asic_rev(tp) != ASIC_REV_5717) 4758 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; 4759 4760 if (tg3_flag(tp, ENABLE_APE)) 4761 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; 4762 4763 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0); 4764 4765 tw32_f(TG3_CPMU_EEE_DBTMR1, 4766 TG3_CPMU_DBTMR1_PCIEXIT_2047US | 4767 (tp->eee.tx_lpi_timer & 0xffff)); 4768 4769 tw32_f(TG3_CPMU_EEE_DBTMR2, 4770 TG3_CPMU_DBTMR2_APE_TX_2047US | 4771 TG3_CPMU_DBTMR2_TXIDXEQ_2047US); 4772 } 4773 4774 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset) 4775 { 4776 bool current_link_up; 4777 u32 bmsr, val; 4778 u32 lcl_adv, rmt_adv; 4779 u32 current_speed; 4780 u8 current_duplex; 4781 int i, err; 4782 4783 tg3_clear_mac_status(tp); 4784 4785 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { 4786 tw32_f(MAC_MI_MODE, 4787 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); 4788 udelay(80); 4789 } 4790 4791 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0); 4792 4793 /* Some third-party PHYs need to be reset on link going 4794 * down. 4795 */ 4796 if ((tg3_asic_rev(tp) == ASIC_REV_5703 || 4797 tg3_asic_rev(tp) == ASIC_REV_5704 || 4798 tg3_asic_rev(tp) == ASIC_REV_5705) && 4799 tp->link_up) { 4800 tg3_readphy(tp, MII_BMSR, &bmsr); 4801 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4802 !(bmsr & BMSR_LSTATUS)) 4803 force_reset = true; 4804 } 4805 if (force_reset) 4806 tg3_phy_reset(tp); 4807 4808 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 4809 tg3_readphy(tp, MII_BMSR, &bmsr); 4810 if (tg3_readphy(tp, MII_BMSR, &bmsr) || 4811 !tg3_flag(tp, INIT_COMPLETE)) 4812 bmsr = 0; 4813 4814 if (!(bmsr & BMSR_LSTATUS)) { 4815 err = tg3_init_5401phy_dsp(tp); 4816 if (err) 4817 return err; 4818 4819 tg3_readphy(tp, MII_BMSR, &bmsr); 4820 for (i = 0; i < 1000; i++) { 4821 udelay(10); 4822 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4823 (bmsr & BMSR_LSTATUS)) { 4824 udelay(40); 4825 break; 4826 } 4827 } 4828 4829 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) == 4830 TG3_PHY_REV_BCM5401_B0 && 4831 !(bmsr & BMSR_LSTATUS) && 4832 tp->link_config.active_speed == SPEED_1000) { 4833 err = tg3_phy_reset(tp); 4834 if (!err) 4835 err = tg3_init_5401phy_dsp(tp); 4836 if (err) 4837 return err; 4838 } 4839 } 4840 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 4841 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) { 4842 /* 5701 {A0,B0} CRC bug workaround */ 4843 tg3_writephy(tp, 0x15, 0x0a75); 4844 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); 4845 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); 4846 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); 4847 } 4848 4849 /* Clear pending interrupts... 
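 * (MII_TG3_ISTAT is latched, so it is read twice below to make sure
 * it is clear)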
*/ 4850 tg3_readphy(tp, MII_TG3_ISTAT, &val); 4851 tg3_readphy(tp, MII_TG3_ISTAT, &val); 4852 4853 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) 4854 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); 4855 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) 4856 tg3_writephy(tp, MII_TG3_IMASK, ~0); 4857 4858 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 4859 tg3_asic_rev(tp) == ASIC_REV_5701) { 4860 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1) 4861 tg3_writephy(tp, MII_TG3_EXT_CTRL, 4862 MII_TG3_EXT_CTRL_LNK3_LED_MODE); 4863 else 4864 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0); 4865 } 4866 4867 current_link_up = false; 4868 current_speed = SPEED_UNKNOWN; 4869 current_duplex = DUPLEX_UNKNOWN; 4870 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE; 4871 tp->link_config.rmt_adv = 0; 4872 4873 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) { 4874 err = tg3_phy_auxctl_read(tp, 4875 MII_TG3_AUXCTL_SHDWSEL_MISCTEST, 4876 &val); 4877 if (!err && !(val & (1 << 10))) { 4878 tg3_phy_auxctl_write(tp, 4879 MII_TG3_AUXCTL_SHDWSEL_MISCTEST, 4880 val | (1 << 10)); 4881 goto relink; 4882 } 4883 } 4884 4885 bmsr = 0; 4886 for (i = 0; i < 100; i++) { 4887 tg3_readphy(tp, MII_BMSR, &bmsr); 4888 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4889 (bmsr & BMSR_LSTATUS)) 4890 break; 4891 udelay(40); 4892 } 4893 4894 if (bmsr & BMSR_LSTATUS) { 4895 u32 aux_stat, bmcr; 4896 4897 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat); 4898 for (i = 0; i < 2000; i++) { 4899 udelay(10); 4900 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) && 4901 aux_stat) 4902 break; 4903 } 4904 4905 tg3_aux_stat_to_speed_duplex(tp, aux_stat, 4906 &current_speed, 4907 &current_duplex); 4908 4909 bmcr = 0; 4910 for (i = 0; i < 200; i++) { 4911 tg3_readphy(tp, MII_BMCR, &bmcr); 4912 if (tg3_readphy(tp, MII_BMCR, &bmcr)) 4913 continue; 4914 if (bmcr && bmcr != 0x7fff) 4915 break; 4916 udelay(10); 4917 } 4918 4919 lcl_adv = 0; 4920 rmt_adv = 0; 4921 4922 tp->link_config.active_speed = current_speed; 4923 tp->link_config.active_duplex = current_duplex; 4924 4925 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 4926 bool eee_config_ok = tg3_phy_eee_config_ok(tp); 4927 4928 if ((bmcr & BMCR_ANENABLE) && 4929 eee_config_ok && 4930 tg3_phy_copper_an_config_ok(tp, &lcl_adv) && 4931 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv)) 4932 current_link_up = true; 4933 4934 /* EEE settings changes take effect only after a phy 4935 * reset. If we have skipped a reset due to Link Flap 4936 * Avoidance being enabled, do it now.
4937 */ 4938 if (!eee_config_ok && 4939 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 4940 !force_reset) { 4941 tg3_setup_eee(tp); 4942 tg3_phy_reset(tp); 4943 } 4944 } else { 4945 if (!(bmcr & BMCR_ANENABLE) && 4946 tp->link_config.speed == current_speed && 4947 tp->link_config.duplex == current_duplex) { 4948 current_link_up = true; 4949 } 4950 } 4951 4952 if (current_link_up && 4953 tp->link_config.active_duplex == DUPLEX_FULL) { 4954 u32 reg, bit; 4955 4956 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 4957 reg = MII_TG3_FET_GEN_STAT; 4958 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT; 4959 } else { 4960 reg = MII_TG3_EXT_STAT; 4961 bit = MII_TG3_EXT_STAT_MDIX; 4962 } 4963 4964 if (!tg3_readphy(tp, reg, &val) && (val & bit)) 4965 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE; 4966 4967 tg3_setup_flow_control(tp, lcl_adv, rmt_adv); 4968 } 4969 } 4970 4971 relink: 4972 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4973 tg3_phy_copper_begin(tp); 4974 4975 if (tg3_flag(tp, ROBOSWITCH)) { 4976 current_link_up = true; 4977 /* FIXME: when BCM5325 switch is used use 100 MBit/s */ 4978 current_speed = SPEED_1000; 4979 current_duplex = DUPLEX_FULL; 4980 tp->link_config.active_speed = current_speed; 4981 tp->link_config.active_duplex = current_duplex; 4982 } 4983 4984 tg3_readphy(tp, MII_BMSR, &bmsr); 4985 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) || 4986 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) 4987 current_link_up = true; 4988 } 4989 4990 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; 4991 if (current_link_up) { 4992 if (tp->link_config.active_speed == SPEED_100 || 4993 tp->link_config.active_speed == SPEED_10) 4994 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 4995 else 4996 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 4997 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) 4998 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 4999 else 5000 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5001 5002 /* In order for the 5750 core in BCM4785 chip to work properly 5003 * in RGMII mode, the Led Control Register must be set up. 5004 */ 5005 if (tg3_flag(tp, RGMII_MODE)) { 5006 u32 led_ctrl = tr32(MAC_LED_CTRL); 5007 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON); 5008 5009 if (tp->link_config.active_speed == SPEED_10) 5010 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE; 5011 else if (tp->link_config.active_speed == SPEED_100) 5012 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE | 5013 LED_CTRL_100MBPS_ON); 5014 else if (tp->link_config.active_speed == SPEED_1000) 5015 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE | 5016 LED_CTRL_1000MBPS_ON); 5017 5018 tw32(MAC_LED_CTRL, led_ctrl); 5019 udelay(40); 5020 } 5021 5022 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; 5023 if (tp->link_config.active_duplex == DUPLEX_HALF) 5024 tp->mac_mode |= MAC_MODE_HALF_DUPLEX; 5025 5026 if (tg3_asic_rev(tp) == ASIC_REV_5700) { 5027 if (current_link_up && 5028 tg3_5700_link_polarity(tp, tp->link_config.active_speed)) 5029 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 5030 else 5031 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; 5032 } 5033 5034 /* ??? Without this setting Netgear GA302T PHY does not 5035 * ??? send/receive packets... 5036 */ 5037 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 && 5038 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) { 5039 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; 5040 tw32_f(MAC_MI_MODE, tp->mi_mode); 5041 udelay(80); 5042 } 5043 5044 tw32_f(MAC_MODE, tp->mac_mode); 5045 udelay(40); 5046 5047 tg3_phy_eee_adjust(tp, current_link_up); 5048 5049 if (tg3_flag(tp, USE_LINKCHG_REG)) { 5050 /* Polled via timer. 
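 * With USE_LINKCHG_REG set, the timer polls for link changes, so the
 * MAC_EVENT link-change interrupt is left disabled here.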
*/ 5051 tw32_f(MAC_EVENT, 0); 5052 } else { 5053 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5054 } 5055 udelay(40); 5056 5057 if (tg3_asic_rev(tp) == ASIC_REV_5700 && 5058 current_link_up && 5059 tp->link_config.active_speed == SPEED_1000 && 5060 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) { 5061 udelay(120); 5062 tw32_f(MAC_STATUS, 5063 (MAC_STATUS_SYNC_CHANGED | 5064 MAC_STATUS_CFG_CHANGED)); 5065 udelay(40); 5066 tg3_write_mem(tp, 5067 NIC_SRAM_FIRMWARE_MBOX, 5068 NIC_SRAM_FIRMWARE_MBOX_MAGIC2); 5069 } 5070 5071 /* Prevent send BD corruption. */ 5072 if (tg3_flag(tp, CLKREQ_BUG)) { 5073 if (tp->link_config.active_speed == SPEED_100 || 5074 tp->link_config.active_speed == SPEED_10) 5075 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL, 5076 PCI_EXP_LNKCTL_CLKREQ_EN); 5077 else 5078 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, 5079 PCI_EXP_LNKCTL_CLKREQ_EN); 5080 } 5081 5082 tg3_test_and_report_link_chg(tp, current_link_up); 5083 5084 return 0; 5085 } 5086 5087 struct tg3_fiber_aneginfo { 5088 int state; 5089 #define ANEG_STATE_UNKNOWN 0 5090 #define ANEG_STATE_AN_ENABLE 1 5091 #define ANEG_STATE_RESTART_INIT 2 5092 #define ANEG_STATE_RESTART 3 5093 #define ANEG_STATE_DISABLE_LINK_OK 4 5094 #define ANEG_STATE_ABILITY_DETECT_INIT 5 5095 #define ANEG_STATE_ABILITY_DETECT 6 5096 #define ANEG_STATE_ACK_DETECT_INIT 7 5097 #define ANEG_STATE_ACK_DETECT 8 5098 #define ANEG_STATE_COMPLETE_ACK_INIT 9 5099 #define ANEG_STATE_COMPLETE_ACK 10 5100 #define ANEG_STATE_IDLE_DETECT_INIT 11 5101 #define ANEG_STATE_IDLE_DETECT 12 5102 #define ANEG_STATE_LINK_OK 13 5103 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14 5104 #define ANEG_STATE_NEXT_PAGE_WAIT 15 5105 5106 u32 flags; 5107 #define MR_AN_ENABLE 0x00000001 5108 #define MR_RESTART_AN 0x00000002 5109 #define MR_AN_COMPLETE 0x00000004 5110 #define MR_PAGE_RX 0x00000008 5111 #define MR_NP_LOADED 0x00000010 5112 #define MR_TOGGLE_TX 0x00000020 5113 #define MR_LP_ADV_FULL_DUPLEX 0x00000040 5114 #define MR_LP_ADV_HALF_DUPLEX 0x00000080 5115 #define MR_LP_ADV_SYM_PAUSE 0x00000100 5116 #define MR_LP_ADV_ASYM_PAUSE 0x00000200 5117 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400 5118 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800 5119 #define MR_LP_ADV_NEXT_PAGE 0x00001000 5120 #define MR_TOGGLE_RX 0x00002000 5121 #define MR_NP_RX 0x00004000 5122 5123 #define MR_LINK_OK 0x80000000 5124 5125 unsigned long link_time, cur_time; 5126 5127 u32 ability_match_cfg; 5128 int ability_match_count; 5129 5130 char ability_match, idle_match, ack_match; 5131 5132 u32 txconfig, rxconfig; 5133 #define ANEG_CFG_NP 0x00000080 5134 #define ANEG_CFG_ACK 0x00000040 5135 #define ANEG_CFG_RF2 0x00000020 5136 #define ANEG_CFG_RF1 0x00000010 5137 #define ANEG_CFG_PS2 0x00000001 5138 #define ANEG_CFG_PS1 0x00008000 5139 #define ANEG_CFG_HD 0x00004000 5140 #define ANEG_CFG_FD 0x00002000 5141 #define ANEG_CFG_INVAL 0x00001f06 5142 5143 }; 5144 #define ANEG_OK 0 5145 #define ANEG_DONE 1 5146 #define ANEG_TIMER_ENAB 2 5147 #define ANEG_FAILED -1 5148 5149 #define ANEG_STATE_SETTLE_TIME 10000 5150 5151 static int tg3_fiber_aneg_smachine(struct tg3 *tp, 5152 struct tg3_fiber_aneginfo *ap) 5153 { 5154 u16 flowctrl; 5155 unsigned long delta; 5156 u32 rx_cfg_reg; 5157 int ret; 5158 5159 if (ap->state == ANEG_STATE_UNKNOWN) { 5160 ap->rxconfig = 0; 5161 ap->link_time = 0; 5162 ap->cur_time = 0; 5163 ap->ability_match_cfg = 0; 5164 ap->ability_match_count = 0; 5165 ap->ability_match = 0; 5166 ap->idle_match = 0; 5167 ap->ack_match = 0; 5168 } 5169 ap->cur_time++; 5170 5171 if 
(tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) { 5172 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG); 5173 5174 if (rx_cfg_reg != ap->ability_match_cfg) { 5175 ap->ability_match_cfg = rx_cfg_reg; 5176 ap->ability_match = 0; 5177 ap->ability_match_count = 0; 5178 } else { 5179 if (++ap->ability_match_count > 1) { 5180 ap->ability_match = 1; 5181 ap->ability_match_cfg = rx_cfg_reg; 5182 } 5183 } 5184 if (rx_cfg_reg & ANEG_CFG_ACK) 5185 ap->ack_match = 1; 5186 else 5187 ap->ack_match = 0; 5188 5189 ap->idle_match = 0; 5190 } else { 5191 ap->idle_match = 1; 5192 ap->ability_match_cfg = 0; 5193 ap->ability_match_count = 0; 5194 ap->ability_match = 0; 5195 ap->ack_match = 0; 5196 5197 rx_cfg_reg = 0; 5198 } 5199 5200 ap->rxconfig = rx_cfg_reg; 5201 ret = ANEG_OK; 5202 5203 switch (ap->state) { 5204 case ANEG_STATE_UNKNOWN: 5205 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) 5206 ap->state = ANEG_STATE_AN_ENABLE; 5207 5208 fallthrough; 5209 case ANEG_STATE_AN_ENABLE: 5210 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); 5211 if (ap->flags & MR_AN_ENABLE) { 5212 ap->link_time = 0; 5213 ap->cur_time = 0; 5214 ap->ability_match_cfg = 0; 5215 ap->ability_match_count = 0; 5216 ap->ability_match = 0; 5217 ap->idle_match = 0; 5218 ap->ack_match = 0; 5219 5220 ap->state = ANEG_STATE_RESTART_INIT; 5221 } else { 5222 ap->state = ANEG_STATE_DISABLE_LINK_OK; 5223 } 5224 break; 5225 5226 case ANEG_STATE_RESTART_INIT: 5227 ap->link_time = ap->cur_time; 5228 ap->flags &= ~(MR_NP_LOADED); 5229 ap->txconfig = 0; 5230 tw32(MAC_TX_AUTO_NEG, 0); 5231 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 5232 tw32_f(MAC_MODE, tp->mac_mode); 5233 udelay(40); 5234 5235 ret = ANEG_TIMER_ENAB; 5236 ap->state = ANEG_STATE_RESTART; 5237 5238 fallthrough; 5239 case ANEG_STATE_RESTART: 5240 delta = ap->cur_time - ap->link_time; 5241 if (delta > ANEG_STATE_SETTLE_TIME) 5242 ap->state = ANEG_STATE_ABILITY_DETECT_INIT; 5243 else 5244 ret = ANEG_TIMER_ENAB; 5245 break; 5246 5247 case ANEG_STATE_DISABLE_LINK_OK: 5248 ret = ANEG_DONE; 5249 break; 5250 5251 case ANEG_STATE_ABILITY_DETECT_INIT: 5252 ap->flags &= ~(MR_TOGGLE_TX); 5253 ap->txconfig = ANEG_CFG_FD; 5254 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 5255 if (flowctrl & ADVERTISE_1000XPAUSE) 5256 ap->txconfig |= ANEG_CFG_PS1; 5257 if (flowctrl & ADVERTISE_1000XPSE_ASYM) 5258 ap->txconfig |= ANEG_CFG_PS2; 5259 tw32(MAC_TX_AUTO_NEG, ap->txconfig); 5260 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 5261 tw32_f(MAC_MODE, tp->mac_mode); 5262 udelay(40); 5263 5264 ap->state = ANEG_STATE_ABILITY_DETECT; 5265 break; 5266 5267 case ANEG_STATE_ABILITY_DETECT: 5268 if (ap->ability_match != 0 && ap->rxconfig != 0) 5269 ap->state = ANEG_STATE_ACK_DETECT_INIT; 5270 break; 5271 5272 case ANEG_STATE_ACK_DETECT_INIT: 5273 ap->txconfig |= ANEG_CFG_ACK; 5274 tw32(MAC_TX_AUTO_NEG, ap->txconfig); 5275 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; 5276 tw32_f(MAC_MODE, tp->mac_mode); 5277 udelay(40); 5278 5279 ap->state = ANEG_STATE_ACK_DETECT; 5280 5281 fallthrough; 5282 case ANEG_STATE_ACK_DETECT: 5283 if (ap->ack_match != 0) { 5284 if ((ap->rxconfig & ~ANEG_CFG_ACK) == 5285 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) { 5286 ap->state = ANEG_STATE_COMPLETE_ACK_INIT; 5287 } else { 5288 ap->state = ANEG_STATE_AN_ENABLE; 5289 } 5290 } else if (ap->ability_match != 0 && 5291 ap->rxconfig == 0) { 5292 ap->state = ANEG_STATE_AN_ENABLE; 5293 } 5294 break; 5295 5296 case ANEG_STATE_COMPLETE_ACK_INIT: 5297 if (ap->rxconfig & ANEG_CFG_INVAL) { 5298 ret = ANEG_FAILED; 5299 break; 5300 } 5301 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX | 5302 
MR_LP_ADV_HALF_DUPLEX | 5303 MR_LP_ADV_SYM_PAUSE | 5304 MR_LP_ADV_ASYM_PAUSE | 5305 MR_LP_ADV_REMOTE_FAULT1 | 5306 MR_LP_ADV_REMOTE_FAULT2 | 5307 MR_LP_ADV_NEXT_PAGE | 5308 MR_TOGGLE_RX | 5309 MR_NP_RX); 5310 if (ap->rxconfig & ANEG_CFG_FD) 5311 ap->flags |= MR_LP_ADV_FULL_DUPLEX; 5312 if (ap->rxconfig & ANEG_CFG_HD) 5313 ap->flags |= MR_LP_ADV_HALF_DUPLEX; 5314 if (ap->rxconfig & ANEG_CFG_PS1) 5315 ap->flags |= MR_LP_ADV_SYM_PAUSE; 5316 if (ap->rxconfig & ANEG_CFG_PS2) 5317 ap->flags |= MR_LP_ADV_ASYM_PAUSE; 5318 if (ap->rxconfig & ANEG_CFG_RF1) 5319 ap->flags |= MR_LP_ADV_REMOTE_FAULT1; 5320 if (ap->rxconfig & ANEG_CFG_RF2) 5321 ap->flags |= MR_LP_ADV_REMOTE_FAULT2; 5322 if (ap->rxconfig & ANEG_CFG_NP) 5323 ap->flags |= MR_LP_ADV_NEXT_PAGE; 5324 5325 ap->link_time = ap->cur_time; 5326 5327 ap->flags ^= (MR_TOGGLE_TX); 5328 if (ap->rxconfig & 0x0008) 5329 ap->flags |= MR_TOGGLE_RX; 5330 if (ap->rxconfig & ANEG_CFG_NP) 5331 ap->flags |= MR_NP_RX; 5332 ap->flags |= MR_PAGE_RX; 5333 5334 ap->state = ANEG_STATE_COMPLETE_ACK; 5335 ret = ANEG_TIMER_ENAB; 5336 break; 5337 5338 case ANEG_STATE_COMPLETE_ACK: 5339 if (ap->ability_match != 0 && 5340 ap->rxconfig == 0) { 5341 ap->state = ANEG_STATE_AN_ENABLE; 5342 break; 5343 } 5344 delta = ap->cur_time - ap->link_time; 5345 if (delta > ANEG_STATE_SETTLE_TIME) { 5346 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) { 5347 ap->state = ANEG_STATE_IDLE_DETECT_INIT; 5348 } else { 5349 if ((ap->txconfig & ANEG_CFG_NP) == 0 && 5350 !(ap->flags & MR_NP_RX)) { 5351 ap->state = ANEG_STATE_IDLE_DETECT_INIT; 5352 } else { 5353 ret = ANEG_FAILED; 5354 } 5355 } 5356 } 5357 break; 5358 5359 case ANEG_STATE_IDLE_DETECT_INIT: 5360 ap->link_time = ap->cur_time; 5361 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; 5362 tw32_f(MAC_MODE, tp->mac_mode); 5363 udelay(40); 5364 5365 ap->state = ANEG_STATE_IDLE_DETECT; 5366 ret = ANEG_TIMER_ENAB; 5367 break; 5368 5369 case ANEG_STATE_IDLE_DETECT: 5370 if (ap->ability_match != 0 && 5371 ap->rxconfig == 0) { 5372 ap->state = ANEG_STATE_AN_ENABLE; 5373 break; 5374 } 5375 delta = ap->cur_time - ap->link_time; 5376 if (delta > ANEG_STATE_SETTLE_TIME) { 5377 /* XXX another gem from the Broadcom driver :( */ 5378 ap->state = ANEG_STATE_LINK_OK; 5379 } 5380 break; 5381 5382 case ANEG_STATE_LINK_OK: 5383 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK); 5384 ret = ANEG_DONE; 5385 break; 5386 5387 case ANEG_STATE_NEXT_PAGE_WAIT_INIT: 5388 /* ??? unimplemented */ 5389 break; 5390 5391 case ANEG_STATE_NEXT_PAGE_WAIT: 5392 /* ??? 
unimplemented */ 5393 break; 5394 5395 default: 5396 ret = ANEG_FAILED; 5397 break; 5398 } 5399 5400 return ret; 5401 } 5402 5403 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags) 5404 { 5405 int res = 0; 5406 struct tg3_fiber_aneginfo aninfo; 5407 int status = ANEG_FAILED; 5408 unsigned int tick; 5409 u32 tmp; 5410 5411 tw32_f(MAC_TX_AUTO_NEG, 0); 5412 5413 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; 5414 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII); 5415 udelay(40); 5416 5417 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS); 5418 udelay(40); 5419 5420 memset(&aninfo, 0, sizeof(aninfo)); 5421 aninfo.flags |= MR_AN_ENABLE; 5422 aninfo.state = ANEG_STATE_UNKNOWN; 5423 aninfo.cur_time = 0; 5424 tick = 0; 5425 while (++tick < 195000) { 5426 status = tg3_fiber_aneg_smachine(tp, &aninfo); 5427 if (status == ANEG_DONE || status == ANEG_FAILED) 5428 break; 5429 5430 udelay(1); 5431 } 5432 5433 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; 5434 tw32_f(MAC_MODE, tp->mac_mode); 5435 udelay(40); 5436 5437 *txflags = aninfo.txconfig; 5438 *rxflags = aninfo.flags; 5439 5440 if (status == ANEG_DONE && 5441 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK | 5442 MR_LP_ADV_FULL_DUPLEX))) 5443 res = 1; 5444 5445 return res; 5446 } 5447 5448 static void tg3_init_bcm8002(struct tg3 *tp) 5449 { 5450 u32 mac_status = tr32(MAC_STATUS); 5451 int i; 5452 5453 /* Reset when initting first time or we have a link. */ 5454 if (tg3_flag(tp, INIT_COMPLETE) && 5455 !(mac_status & MAC_STATUS_PCS_SYNCED)) 5456 return; 5457 5458 /* Set PLL lock range. */ 5459 tg3_writephy(tp, 0x16, 0x8007); 5460 5461 /* SW reset */ 5462 tg3_writephy(tp, MII_BMCR, BMCR_RESET); 5463 5464 /* Wait for reset to complete. */ 5465 /* XXX schedule_timeout() ... */ 5466 for (i = 0; i < 500; i++) 5467 udelay(10); 5468 5469 /* Config mode; select PMA/Ch 1 regs. */ 5470 tg3_writephy(tp, 0x10, 0x8411); 5471 5472 /* Enable auto-lock and comdet, select txclk for tx. */ 5473 tg3_writephy(tp, 0x11, 0x0a10); 5474 5475 tg3_writephy(tp, 0x18, 0x00a0); 5476 tg3_writephy(tp, 0x16, 0x41ff); 5477 5478 /* Assert and deassert POR. */ 5479 tg3_writephy(tp, 0x13, 0x0400); 5480 udelay(40); 5481 tg3_writephy(tp, 0x13, 0x0000); 5482 5483 tg3_writephy(tp, 0x11, 0x0a50); 5484 udelay(40); 5485 tg3_writephy(tp, 0x11, 0x0a10); 5486 5487 /* Wait for signal to stabilize */ 5488 /* XXX schedule_timeout() ... */ 5489 for (i = 0; i < 15000; i++) 5490 udelay(10); 5491 5492 /* Deselect the channel register so we can read the PHYID 5493 * later. 
5494 */ 5495 tg3_writephy(tp, 0x10, 0x8011); 5496 } 5497 5498 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) 5499 { 5500 u16 flowctrl; 5501 bool current_link_up; 5502 u32 sg_dig_ctrl, sg_dig_status; 5503 u32 serdes_cfg, expected_sg_dig_ctrl; 5504 int workaround, port_a; 5505 5506 serdes_cfg = 0; 5507 workaround = 0; 5508 port_a = 1; 5509 current_link_up = false; 5510 5511 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 && 5512 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) { 5513 workaround = 1; 5514 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) 5515 port_a = 0; 5516 5517 /* preserve bits 0-11,13,14 for signal pre-emphasis */ 5518 /* preserve bits 20-23 for voltage regulator */ 5519 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff; 5520 } 5521 5522 sg_dig_ctrl = tr32(SG_DIG_CTRL); 5523 5524 if (tp->link_config.autoneg != AUTONEG_ENABLE) { 5525 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) { 5526 if (workaround) { 5527 u32 val = serdes_cfg; 5528 5529 if (port_a) 5530 val |= 0xc010000; 5531 else 5532 val |= 0x4010000; 5533 tw32_f(MAC_SERDES_CFG, val); 5534 } 5535 5536 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); 5537 } 5538 if (mac_status & MAC_STATUS_PCS_SYNCED) { 5539 tg3_setup_flow_control(tp, 0, 0); 5540 current_link_up = true; 5541 } 5542 goto out; 5543 } 5544 5545 /* Want auto-negotiation. */ 5546 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP; 5547 5548 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 5549 if (flowctrl & ADVERTISE_1000XPAUSE) 5550 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP; 5551 if (flowctrl & ADVERTISE_1000XPSE_ASYM) 5552 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE; 5553 5554 if (sg_dig_ctrl != expected_sg_dig_ctrl) { 5555 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) && 5556 tp->serdes_counter && 5557 ((mac_status & (MAC_STATUS_PCS_SYNCED | 5558 MAC_STATUS_RCVD_CFG)) == 5559 MAC_STATUS_PCS_SYNCED)) { 5560 tp->serdes_counter--; 5561 current_link_up = true; 5562 goto out; 5563 } 5564 restart_autoneg: 5565 if (workaround) 5566 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000); 5567 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET); 5568 udelay(5); 5569 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl); 5570 5571 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; 5572 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5573 } else if (mac_status & (MAC_STATUS_PCS_SYNCED | 5574 MAC_STATUS_SIGNAL_DET)) { 5575 sg_dig_status = tr32(SG_DIG_STATUS); 5576 mac_status = tr32(MAC_STATUS); 5577 5578 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) && 5579 (mac_status & MAC_STATUS_PCS_SYNCED)) { 5580 u32 local_adv = 0, remote_adv = 0; 5581 5582 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP) 5583 local_adv |= ADVERTISE_1000XPAUSE; 5584 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE) 5585 local_adv |= ADVERTISE_1000XPSE_ASYM; 5586 5587 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE) 5588 remote_adv |= LPA_1000XPAUSE; 5589 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE) 5590 remote_adv |= LPA_1000XPAUSE_ASYM; 5591 5592 tp->link_config.rmt_adv = 5593 mii_adv_to_ethtool_adv_x(remote_adv); 5594 5595 tg3_setup_flow_control(tp, local_adv, remote_adv); 5596 current_link_up = true; 5597 tp->serdes_counter = 0; 5598 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5599 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) { 5600 if (tp->serdes_counter) 5601 tp->serdes_counter--; 5602 else { 5603 if (workaround) { 5604 u32 val = serdes_cfg; 5605 5606 if (port_a) 5607 val |= 0xc010000; 5608 else 5609 val |= 0x4010000; 5610 5611 tw32_f(MAC_SERDES_CFG, val); 5612 } 5613 5614 
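/* Autoneg did not complete before the timeout; drop out of hardware
 * autoneg and see whether the link comes up via parallel detection.
 */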
tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); 5615 udelay(40); 5616 5617 /* Link parallel detection - link is up */ 5618 /* only if we have PCS_SYNC and not */ 5619 /* receiving config code words */ 5620 mac_status = tr32(MAC_STATUS); 5621 if ((mac_status & MAC_STATUS_PCS_SYNCED) && 5622 !(mac_status & MAC_STATUS_RCVD_CFG)) { 5623 tg3_setup_flow_control(tp, 0, 0); 5624 current_link_up = true; 5625 tp->phy_flags |= 5626 TG3_PHYFLG_PARALLEL_DETECT; 5627 tp->serdes_counter = 5628 SERDES_PARALLEL_DET_TIMEOUT; 5629 } else 5630 goto restart_autoneg; 5631 } 5632 } 5633 } else { 5634 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; 5635 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5636 } 5637 5638 out: 5639 return current_link_up; 5640 } 5641 5642 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) 5643 { 5644 bool current_link_up = false; 5645 5646 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) 5647 goto out; 5648 5649 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 5650 u32 txflags, rxflags; 5651 int i; 5652 5653 if (fiber_autoneg(tp, &txflags, &rxflags)) { 5654 u32 local_adv = 0, remote_adv = 0; 5655 5656 if (txflags & ANEG_CFG_PS1) 5657 local_adv |= ADVERTISE_1000XPAUSE; 5658 if (txflags & ANEG_CFG_PS2) 5659 local_adv |= ADVERTISE_1000XPSE_ASYM; 5660 5661 if (rxflags & MR_LP_ADV_SYM_PAUSE) 5662 remote_adv |= LPA_1000XPAUSE; 5663 if (rxflags & MR_LP_ADV_ASYM_PAUSE) 5664 remote_adv |= LPA_1000XPAUSE_ASYM; 5665 5666 tp->link_config.rmt_adv = 5667 mii_adv_to_ethtool_adv_x(remote_adv); 5668 5669 tg3_setup_flow_control(tp, local_adv, remote_adv); 5670 5671 current_link_up = true; 5672 } 5673 for (i = 0; i < 30; i++) { 5674 udelay(20); 5675 tw32_f(MAC_STATUS, 5676 (MAC_STATUS_SYNC_CHANGED | 5677 MAC_STATUS_CFG_CHANGED)); 5678 udelay(40); 5679 if ((tr32(MAC_STATUS) & 5680 (MAC_STATUS_SYNC_CHANGED | 5681 MAC_STATUS_CFG_CHANGED)) == 0) 5682 break; 5683 } 5684 5685 mac_status = tr32(MAC_STATUS); 5686 if (!current_link_up && 5687 (mac_status & MAC_STATUS_PCS_SYNCED) && 5688 !(mac_status & MAC_STATUS_RCVD_CFG)) 5689 current_link_up = true; 5690 } else { 5691 tg3_setup_flow_control(tp, 0, 0); 5692 5693 /* Forcing 1000FD link up. 
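 * A forced fiber link always runs at 1000 Mb/s full duplex.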
*/ 5694 current_link_up = true; 5695 5696 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); 5697 udelay(40); 5698 5699 tw32_f(MAC_MODE, tp->mac_mode); 5700 udelay(40); 5701 } 5702 5703 out: 5704 return current_link_up; 5705 } 5706 5707 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset) 5708 { 5709 u32 orig_pause_cfg; 5710 u32 orig_active_speed; 5711 u8 orig_active_duplex; 5712 u32 mac_status; 5713 bool current_link_up; 5714 int i; 5715 5716 orig_pause_cfg = tp->link_config.active_flowctrl; 5717 orig_active_speed = tp->link_config.active_speed; 5718 orig_active_duplex = tp->link_config.active_duplex; 5719 5720 if (!tg3_flag(tp, HW_AUTONEG) && 5721 tp->link_up && 5722 tg3_flag(tp, INIT_COMPLETE)) { 5723 mac_status = tr32(MAC_STATUS); 5724 mac_status &= (MAC_STATUS_PCS_SYNCED | 5725 MAC_STATUS_SIGNAL_DET | 5726 MAC_STATUS_CFG_CHANGED | 5727 MAC_STATUS_RCVD_CFG); 5728 if (mac_status == (MAC_STATUS_PCS_SYNCED | 5729 MAC_STATUS_SIGNAL_DET)) { 5730 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | 5731 MAC_STATUS_CFG_CHANGED)); 5732 return 0; 5733 } 5734 } 5735 5736 tw32_f(MAC_TX_AUTO_NEG, 0); 5737 5738 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); 5739 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI; 5740 tw32_f(MAC_MODE, tp->mac_mode); 5741 udelay(40); 5742 5743 if (tp->phy_id == TG3_PHY_ID_BCM8002) 5744 tg3_init_bcm8002(tp); 5745 5746 /* Enable link change event even when serdes polling. */ 5747 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5748 udelay(40); 5749 5750 tp->link_config.rmt_adv = 0; 5751 mac_status = tr32(MAC_STATUS); 5752 5753 if (tg3_flag(tp, HW_AUTONEG)) 5754 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); 5755 else 5756 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); 5757 5758 tp->napi[0].hw_status->status = 5759 (SD_STATUS_UPDATED | 5760 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG)); 5761 5762 for (i = 0; i < 100; i++) { 5763 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | 5764 MAC_STATUS_CFG_CHANGED)); 5765 udelay(5); 5766 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED | 5767 MAC_STATUS_CFG_CHANGED | 5768 MAC_STATUS_LNKSTATE_CHANGED)) == 0) 5769 break; 5770 } 5771 5772 mac_status = tr32(MAC_STATUS); 5773 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) { 5774 current_link_up = false; 5775 if (tp->link_config.autoneg == AUTONEG_ENABLE && 5776 tp->serdes_counter == 0) { 5777 tw32_f(MAC_MODE, (tp->mac_mode | 5778 MAC_MODE_SEND_CONFIGS)); 5779 udelay(1); 5780 tw32_f(MAC_MODE, tp->mac_mode); 5781 } 5782 } 5783 5784 if (current_link_up) { 5785 tp->link_config.active_speed = SPEED_1000; 5786 tp->link_config.active_duplex = DUPLEX_FULL; 5787 tw32(MAC_LED_CTRL, (tp->led_ctrl | 5788 LED_CTRL_LNKLED_OVERRIDE | 5789 LED_CTRL_1000MBPS_ON)); 5790 } else { 5791 tp->link_config.active_speed = SPEED_UNKNOWN; 5792 tp->link_config.active_duplex = DUPLEX_UNKNOWN; 5793 tw32(MAC_LED_CTRL, (tp->led_ctrl | 5794 LED_CTRL_LNKLED_OVERRIDE | 5795 LED_CTRL_TRAFFIC_OVERRIDE)); 5796 } 5797 5798 if (!tg3_test_and_report_link_chg(tp, current_link_up)) { 5799 u32 now_pause_cfg = tp->link_config.active_flowctrl; 5800 if (orig_pause_cfg != now_pause_cfg || 5801 orig_active_speed != tp->link_config.active_speed || 5802 orig_active_duplex != tp->link_config.active_duplex) 5803 tg3_link_report(tp); 5804 } 5805 5806 return 0; 5807 } 5808 5809 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset) 5810 { 5811 int err = 0; 5812 u32 bmsr, bmcr; 5813 u32 current_speed = SPEED_UNKNOWN; 5814 u8 current_duplex = DUPLEX_UNKNOWN; 5815 bool 
current_link_up = false; 5816 u32 local_adv, remote_adv, sgsr; 5817 5818 if ((tg3_asic_rev(tp) == ASIC_REV_5719 || 5819 tg3_asic_rev(tp) == ASIC_REV_5720) && 5820 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) && 5821 (sgsr & SERDES_TG3_SGMII_MODE)) { 5822 5823 if (force_reset) 5824 tg3_phy_reset(tp); 5825 5826 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; 5827 5828 if (!(sgsr & SERDES_TG3_LINK_UP)) { 5829 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5830 } else { 5831 current_link_up = true; 5832 if (sgsr & SERDES_TG3_SPEED_1000) { 5833 current_speed = SPEED_1000; 5834 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5835 } else if (sgsr & SERDES_TG3_SPEED_100) { 5836 current_speed = SPEED_100; 5837 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 5838 } else { 5839 current_speed = SPEED_10; 5840 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 5841 } 5842 5843 if (sgsr & SERDES_TG3_FULL_DUPLEX) 5844 current_duplex = DUPLEX_FULL; 5845 else 5846 current_duplex = DUPLEX_HALF; 5847 } 5848 5849 tw32_f(MAC_MODE, tp->mac_mode); 5850 udelay(40); 5851 5852 tg3_clear_mac_status(tp); 5853 5854 goto fiber_setup_done; 5855 } 5856 5857 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5858 tw32_f(MAC_MODE, tp->mac_mode); 5859 udelay(40); 5860 5861 tg3_clear_mac_status(tp); 5862 5863 if (force_reset) 5864 tg3_phy_reset(tp); 5865 5866 tp->link_config.rmt_adv = 0; 5867 5868 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5869 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5870 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 5871 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 5872 bmsr |= BMSR_LSTATUS; 5873 else 5874 bmsr &= ~BMSR_LSTATUS; 5875 } 5876 5877 err |= tg3_readphy(tp, MII_BMCR, &bmcr); 5878 5879 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && 5880 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { 5881 /* do nothing, just check for link up at the end */ 5882 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { 5883 u32 adv, newadv; 5884 5885 err |= tg3_readphy(tp, MII_ADVERTISE, &adv); 5886 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | 5887 ADVERTISE_1000XPAUSE | 5888 ADVERTISE_1000XPSE_ASYM | 5889 ADVERTISE_SLCT); 5890 5891 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); 5892 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising); 5893 5894 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) { 5895 tg3_writephy(tp, MII_ADVERTISE, newadv); 5896 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART; 5897 tg3_writephy(tp, MII_BMCR, bmcr); 5898 5899 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5900 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; 5901 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5902 5903 return err; 5904 } 5905 } else { 5906 u32 new_bmcr; 5907 5908 bmcr &= ~BMCR_SPEED1000; 5909 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX); 5910 5911 if (tp->link_config.duplex == DUPLEX_FULL) 5912 new_bmcr |= BMCR_FULLDPLX; 5913 5914 if (new_bmcr != bmcr) { 5915 /* BMCR_SPEED1000 is a reserved bit that needs 5916 * to be set on write. 
5917 */ 5918 new_bmcr |= BMCR_SPEED1000; 5919 5920 /* Force a linkdown */ 5921 if (tp->link_up) { 5922 u32 adv; 5923 5924 err |= tg3_readphy(tp, MII_ADVERTISE, &adv); 5925 adv &= ~(ADVERTISE_1000XFULL | 5926 ADVERTISE_1000XHALF | 5927 ADVERTISE_SLCT); 5928 tg3_writephy(tp, MII_ADVERTISE, adv); 5929 tg3_writephy(tp, MII_BMCR, bmcr | 5930 BMCR_ANRESTART | 5931 BMCR_ANENABLE); 5932 udelay(10); 5933 tg3_carrier_off(tp); 5934 } 5935 tg3_writephy(tp, MII_BMCR, new_bmcr); 5936 bmcr = new_bmcr; 5937 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5938 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5939 if (tg3_asic_rev(tp) == ASIC_REV_5714) { 5940 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 5941 bmsr |= BMSR_LSTATUS; 5942 else 5943 bmsr &= ~BMSR_LSTATUS; 5944 } 5945 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5946 } 5947 } 5948 5949 if (bmsr & BMSR_LSTATUS) { 5950 current_speed = SPEED_1000; 5951 current_link_up = true; 5952 if (bmcr & BMCR_FULLDPLX) 5953 current_duplex = DUPLEX_FULL; 5954 else 5955 current_duplex = DUPLEX_HALF; 5956 5957 local_adv = 0; 5958 remote_adv = 0; 5959 5960 if (bmcr & BMCR_ANENABLE) { 5961 u32 common; 5962 5963 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv); 5964 err |= tg3_readphy(tp, MII_LPA, &remote_adv); 5965 common = local_adv & remote_adv; 5966 if (common & (ADVERTISE_1000XHALF | 5967 ADVERTISE_1000XFULL)) { 5968 if (common & ADVERTISE_1000XFULL) 5969 current_duplex = DUPLEX_FULL; 5970 else 5971 current_duplex = DUPLEX_HALF; 5972 5973 tp->link_config.rmt_adv = 5974 mii_adv_to_ethtool_adv_x(remote_adv); 5975 } else if (!tg3_flag(tp, 5780_CLASS)) { 5976 /* Link is up via parallel detect */ 5977 } else { 5978 current_link_up = false; 5979 } 5980 } 5981 } 5982 5983 fiber_setup_done: 5984 if (current_link_up && current_duplex == DUPLEX_FULL) 5985 tg3_setup_flow_control(tp, local_adv, remote_adv); 5986 5987 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; 5988 if (tp->link_config.active_duplex == DUPLEX_HALF) 5989 tp->mac_mode |= MAC_MODE_HALF_DUPLEX; 5990 5991 tw32_f(MAC_MODE, tp->mac_mode); 5992 udelay(40); 5993 5994 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5995 5996 tp->link_config.active_speed = current_speed; 5997 tp->link_config.active_duplex = current_duplex; 5998 5999 tg3_test_and_report_link_chg(tp, current_link_up); 6000 return err; 6001 } 6002 6003 static void tg3_serdes_parallel_detect(struct tg3 *tp) 6004 { 6005 if (tp->serdes_counter) { 6006 /* Give autoneg time to complete. */ 6007 tp->serdes_counter--; 6008 return; 6009 } 6010 6011 if (!tp->link_up && 6012 (tp->link_config.autoneg == AUTONEG_ENABLE)) { 6013 u32 bmcr; 6014 6015 tg3_readphy(tp, MII_BMCR, &bmcr); 6016 if (bmcr & BMCR_ANENABLE) { 6017 u32 phy1, phy2; 6018 6019 /* Select shadow register 0x1f */ 6020 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00); 6021 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1); 6022 6023 /* Select expansion interrupt status register */ 6024 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 6025 MII_TG3_DSP_EXP1_INT_STAT); 6026 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); 6027 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); 6028 6029 if ((phy1 & 0x10) && !(phy2 & 0x20)) { 6030 /* We have signal detect and not receiving 6031 * config code words, link is up by parallel 6032 * detection. 
6033 */ 6034 6035 bmcr &= ~BMCR_ANENABLE; 6036 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; 6037 tg3_writephy(tp, MII_BMCR, bmcr); 6038 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT; 6039 } 6040 } 6041 } else if (tp->link_up && 6042 (tp->link_config.autoneg == AUTONEG_ENABLE) && 6043 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { 6044 u32 phy2; 6045 6046 /* Select expansion interrupt status register */ 6047 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 6048 MII_TG3_DSP_EXP1_INT_STAT); 6049 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); 6050 if (phy2 & 0x20) { 6051 u32 bmcr; 6052 6053 /* Config code words received, turn on autoneg. */ 6054 tg3_readphy(tp, MII_BMCR, &bmcr); 6055 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE); 6056 6057 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 6058 6059 } 6060 } 6061 } 6062 6063 static int tg3_setup_phy(struct tg3 *tp, bool force_reset) 6064 { 6065 u32 val; 6066 int err; 6067 6068 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 6069 err = tg3_setup_fiber_phy(tp, force_reset); 6070 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 6071 err = tg3_setup_fiber_mii_phy(tp, force_reset); 6072 else 6073 err = tg3_setup_copper_phy(tp, force_reset); 6074 6075 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) { 6076 u32 scale; 6077 6078 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; 6079 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5) 6080 scale = 65; 6081 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25) 6082 scale = 6; 6083 else 6084 scale = 12; 6085 6086 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK; 6087 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT); 6088 tw32(GRC_MISC_CFG, val); 6089 } 6090 6091 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | 6092 (6 << TX_LENGTHS_IPG_SHIFT); 6093 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 6094 tg3_asic_rev(tp) == ASIC_REV_5762) 6095 val |= tr32(MAC_TX_LENGTHS) & 6096 (TX_LENGTHS_JMB_FRM_LEN_MSK | 6097 TX_LENGTHS_CNT_DWN_VAL_MSK); 6098 6099 if (tp->link_config.active_speed == SPEED_1000 && 6100 tp->link_config.active_duplex == DUPLEX_HALF) 6101 tw32(MAC_TX_LENGTHS, val | 6102 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)); 6103 else 6104 tw32(MAC_TX_LENGTHS, val | 6105 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); 6106 6107 if (!tg3_flag(tp, 5705_PLUS)) { 6108 if (tp->link_up) { 6109 tw32(HOSTCC_STAT_COAL_TICKS, 6110 tp->coal.stats_block_coalesce_usecs); 6111 } else { 6112 tw32(HOSTCC_STAT_COAL_TICKS, 0); 6113 } 6114 } 6115 6116 if (tg3_flag(tp, ASPM_WORKAROUND)) { 6117 val = tr32(PCIE_PWR_MGMT_THRESH); 6118 if (!tp->link_up) 6119 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | 6120 tp->pwrmgmt_thresh; 6121 else 6122 val |= PCIE_PWR_MGMT_L1_THRESH_MSK; 6123 tw32(PCIE_PWR_MGMT_THRESH, val); 6124 } 6125 6126 return err; 6127 } 6128 6129 /* tp->lock must be held */ 6130 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts) 6131 { 6132 u64 stamp; 6133 6134 ptp_read_system_prets(sts); 6135 stamp = tr32(TG3_EAV_REF_CLCK_LSB); 6136 ptp_read_system_postts(sts); 6137 stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32; 6138 6139 return stamp; 6140 } 6141 6142 /* tp->lock must be held */ 6143 static void tg3_refclk_write(struct tg3 *tp, u64 newval) 6144 { 6145 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL); 6146 6147 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP); 6148 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff); 6149 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32); 6150 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME); 6151 } 6152 6153 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync); 6154 static inline void 

static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
{
	struct tg3 *tp = netdev_priv(dev);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;

	if (tg3_flag(tp, PTP_CAPABLE)) {
		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
					SOF_TIMESTAMPING_RX_HARDWARE |
					SOF_TIMESTAMPING_RAW_HARDWARE;
	}

	if (tp->ptp_clock)
		info->phc_index = ptp_clock_index(tp->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
	return 0;
}

static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	u64 correction;
	bool neg_adj;

	/* Frequency adjustment is performed using hardware with a 24 bit
	 * accumulator and a programmable correction value. On each clk, the
	 * correction value gets added to the accumulator and when it
	 * overflows, the time counter is incremented/decremented.
	 */
	neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);

	tg3_full_lock(tp, 0);

	if (correction)
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
		     TG3_EAV_REF_CLK_CORRECT_EN |
		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
		     ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
	else
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);

	tg3_full_unlock(tp);

	return 0;
}
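
/* An informal worked example of the math above, assuming the
 * diff_by_scaled_ppm() helper from <linux/ptp_clock_kernel.h>:
 * scaled_ppm is ppm scaled by 2^16, so a +1 ppm request arrives as
 * 65536 and yields correction = (1 << 24) * 65536 / (1000000 << 16)
 * = 16 (truncated). Adding 16 to the 24-bit accumulator on every
 * clock nudges the counter by 16 / 2^24 ~= 0.95 ppm, as close to
 * 1 ppm as the hardware granularity allows.
 */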

static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);

	return 0;
}

static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
			    struct ptp_system_timestamp *sts)
{
	u64 ns;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	ns = tg3_refclk_read(tp, sts);
	ns += tp->ptp_adjust;
	tg3_full_unlock(tp);

	*ts = ns_to_timespec64(ns);

	return 0;
}

static int tg3_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec64 *ts)
{
	u64 ns;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	ns = timespec64_to_ns(ts);

	tg3_full_lock(tp, 0);
	tg3_refclk_write(tp, ns);
	tp->ptp_adjust = 0;
	tg3_full_unlock(tp);

	return 0;
}

static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	u32 clock_ctl;
	int rval = 0;

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
		/* Reject requests with unsupported flags */
		if (rq->perout.flags)
			return -EOPNOTSUPP;

		if (rq->perout.index != 0)
			return -EINVAL;

		tg3_full_lock(tp, 0);
		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;

		if (on) {
			u64 nsec;

			nsec = rq->perout.start.sec * 1000000000ULL +
			       rq->perout.start.nsec;

			if (rq->perout.period.sec || rq->perout.period.nsec) {
				netdev_warn(tp->dev,
					    "Device supports only a one-shot timesync output, period must be 0\n");
				rval = -EINVAL;
				goto err_out;
			}

			if (nsec & (1ULL << 63)) {
				netdev_warn(tp->dev,
					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
				rval = -EINVAL;
				goto err_out;
			}

			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
			tw32(TG3_EAV_WATCHDOG0_MSB,
			     TG3_EAV_WATCHDOG0_EN |
			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));

			tw32(TG3_EAV_REF_CLCK_CTL,
			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
		} else {
			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
		}

err_out:
		tg3_full_unlock(tp);
		return rval;

	default:
		break;
	}

	return -EOPNOTSUPP;
}

static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
				     struct skb_shared_hwtstamps *timestamp)
{
	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
	timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
					  tp->ptp_adjust);
}

static void tg3_read_tx_tstamp(struct tg3 *tp, u64 *hwclock)
{
	*hwclock = tr32(TG3_TX_TSTAMP_LSB);
	*hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
}

static long tg3_ptp_ts_aux_work(struct ptp_clock_info *ptp)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	struct skb_shared_hwtstamps timestamp;
	u64 hwclock;

	if (tp->ptp_txts_retrycnt > 2)
		goto done;

	tg3_read_tx_tstamp(tp, &hwclock);

	if (hwclock != tp->pre_tx_ts) {
		tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
		skb_tstamp_tx(tp->tx_tstamp_skb, &timestamp);
		goto done;
	}
	tp->ptp_txts_retrycnt++;
	return HZ / 10;
done:
	dev_consume_skb_any(tp->tx_tstamp_skb);
	tp->tx_tstamp_skb = NULL;
	tp->ptp_txts_retrycnt = 0;
	tp->pre_tx_ts = 0;
	return -1;
}
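
/* A note on the return values above: per the ptp do_aux_work
 * convention, a non-negative return is the delay in jiffies until the
 * worker should run again (HZ / 10 here, i.e. retry the timestamp
 * read in roughly 100ms), and a negative return stops rescheduling.
 */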

static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 1,
	.n_pins		= 0,
	.pps		= 0,
	.adjfine	= tg3_ptp_adjfine,
	.adjtime	= tg3_ptp_adjtime,
	.do_aux_work	= tg3_ptp_ts_aux_work,
	.gettimex64	= tg3_ptp_gettimex,
	.settime64	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};

/* tp->lock must be held */
static void tg3_ptp_init(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	/* Initialize the hardware clock to the system time. */
	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
	tp->ptp_adjust = 0;
	tp->ptp_info = tg3_ptp_caps;
}

/* tp->lock must be held */
static void tg3_ptp_resume(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
	tp->ptp_adjust = 0;
}

static void tg3_ptp_fini(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
		return;

	ptp_clock_unregister(tp->ptp_clock);
	tp->ptp_clock = NULL;
	tp->ptp_adjust = 0;
	dev_consume_skb_any(tp->tx_tstamp_skb);
	tp->tx_tstamp_skb = NULL;
}

static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}

static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	int i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
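
/* Note that the dst adjustment above makes each register block land
 * at the same byte offset within the dump buffer that the block
 * occupies in MMIO space, so offsets in the register dump below line
 * up with the hardware register offsets.
 */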

static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}

static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			   "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
			   "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}

/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	tg3_flag_set(tp, TX_RECOVERY_PENDING);
}

static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
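
/* A quick example of the wraparound arithmetic above, with
 * TG3_TX_RING_SIZE == 512: if tx_prod == 5 and tx_cons == 510, then
 * (5 - 510) & 511 == 7, i.e. seven descriptors are in flight across
 * the wrap, and tg3_tx_avail() returns tx_pending - 7.
 */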

/* Tigon3 never reports partial packet sends. So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		bool complete_skb_later = false;
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock;

			tg3_read_tx_tstamp(tp, &hwclock);
			if (hwclock != tp->pre_tx_ts) {
				tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
				skb_tstamp_tx(skb, &timestamp);
				tp->pre_tx_ts = 0;
			} else {
				tp->tx_tstamp_skb = skb;
				complete_skb_later = true;
			}
		}

		dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
				 skb_headlen(skb), DMA_TO_DEVICE);

		ri->skb = NULL;

		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			dma_unmap_page(&tp->pdev->dev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       DMA_TO_DEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		if (!complete_skb_later)
			dev_consume_skb_any(skb);
		else
			ptp_schedule_worker(tp->ptp_clock, 0);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to __tg3_start_xmit()
	 * before checking for netif_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that __tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}

static void tg3_frag_free(bool is_frag, void *data)
{
	if (is_frag)
		skb_free_frag(data);
	else
		kfree(data);
}

static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (!ri->data)
		return;

	dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
			 DMA_FROM_DEVICE);
	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
	ri->data = NULL;
}

/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses. For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address). Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (skb_size <= PAGE_SIZE) {
		data = napi_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
				 data_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
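
/* Allocation strategy note: when the data plus the shared_info fits
 * in a single page, the buffer comes from the per-CPU page-frag cache
 * (napi_alloc_frag()) and is later wrapped with build_skb() in
 * tg3_rx(); larger (jumbo) buffers fall back to kmalloc() and
 * slab_build_skb(). tg3_frag_free() mirrors the same split.
 */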

/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant. See notes above
 * tg3_alloc_rx_data for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}

/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host. The chip does not write into the original descriptor the
 * RX buffer was obtained from. The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
 * it is first placed into the on-chip ram. When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective. If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
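
/* One informal way to picture the flow described above:
 *
 *	host --(std/jumbo producer rings)--> chip DMA engine
 *	chip --(return "status" ring)------> host, i.e. tg3_rx()
 *
 * The opaque cookie the chip echoes back in each return-ring entry
 * tells tg3_rx() which producer ring, and which index within it, the
 * completed buffer originally came from.
 */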
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;
		u64 tstamp = 0;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if (desc->err_vlan & RXD_ERR_MASK) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* The card keeps track of the other statistics. */
			tnapi->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV1 ||
		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV2) {
			tstamp = tr32(TG3_RX_TSTAMP_LSB);
			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
		}

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;
			unsigned int frag_size;

			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						     *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
					 DMA_FROM_DEVICE);

			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

			if (frag_size)
				skb = build_skb(data, frag_size);
			else
				skb = slab_build_skb(data);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
		} else {
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
						DMA_FROM_DEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
						   len, DMA_FROM_DEVICE);
		}

		skb_put(skb, len);
		if (tstamp)
			tg3_hwclock_to_timestamp(tp, tstamp,
						 skb_hwtstamps(skb));

		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q) &&
		    skb->protocol != htons(ETH_P_8021AD)) {
			dev_kfree_skb_any(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}

static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				       (MAC_STATUS_SYNC_CHANGED |
					MAC_STATUS_CFG_CHANGED |
					MAC_STATUS_MI_COMPLETION |
					MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, false);
			spin_unlock(&tp->lock);
		}
	}
}
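
/* With RSS, only tp->napi[0] owns the producer rings the hardware
 * refills from, while the other vectors consume and recycle buffers
 * into per-vector shadow rings. tg3_rx_prodring_xfer() drains a
 * vector's shadow ring (spr) back into the real ring (dpr); it is
 * driven from tg3_poll_work() on tp->napi[1].
 */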
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_rx() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_rx() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}

static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}

static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}

static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}

static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete_done(napi, work_done);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			break;
		}
	}

	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}

static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}

static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete_done(napi, work_done);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}

static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	netif_trans_update(tp->dev);	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}

/* tp->lock must be held */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_irq_quiesce(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	spin_unlock_bh(&tp->lock);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	spin_lock_bh(&tp->lock);
}

/* Fully shut down all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well. Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
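
/* Typical usage, as seen throughout this file: pass irq_sync == 0 for
 * ordinary configuration changes that can tolerate concurrent IRQs,
 * e.g.
 *
 *	tg3_full_lock(tp, 0);
 *	... touch shared state ...
 *	tg3_full_unlock(tp);
 *
 * and irq_sync == 1 when taking the device down, so that in-flight
 * interrupt handlers are drained via tg3_irq_quiesce() first.
 */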

/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}

static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block has been posted.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts. The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps? re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}

static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block has been posted.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts. The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream. We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled. Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}

/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif

static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}

/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return base + len + 8 < base;
}

/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
 * of any 4GB boundaries: 4G, 8G, etc
 */
static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					   u32 len, u32 mss)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
		u32 base = (u32) mapping & 0xffffffff;

		return ((base + len + (mss & 0x3fff)) < base);
	}
	return 0;
}

/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
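
/* A worked example of the 4GB tests above: for mapping == 0xfffff000
 * and len == 0x2000, the low 32 bits of base + len wrap around to
 * 0x00001000 < base, so the buffer straddles a 4GB boundary and must
 * be handled by the hwbug workaround. The tests deliberately rely on
 * u32 wraparound: a sum smaller than its base means the low 32 bits
 * overflowed, i.e. the span crosses a 4GB boundary.
 */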

static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}

static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8-byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
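
/* An example of the split logic above, assuming dma_limit == 4096:
 * for len == 4100 the first pass would emit a 4096-byte BD and leave
 * a 4-byte remainder, which would itself trip the short-DMA bug.
 * Instead the halving branch emits a 2048-byte BD and leaves 2052
 * bytes for the tail, so no BD is ever 8 bytes or smaller.
 */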

static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
			 skb_headlen(skb), DMA_TO_DEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		dma_unmap_page(&tnapi->tp->pdev->dev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), DMA_TO_DEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}

/* Work around 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
					  new_skb->len, DMA_TO_DEVICE);
		/* Make sure the mapping succeeded */
		if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
			dev_kfree_skb_any(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb_any(new_skb);
				ret = -1;
			}
		}
	}

	dev_consume_skb_any(skb);
	*pskb = new_skb;
	return ret;
}

static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
{
	/* Check if we will never have enough descriptors,
	 * as gso_segs can be more than current ring size
	 */
	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
}

static netdev_tx_t __tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to work around all TSO packets that meet HW bug conditions
 * indicated in tg3_tx_frag_set()
 */
static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
		       struct netdev_queue *txq, struct sk_buff *skb)
{
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
	struct sk_buff *segs, *seg, *next;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_tx_wake_queue(txq);
	}

	segs = skb_gso_segment(skb, tp->dev->features &
				    ~(NETIF_F_TSO | NETIF_F_TSO6));
	if (IS_ERR(segs) || !segs) {
		tnapi->tx_dropped++;
		goto tg3_tso_bug_end;
	}

	skb_list_walk_safe(segs, seg, next) {
		skb_mark_not_on_list(seg);
		__tg3_start_xmit(seg, tp->dev);
	}

tg3_tso_bug_end:
	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}

/* hard_start_xmit for all devices */
static netdev_tx_t __tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;
	struct iphdr *iph = NULL;
	struct tcphdr *tcph = NULL;
	__sum16 tcp_csum = 0, ip_csum = 0;
	__be16 ip_tot_len = 0;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt. Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either. Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		u32 tcp_opt_len, hdr_len;

		if (skb_cow_head(skb, 0))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;

		/* HW/FW cannot correctly segment packets that have been
		 * vlan encapsulated.
		 */
		if (skb->protocol == htons(ETH_P_8021Q) ||
		    skb->protocol == htons(ETH_P_8021AD)) {
			if (tg3_tso_bug_gso_check(tnapi, skb))
				return tg3_tso_bug(tp, tnapi, txq, skb);
			goto drop;
		}

		if (!skb_is_gso_v6(skb)) {
			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			    tg3_flag(tp, TSO_BUG)) {
				if (tg3_tso_bug_gso_check(tnapi, skb))
					return tg3_tso_bug(tp, tnapi, txq, skb);
				goto drop;
			}
			ip_csum = iph->check;
			ip_tot_len = iph->tot_len;
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		tcph = tcp_hdr(skb);
		tcp_csum = tcph->check;

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcph->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else {
			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 0, IPPROTO_TCP, 0);
		}

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* HW/FW cannot correctly checksum packets that have been
		 * vlan encapsulated.
		 */
		if (skb->protocol == htons(ETH_P_8021Q) ||
		    skb->protocol == htons(ETH_P_8021AD)) {
			if (skb_checksum_help(skb))
				goto drop;
		} else {
			base_flags |= TXD_FLAG_TCPUDP_CSUM;
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (skb_vlan_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = skb_vlan_tag_get(skb);
	}

	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		tg3_full_lock(tp, 0);
		if (!tp->pre_tx_ts) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			base_flags |= TXD_FLAG_HWTSTAMP;
			tg3_read_tx_tstamp(tp, &tp->pre_tx_ts);
		}
		tg3_full_unlock(tp);
	}

	len = skb_headlen(skb);

	mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(&tp->pdev->dev, mapping))
		goto drop;

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			    ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
			/* If it's a TSO packet, do GSO instead of
			 * allocating and copying to a large linear SKB
			 */
			if (ip_tot_len) {
				iph->check = ip_csum;
				iph->tot_len = ip_tot_len;
			}
			tcph->check = tcp_csum;
			return tg3_tso_bug(tp, tnapi, txq, skb);
		}

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * the tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update the tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb_any(skb);
drop_nofree:
	tnapi->tx_dropped++;
	return NETDEV_TX_OK;
}

static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq;
	u16 skb_queue_mapping;
	netdev_tx_t ret;

	skb_queue_mapping = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, skb_queue_mapping);

	ret = __tg3_start_xmit(skb, dev);

	/* Notify the hardware that packets are ready by updating the TX ring
	 * tail pointer. We respect netdev_xmit_more() thus avoiding poking
	 * the hardware for every packet. To guarantee forward progress the TX
	 * ring must be drained when it is full as indicated by
	 * netif_xmit_stopped(). This needs to happen even when the current
	 * skb was dropped or rejected with NETDEV_TX_BUSY. Otherwise packets
	 * queued by previous __tg3_start_xmit() calls might get stuck in
	 * the queue forever.
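	 *
	 * Forward progress also relies on the completion side re-waking
	 * the queue. A minimal sketch of that half, assuming the usual
	 * tg3_tx() structure (the completion path is outside this
	 * section):
	 *
	 *	tnapi->tx_cons = sw_idx;
	 *	smp_mb();	// pairs with the barrier above
	 *	if (netif_tx_queue_stopped(txq) &&
	 *	    tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
	 *		netif_tx_wake_queue(txq);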
8218 */ 8219 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) { 8220 struct tg3_napi *tnapi; 8221 struct tg3 *tp; 8222 8223 tp = netdev_priv(dev); 8224 tnapi = &tp->napi[skb_queue_mapping]; 8225 8226 if (tg3_flag(tp, ENABLE_TSS)) 8227 tnapi++; 8228 8229 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); 8230 } 8231 8232 return ret; 8233 } 8234 8235 static void tg3_mac_loopback(struct tg3 *tp, bool enable) 8236 { 8237 if (enable) { 8238 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX | 8239 MAC_MODE_PORT_MODE_MASK); 8240 8241 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK; 8242 8243 if (!tg3_flag(tp, 5705_PLUS)) 8244 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 8245 8246 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) 8247 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 8248 else 8249 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 8250 } else { 8251 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK; 8252 8253 if (tg3_flag(tp, 5705_PLUS) || 8254 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) || 8255 tg3_asic_rev(tp) == ASIC_REV_5700) 8256 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; 8257 } 8258 8259 tw32(MAC_MODE, tp->mac_mode); 8260 udelay(40); 8261 } 8262 8263 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk) 8264 { 8265 u32 val, bmcr, mac_mode, ptest = 0; 8266 8267 tg3_phy_toggle_apd(tp, false); 8268 tg3_phy_toggle_automdix(tp, false); 8269 8270 if (extlpbk && tg3_phy_set_extloopbk(tp)) 8271 return -EIO; 8272 8273 bmcr = BMCR_FULLDPLX; 8274 switch (speed) { 8275 case SPEED_10: 8276 break; 8277 case SPEED_100: 8278 bmcr |= BMCR_SPEED100; 8279 break; 8280 case SPEED_1000: 8281 default: 8282 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 8283 speed = SPEED_100; 8284 bmcr |= BMCR_SPEED100; 8285 } else { 8286 speed = SPEED_1000; 8287 bmcr |= BMCR_SPEED1000; 8288 } 8289 } 8290 8291 if (extlpbk) { 8292 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 8293 tg3_readphy(tp, MII_CTRL1000, &val); 8294 val |= CTL1000_AS_MASTER | 8295 CTL1000_ENABLE_MASTER; 8296 tg3_writephy(tp, MII_CTRL1000, val); 8297 } else { 8298 ptest = MII_TG3_FET_PTEST_TRIM_SEL | 8299 MII_TG3_FET_PTEST_TRIM_2; 8300 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest); 8301 } 8302 } else 8303 bmcr |= BMCR_LOOPBACK; 8304 8305 tg3_writephy(tp, MII_BMCR, bmcr); 8306 8307 /* The write needs to be flushed for the FETs */ 8308 if (tp->phy_flags & TG3_PHYFLG_IS_FET) 8309 tg3_readphy(tp, MII_BMCR, &bmcr); 8310 8311 udelay(40); 8312 8313 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && 8314 tg3_asic_rev(tp) == ASIC_REV_5785) { 8315 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest | 8316 MII_TG3_FET_PTEST_FRC_TX_LINK | 8317 MII_TG3_FET_PTEST_FRC_TX_LOCK); 8318 8319 /* The write needs to be flushed for the AC131 */ 8320 tg3_readphy(tp, MII_TG3_FET_PTEST, &val); 8321 } 8322 8323 /* Reset to prevent losing 1st rx packet intermittently */ 8324 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 8325 tg3_flag(tp, 5780_CLASS)) { 8326 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 8327 udelay(10); 8328 tw32_f(MAC_RX_MODE, tp->rx_mode); 8329 } 8330 8331 mac_mode = tp->mac_mode & 8332 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); 8333 if (speed == SPEED_1000) 8334 mac_mode |= MAC_MODE_PORT_MODE_GMII; 8335 else 8336 mac_mode |= MAC_MODE_PORT_MODE_MII; 8337 8338 if (tg3_asic_rev(tp) == ASIC_REV_5700) { 8339 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK; 8340 8341 if (masked_phy_id == TG3_PHY_ID_BCM5401) 8342 mac_mode &= ~MAC_MODE_LINK_POLARITY; 8343 else if (masked_phy_id == TG3_PHY_ID_BCM5411) 8344 mac_mode |= MAC_MODE_LINK_POLARITY; 8345 8346 tg3_writephy(tp, MII_TG3_EXT_CTRL, 8347 MII_TG3_EXT_CTRL_LNK3_LED_MODE); 8348 } 8349 
8350 tw32(MAC_MODE, mac_mode); 8351 udelay(40); 8352 8353 return 0; 8354 } 8355 8356 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features) 8357 { 8358 struct tg3 *tp = netdev_priv(dev); 8359 8360 if (features & NETIF_F_LOOPBACK) { 8361 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK) 8362 return; 8363 8364 spin_lock_bh(&tp->lock); 8365 tg3_mac_loopback(tp, true); 8366 netif_carrier_on(tp->dev); 8367 spin_unlock_bh(&tp->lock); 8368 netdev_info(dev, "Internal MAC loopback mode enabled.\n"); 8369 } else { 8370 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) 8371 return; 8372 8373 spin_lock_bh(&tp->lock); 8374 tg3_mac_loopback(tp, false); 8375 /* Force link status check */ 8376 tg3_setup_phy(tp, true); 8377 spin_unlock_bh(&tp->lock); 8378 netdev_info(dev, "Internal MAC loopback mode disabled.\n"); 8379 } 8380 } 8381 8382 static netdev_features_t tg3_fix_features(struct net_device *dev, 8383 netdev_features_t features) 8384 { 8385 struct tg3 *tp = netdev_priv(dev); 8386 8387 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS)) 8388 features &= ~NETIF_F_ALL_TSO; 8389 8390 return features; 8391 } 8392 8393 static int tg3_set_features(struct net_device *dev, netdev_features_t features) 8394 { 8395 netdev_features_t changed = dev->features ^ features; 8396 8397 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) 8398 tg3_set_loopback(dev, features); 8399 8400 return 0; 8401 } 8402 8403 static void tg3_rx_prodring_free(struct tg3 *tp, 8404 struct tg3_rx_prodring_set *tpr) 8405 { 8406 int i; 8407 8408 if (tpr != &tp->napi[0].prodring) { 8409 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; 8410 i = (i + 1) & tp->rx_std_ring_mask) 8411 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], 8412 tp->rx_pkt_map_sz); 8413 8414 if (tg3_flag(tp, JUMBO_CAPABLE)) { 8415 for (i = tpr->rx_jmb_cons_idx; 8416 i != tpr->rx_jmb_prod_idx; 8417 i = (i + 1) & tp->rx_jmb_ring_mask) { 8418 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], 8419 TG3_RX_JMB_MAP_SZ); 8420 } 8421 } 8422 8423 return; 8424 } 8425 8426 for (i = 0; i <= tp->rx_std_ring_mask; i++) 8427 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], 8428 tp->rx_pkt_map_sz); 8429 8430 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { 8431 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) 8432 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], 8433 TG3_RX_JMB_MAP_SZ); 8434 } 8435 } 8436 8437 /* Initialize rx rings for packet processing. 8438 * 8439 * The chip has been shut down and the driver detached from 8440 * the networking, so no interrupts or new tx packets will 8441 * end up in the driver. tp->{tx,}lock are held and thus 8442 * we may not sleep. 8443 */ 8444 static int tg3_rx_prodring_alloc(struct tg3 *tp, 8445 struct tg3_rx_prodring_set *tpr) 8446 { 8447 u32 i, rx_pkt_dma_sz; 8448 8449 tpr->rx_std_cons_idx = 0; 8450 tpr->rx_std_prod_idx = 0; 8451 tpr->rx_jmb_cons_idx = 0; 8452 tpr->rx_jmb_prod_idx = 0; 8453 8454 if (tpr != &tp->napi[0].prodring) { 8455 memset(&tpr->rx_std_buffers[0], 0, 8456 TG3_RX_STD_BUFF_RING_SIZE(tp)); 8457 if (tpr->rx_jmb_buffers) 8458 memset(&tpr->rx_jmb_buffers[0], 0, 8459 TG3_RX_JMB_BUFF_RING_SIZE(tp)); 8460 goto done; 8461 } 8462 8463 /* Zero out all descriptors. 
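	 *
	 * The opaque cookie stamped on each descriptor further below is
	 * what lets the completion path map a returned descriptor back
	 * to its ring and slot. A hedged sketch of the decode (the rx
	 * completion code is outside this section; the masks are the
	 * RXD_OPAQUE_* definitions in tg3.h):
	 *
	 *	ring = desc->opaque & RXD_OPAQUE_RING_MASK;
	 *	idx  = desc->opaque & RXD_OPAQUE_INDEX_MASK;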
*/ 8464 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp)); 8465 8466 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; 8467 if (tg3_flag(tp, 5780_CLASS) && 8468 tp->dev->mtu > ETH_DATA_LEN) 8469 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ; 8470 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz); 8471 8472 /* Initialize invariants of the rings, we only set this 8473 * stuff once. This works because the card does not 8474 * write into the rx buffer posting rings. 8475 */ 8476 for (i = 0; i <= tp->rx_std_ring_mask; i++) { 8477 struct tg3_rx_buffer_desc *rxd; 8478 8479 rxd = &tpr->rx_std[i]; 8480 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT; 8481 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); 8482 rxd->opaque = (RXD_OPAQUE_RING_STD | 8483 (i << RXD_OPAQUE_INDEX_SHIFT)); 8484 } 8485 8486 /* Now allocate fresh SKBs for each rx ring. */ 8487 for (i = 0; i < tp->rx_pending; i++) { 8488 unsigned int frag_size; 8489 8490 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i, 8491 &frag_size) < 0) { 8492 netdev_warn(tp->dev, 8493 "Using a smaller RX standard ring. Only " 8494 "%d out of %d buffers were allocated " 8495 "successfully\n", i, tp->rx_pending); 8496 if (i == 0) 8497 goto initfail; 8498 tp->rx_pending = i; 8499 break; 8500 } 8501 } 8502 8503 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) 8504 goto done; 8505 8506 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp)); 8507 8508 if (!tg3_flag(tp, JUMBO_RING_ENABLE)) 8509 goto done; 8510 8511 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) { 8512 struct tg3_rx_buffer_desc *rxd; 8513 8514 rxd = &tpr->rx_jmb[i].std; 8515 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT; 8516 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | 8517 RXD_FLAG_JUMBO; 8518 rxd->opaque = (RXD_OPAQUE_RING_JUMBO | 8519 (i << RXD_OPAQUE_INDEX_SHIFT)); 8520 } 8521 8522 for (i = 0; i < tp->rx_jumbo_pending; i++) { 8523 unsigned int frag_size; 8524 8525 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i, 8526 &frag_size) < 0) { 8527 netdev_warn(tp->dev, 8528 "Using a smaller RX jumbo ring. 
Only %d " 8529 "out of %d buffers were allocated " 8530 "successfully\n", i, tp->rx_jumbo_pending); 8531 if (i == 0) 8532 goto initfail; 8533 tp->rx_jumbo_pending = i; 8534 break; 8535 } 8536 } 8537 8538 done: 8539 return 0; 8540 8541 initfail: 8542 tg3_rx_prodring_free(tp, tpr); 8543 return -ENOMEM; 8544 } 8545 8546 static void tg3_rx_prodring_fini(struct tg3 *tp, 8547 struct tg3_rx_prodring_set *tpr) 8548 { 8549 kfree(tpr->rx_std_buffers); 8550 tpr->rx_std_buffers = NULL; 8551 kfree(tpr->rx_jmb_buffers); 8552 tpr->rx_jmb_buffers = NULL; 8553 if (tpr->rx_std) { 8554 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp), 8555 tpr->rx_std, tpr->rx_std_mapping); 8556 tpr->rx_std = NULL; 8557 } 8558 if (tpr->rx_jmb) { 8559 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp), 8560 tpr->rx_jmb, tpr->rx_jmb_mapping); 8561 tpr->rx_jmb = NULL; 8562 } 8563 } 8564 8565 static int tg3_rx_prodring_init(struct tg3 *tp, 8566 struct tg3_rx_prodring_set *tpr) 8567 { 8568 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp), 8569 GFP_KERNEL); 8570 if (!tpr->rx_std_buffers) 8571 return -ENOMEM; 8572 8573 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev, 8574 TG3_RX_STD_RING_BYTES(tp), 8575 &tpr->rx_std_mapping, 8576 GFP_KERNEL); 8577 if (!tpr->rx_std) 8578 goto err_out; 8579 8580 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { 8581 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp), 8582 GFP_KERNEL); 8583 if (!tpr->rx_jmb_buffers) 8584 goto err_out; 8585 8586 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev, 8587 TG3_RX_JMB_RING_BYTES(tp), 8588 &tpr->rx_jmb_mapping, 8589 GFP_KERNEL); 8590 if (!tpr->rx_jmb) 8591 goto err_out; 8592 } 8593 8594 return 0; 8595 8596 err_out: 8597 tg3_rx_prodring_fini(tp, tpr); 8598 return -ENOMEM; 8599 } 8600 8601 /* Free up pending packets in all rx/tx rings. 8602 * 8603 * The chip has been shut down and the driver detached from 8604 * the networking, so no interrupts or new tx packets will 8605 * end up in the driver. tp->{tx,}lock is not held and we are not 8606 * in an interrupt context and thus may sleep. 8607 */ 8608 static void tg3_free_rings(struct tg3 *tp) 8609 { 8610 int i, j; 8611 8612 for (j = 0; j < tp->irq_cnt; j++) { 8613 struct tg3_napi *tnapi = &tp->napi[j]; 8614 8615 tg3_rx_prodring_free(tp, &tnapi->prodring); 8616 8617 if (!tnapi->tx_buffers) 8618 continue; 8619 8620 for (i = 0; i < TG3_TX_RING_SIZE; i++) { 8621 struct sk_buff *skb = tnapi->tx_buffers[i].skb; 8622 8623 if (!skb) 8624 continue; 8625 8626 tg3_tx_skb_unmap(tnapi, i, 8627 skb_shinfo(skb)->nr_frags - 1); 8628 8629 dev_consume_skb_any(skb); 8630 } 8631 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j)); 8632 } 8633 } 8634 8635 /* Initialize tx/rx rings for packet processing. 8636 * 8637 * The chip has been shut down and the driver detached from 8638 * the networking, so no interrupts or new tx packets will 8639 * end up in the driver. tp->{tx,}lock are held and thus 8640 * we may not sleep. 8641 */ 8642 static int tg3_init_rings(struct tg3 *tp) 8643 { 8644 int i; 8645 8646 /* Free up all the SKBs. 
*/ 8647 tg3_free_rings(tp); 8648 8649 for (i = 0; i < tp->irq_cnt; i++) { 8650 struct tg3_napi *tnapi = &tp->napi[i]; 8651 8652 tnapi->last_tag = 0; 8653 tnapi->last_irq_tag = 0; 8654 tnapi->hw_status->status = 0; 8655 tnapi->hw_status->status_tag = 0; 8656 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 8657 8658 tnapi->tx_prod = 0; 8659 tnapi->tx_cons = 0; 8660 if (tnapi->tx_ring) 8661 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES); 8662 8663 tnapi->rx_rcb_ptr = 0; 8664 if (tnapi->rx_rcb) 8665 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); 8666 8667 if (tnapi->prodring.rx_std && 8668 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { 8669 tg3_free_rings(tp); 8670 return -ENOMEM; 8671 } 8672 } 8673 8674 return 0; 8675 } 8676 8677 static void tg3_mem_tx_release(struct tg3 *tp) 8678 { 8679 int i; 8680 8681 for (i = 0; i < tp->irq_max; i++) { 8682 struct tg3_napi *tnapi = &tp->napi[i]; 8683 8684 if (tnapi->tx_ring) { 8685 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES, 8686 tnapi->tx_ring, tnapi->tx_desc_mapping); 8687 tnapi->tx_ring = NULL; 8688 } 8689 8690 kfree(tnapi->tx_buffers); 8691 tnapi->tx_buffers = NULL; 8692 } 8693 } 8694 8695 static int tg3_mem_tx_acquire(struct tg3 *tp) 8696 { 8697 int i; 8698 struct tg3_napi *tnapi = &tp->napi[0]; 8699 8700 /* If multivector TSS is enabled, vector 0 does not handle 8701 * tx interrupts. Don't allocate any resources for it. 8702 */ 8703 if (tg3_flag(tp, ENABLE_TSS)) 8704 tnapi++; 8705 8706 for (i = 0; i < tp->txq_cnt; i++, tnapi++) { 8707 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE, 8708 sizeof(struct tg3_tx_ring_info), 8709 GFP_KERNEL); 8710 if (!tnapi->tx_buffers) 8711 goto err_out; 8712 8713 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev, 8714 TG3_TX_RING_BYTES, 8715 &tnapi->tx_desc_mapping, 8716 GFP_KERNEL); 8717 if (!tnapi->tx_ring) 8718 goto err_out; 8719 } 8720 8721 return 0; 8722 8723 err_out: 8724 tg3_mem_tx_release(tp); 8725 return -ENOMEM; 8726 } 8727 8728 static void tg3_mem_rx_release(struct tg3 *tp) 8729 { 8730 int i; 8731 8732 for (i = 0; i < tp->irq_max; i++) { 8733 struct tg3_napi *tnapi = &tp->napi[i]; 8734 8735 tg3_rx_prodring_fini(tp, &tnapi->prodring); 8736 8737 if (!tnapi->rx_rcb) 8738 continue; 8739 8740 dma_free_coherent(&tp->pdev->dev, 8741 TG3_RX_RCB_RING_BYTES(tp), 8742 tnapi->rx_rcb, 8743 tnapi->rx_rcb_mapping); 8744 tnapi->rx_rcb = NULL; 8745 } 8746 } 8747 8748 static int tg3_mem_rx_acquire(struct tg3 *tp) 8749 { 8750 unsigned int i, limit; 8751 8752 limit = tp->rxq_cnt; 8753 8754 /* If RSS is enabled, we need a (dummy) producer ring 8755 * set on vector zero. This is the true hw prodring. 8756 */ 8757 if (tg3_flag(tp, ENABLE_RSS)) 8758 limit++; 8759 8760 for (i = 0; i < limit; i++) { 8761 struct tg3_napi *tnapi = &tp->napi[i]; 8762 8763 if (tg3_rx_prodring_init(tp, &tnapi->prodring)) 8764 goto err_out; 8765 8766 /* If multivector RSS is enabled, vector 0 8767 * does not handle rx or tx interrupts. 8768 * Don't allocate any resources for it. 8769 */ 8770 if (!i && tg3_flag(tp, ENABLE_RSS)) 8771 continue; 8772 8773 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev, 8774 TG3_RX_RCB_RING_BYTES(tp), 8775 &tnapi->rx_rcb_mapping, 8776 GFP_KERNEL); 8777 if (!tnapi->rx_rcb) 8778 goto err_out; 8779 } 8780 8781 return 0; 8782 8783 err_out: 8784 tg3_mem_rx_release(tp); 8785 return -ENOMEM; 8786 } 8787 8788 /* 8789 * Must not be invoked with interrupt sources disabled and 8790 * the hardware shutdown down. 
8791 */ 8792 static void tg3_free_consistent(struct tg3 *tp) 8793 { 8794 int i; 8795 8796 for (i = 0; i < tp->irq_cnt; i++) { 8797 struct tg3_napi *tnapi = &tp->napi[i]; 8798 8799 if (tnapi->hw_status) { 8800 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE, 8801 tnapi->hw_status, 8802 tnapi->status_mapping); 8803 tnapi->hw_status = NULL; 8804 } 8805 } 8806 8807 tg3_mem_rx_release(tp); 8808 tg3_mem_tx_release(tp); 8809 8810 /* tp->hw_stats can be referenced safely: 8811 * 1. under rtnl_lock 8812 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set. 8813 */ 8814 if (tp->hw_stats) { 8815 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), 8816 tp->hw_stats, tp->stats_mapping); 8817 tp->hw_stats = NULL; 8818 } 8819 } 8820 8821 /* 8822 * Must not be invoked with interrupt sources disabled and 8823 * the hardware shutdown down. Can sleep. 8824 */ 8825 static int tg3_alloc_consistent(struct tg3 *tp) 8826 { 8827 int i; 8828 8829 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev, 8830 sizeof(struct tg3_hw_stats), 8831 &tp->stats_mapping, GFP_KERNEL); 8832 if (!tp->hw_stats) 8833 goto err_out; 8834 8835 for (i = 0; i < tp->irq_cnt; i++) { 8836 struct tg3_napi *tnapi = &tp->napi[i]; 8837 struct tg3_hw_status *sblk; 8838 8839 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev, 8840 TG3_HW_STATUS_SIZE, 8841 &tnapi->status_mapping, 8842 GFP_KERNEL); 8843 if (!tnapi->hw_status) 8844 goto err_out; 8845 8846 sblk = tnapi->hw_status; 8847 8848 if (tg3_flag(tp, ENABLE_RSS)) { 8849 u16 *prodptr = NULL; 8850 8851 /* 8852 * When RSS is enabled, the status block format changes 8853 * slightly. The "rx_jumbo_consumer", "reserved", 8854 * and "rx_mini_consumer" members get mapped to the 8855 * other three rx return ring producer indexes. 8856 */ 8857 switch (i) { 8858 case 1: 8859 prodptr = &sblk->idx[0].rx_producer; 8860 break; 8861 case 2: 8862 prodptr = &sblk->rx_jumbo_consumer; 8863 break; 8864 case 3: 8865 prodptr = &sblk->reserved; 8866 break; 8867 case 4: 8868 prodptr = &sblk->rx_mini_consumer; 8869 break; 8870 } 8871 tnapi->rx_rcb_prod_idx = prodptr; 8872 } else { 8873 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer; 8874 } 8875 } 8876 8877 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp)) 8878 goto err_out; 8879 8880 return 0; 8881 8882 err_out: 8883 tg3_free_consistent(tp); 8884 return -ENOMEM; 8885 } 8886 8887 #define MAX_WAIT_CNT 1000 8888 8889 /* To stop a block, clear the enable bit and poll till it 8890 * clears. tp->lock is held. 8891 */ 8892 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent) 8893 { 8894 unsigned int i; 8895 u32 val; 8896 8897 if (tg3_flag(tp, 5705_PLUS)) { 8898 switch (ofs) { 8899 case RCVLSC_MODE: 8900 case DMAC_MODE: 8901 case MBFREE_MODE: 8902 case BUFMGR_MODE: 8903 case MEMARB_MODE: 8904 /* We can't enable/disable these bits of the 8905 * 5705/5750, just say success. 
8906 */ 8907 return 0; 8908 8909 default: 8910 break; 8911 } 8912 } 8913 8914 val = tr32(ofs); 8915 val &= ~enable_bit; 8916 tw32_f(ofs, val); 8917 8918 for (i = 0; i < MAX_WAIT_CNT; i++) { 8919 if (pci_channel_offline(tp->pdev)) { 8920 dev_err(&tp->pdev->dev, 8921 "tg3_stop_block device offline, " 8922 "ofs=%lx enable_bit=%x\n", 8923 ofs, enable_bit); 8924 return -ENODEV; 8925 } 8926 8927 udelay(100); 8928 val = tr32(ofs); 8929 if ((val & enable_bit) == 0) 8930 break; 8931 } 8932 8933 if (i == MAX_WAIT_CNT && !silent) { 8934 dev_err(&tp->pdev->dev, 8935 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n", 8936 ofs, enable_bit); 8937 return -ENODEV; 8938 } 8939 8940 return 0; 8941 } 8942 8943 /* tp->lock is held. */ 8944 static int tg3_abort_hw(struct tg3 *tp, bool silent) 8945 { 8946 int i, err; 8947 8948 tg3_disable_ints(tp); 8949 8950 if (pci_channel_offline(tp->pdev)) { 8951 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE); 8952 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; 8953 err = -ENODEV; 8954 goto err_no_dev; 8955 } 8956 8957 tp->rx_mode &= ~RX_MODE_ENABLE; 8958 tw32_f(MAC_RX_MODE, tp->rx_mode); 8959 udelay(10); 8960 8961 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent); 8962 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent); 8963 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent); 8964 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent); 8965 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent); 8966 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent); 8967 8968 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent); 8969 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent); 8970 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent); 8971 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent); 8972 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent); 8973 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent); 8974 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent); 8975 8976 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; 8977 tw32_f(MAC_MODE, tp->mac_mode); 8978 udelay(40); 8979 8980 tp->tx_mode &= ~TX_MODE_ENABLE; 8981 tw32_f(MAC_TX_MODE, tp->tx_mode); 8982 8983 for (i = 0; i < MAX_WAIT_CNT; i++) { 8984 udelay(100); 8985 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE)) 8986 break; 8987 } 8988 if (i >= MAX_WAIT_CNT) { 8989 dev_err(&tp->pdev->dev, 8990 "%s timed out, TX_MODE_ENABLE will not clear " 8991 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE)); 8992 err |= -ENODEV; 8993 } 8994 8995 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent); 8996 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent); 8997 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent); 8998 8999 tw32(FTQ_RESET, 0xffffffff); 9000 tw32(FTQ_RESET, 0x00000000); 9001 9002 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); 9003 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); 9004 9005 err_no_dev: 9006 for (i = 0; i < tp->irq_cnt; i++) { 9007 struct tg3_napi *tnapi = &tp->napi[i]; 9008 if (tnapi->hw_status) 9009 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 9010 } 9011 9012 return err; 9013 } 9014 9015 /* Save PCI command register before chip reset */ 9016 static void tg3_save_pci_state(struct tg3 *tp) 9017 { 9018 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd); 9019 } 9020 9021 /* Restore PCI state after chip reset */ 9022 static void 
tg3_restore_pci_state(struct tg3 *tp) 9023 { 9024 u32 val; 9025 9026 /* Re-enable indirect register accesses. */ 9027 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 9028 tp->misc_host_ctrl); 9029 9030 /* Set MAX PCI retry to zero. */ 9031 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); 9032 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 && 9033 tg3_flag(tp, PCIX_MODE)) 9034 val |= PCISTATE_RETRY_SAME_DMA; 9035 /* Allow reads and writes to the APE register and memory space. */ 9036 if (tg3_flag(tp, ENABLE_APE)) 9037 val |= PCISTATE_ALLOW_APE_CTLSPC_WR | 9038 PCISTATE_ALLOW_APE_SHMEM_WR | 9039 PCISTATE_ALLOW_APE_PSPACE_WR; 9040 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); 9041 9042 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); 9043 9044 if (!tg3_flag(tp, PCI_EXPRESS)) { 9045 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, 9046 tp->pci_cacheline_sz); 9047 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, 9048 tp->pci_lat_timer); 9049 } 9050 9051 /* Make sure PCI-X relaxed ordering bit is clear. */ 9052 if (tg3_flag(tp, PCIX_MODE)) { 9053 u16 pcix_cmd; 9054 9055 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 9056 &pcix_cmd); 9057 pcix_cmd &= ~PCI_X_CMD_ERO; 9058 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 9059 pcix_cmd); 9060 } 9061 9062 if (tg3_flag(tp, 5780_CLASS)) { 9063 9064 /* Chip reset on 5780 will reset MSI enable bit, 9065 * so need to restore it. 9066 */ 9067 if (tg3_flag(tp, USING_MSI)) { 9068 u16 ctrl; 9069 9070 pci_read_config_word(tp->pdev, 9071 tp->msi_cap + PCI_MSI_FLAGS, 9072 &ctrl); 9073 pci_write_config_word(tp->pdev, 9074 tp->msi_cap + PCI_MSI_FLAGS, 9075 ctrl | PCI_MSI_FLAGS_ENABLE); 9076 val = tr32(MSGINT_MODE); 9077 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE); 9078 } 9079 } 9080 } 9081 9082 static void tg3_override_clk(struct tg3 *tp) 9083 { 9084 u32 val; 9085 9086 switch (tg3_asic_rev(tp)) { 9087 case ASIC_REV_5717: 9088 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); 9089 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val | 9090 TG3_CPMU_MAC_ORIDE_ENABLE); 9091 break; 9092 9093 case ASIC_REV_5719: 9094 case ASIC_REV_5720: 9095 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN); 9096 break; 9097 9098 default: 9099 return; 9100 } 9101 } 9102 9103 static void tg3_restore_clk(struct tg3 *tp) 9104 { 9105 u32 val; 9106 9107 switch (tg3_asic_rev(tp)) { 9108 case ASIC_REV_5717: 9109 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); 9110 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, 9111 val & ~TG3_CPMU_MAC_ORIDE_ENABLE); 9112 break; 9113 9114 case ASIC_REV_5719: 9115 case ASIC_REV_5720: 9116 val = tr32(TG3_CPMU_CLCK_ORIDE); 9117 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN); 9118 break; 9119 9120 default: 9121 return; 9122 } 9123 } 9124 9125 /* tp->lock is held. */ 9126 static int tg3_chip_reset(struct tg3 *tp) 9127 __releases(tp->lock) 9128 __acquires(tp->lock) 9129 { 9130 u32 val; 9131 void (*write_op)(struct tg3 *, u32, u32); 9132 int i, err; 9133 9134 if (!pci_device_is_present(tp->pdev)) 9135 return -ENODEV; 9136 9137 tg3_nvram_lock(tp); 9138 9139 tg3_ape_lock(tp, TG3_APE_LOCK_GRC); 9140 9141 /* No matching tg3_nvram_unlock() after this because 9142 * chip reset below will undo the nvram lock. 9143 */ 9144 tp->nvram_lock_cnt = 0; 9145 9146 /* GRC_MISC_CFG core clock reset will clear the memory 9147 * enable bit in PCI register 4 and the MSI enable bit 9148 * on some chips, so we save relevant registers here. 
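	 *
	 * The save pairs with tg3_restore_pci_state() once the reset has
	 * completed. In miniature, the bracket around the core clock
	 * reset looks like this (sketch only; both helpers are defined
	 * nearby in this file):
	 *
	 *	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
	 *	tw32(GRC_MISC_CFG, GRC_MISC_CFG_CORECLK_RESET);
	 *	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);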
9149 */ 9150 tg3_save_pci_state(tp); 9151 9152 if (tg3_asic_rev(tp) == ASIC_REV_5752 || 9153 tg3_flag(tp, 5755_PLUS)) 9154 tw32(GRC_FASTBOOT_PC, 0); 9155 9156 /* 9157 * We must avoid the readl() that normally takes place. 9158 * It locks machines, causes machine checks, and other 9159 * fun things. So, temporarily disable the 5701 9160 * hardware workaround, while we do the reset. 9161 */ 9162 write_op = tp->write32; 9163 if (write_op == tg3_write_flush_reg32) 9164 tp->write32 = tg3_write32; 9165 9166 /* Prevent the irq handler from reading or writing PCI registers 9167 * during chip reset when the memory enable bit in the PCI command 9168 * register may be cleared. The chip does not generate interrupt 9169 * at this time, but the irq handler may still be called due to irq 9170 * sharing or irqpoll. 9171 */ 9172 tg3_flag_set(tp, CHIP_RESETTING); 9173 for (i = 0; i < tp->irq_cnt; i++) { 9174 struct tg3_napi *tnapi = &tp->napi[i]; 9175 if (tnapi->hw_status) { 9176 tnapi->hw_status->status = 0; 9177 tnapi->hw_status->status_tag = 0; 9178 } 9179 tnapi->last_tag = 0; 9180 tnapi->last_irq_tag = 0; 9181 } 9182 smp_mb(); 9183 9184 tg3_full_unlock(tp); 9185 9186 for (i = 0; i < tp->irq_cnt; i++) 9187 synchronize_irq(tp->napi[i].irq_vec); 9188 9189 tg3_full_lock(tp, 0); 9190 9191 if (tg3_asic_rev(tp) == ASIC_REV_57780) { 9192 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; 9193 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); 9194 } 9195 9196 /* do the reset */ 9197 val = GRC_MISC_CFG_CORECLK_RESET; 9198 9199 if (tg3_flag(tp, PCI_EXPRESS)) { 9200 /* Force PCIe 1.0a mode */ 9201 if (tg3_asic_rev(tp) != ASIC_REV_5785 && 9202 !tg3_flag(tp, 57765_PLUS) && 9203 tr32(TG3_PCIE_PHY_TSTCTL) == 9204 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM)) 9205 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM); 9206 9207 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) { 9208 tw32(GRC_MISC_CFG, (1 << 29)); 9209 val |= (1 << 29); 9210 } 9211 } 9212 9213 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 9214 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET); 9215 tw32(GRC_VCPU_EXT_CTRL, 9216 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU); 9217 } 9218 9219 /* Set the clock to the highest frequency to avoid timeouts. With link 9220 * aware mode, the clock speed could be slow and bootcode does not 9221 * complete within the expected time. Override the clock to allow the 9222 * bootcode to finish sooner and then restore it. 9223 */ 9224 tg3_override_clk(tp); 9225 9226 /* Manage gphy power for all CPMU absent PCIe devices. */ 9227 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT)) 9228 val |= GRC_MISC_CFG_KEEP_GPHY_POWER; 9229 9230 tw32(GRC_MISC_CFG, val); 9231 9232 /* restore 5701 hardware bug workaround write method */ 9233 tp->write32 = write_op; 9234 9235 /* Unfortunately, we have to delay before the PCI read back. 9236 * Some 575X chips even will not respond to a PCI cfg access 9237 * when the reset command is given to the chip. 9238 * 9239 * How do these hardware designers expect things to work 9240 * properly if the PCI write is posted for a long period 9241 * of time? It is always necessary to have some method by 9242 * which a register read back can occur to push the write 9243 * out which does the reset. 9244 * 9245 * For most tg3 variants the trick below was working. 9246 * Ho hum... 9247 */ 9248 udelay(120); 9249 9250 /* Flush PCI posted writes. 
The normal MMIO registers 9251 * are inaccessible at this time so this is the only 9252 * way to make this reliably (actually, this is no longer 9253 * the case, see above). I tried to use indirect 9254 * register read/write but this upset some 5701 variants. 9255 */ 9256 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val); 9257 9258 udelay(120); 9259 9260 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) { 9261 u16 val16; 9262 9263 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) { 9264 int j; 9265 u32 cfg_val; 9266 9267 /* Wait for link training to complete. */ 9268 for (j = 0; j < 5000; j++) 9269 udelay(100); 9270 9271 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val); 9272 pci_write_config_dword(tp->pdev, 0xc4, 9273 cfg_val | (1 << 15)); 9274 } 9275 9276 /* Clear the "no snoop" and "relaxed ordering" bits. */ 9277 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN; 9278 /* 9279 * Older PCIe devices only support the 128 byte 9280 * MPS setting. Enforce the restriction. 9281 */ 9282 if (!tg3_flag(tp, CPMU_PRESENT)) 9283 val16 |= PCI_EXP_DEVCTL_PAYLOAD; 9284 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16); 9285 9286 /* Clear error status */ 9287 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA, 9288 PCI_EXP_DEVSTA_CED | 9289 PCI_EXP_DEVSTA_NFED | 9290 PCI_EXP_DEVSTA_FED | 9291 PCI_EXP_DEVSTA_URD); 9292 } 9293 9294 tg3_restore_pci_state(tp); 9295 9296 tg3_flag_clear(tp, CHIP_RESETTING); 9297 tg3_flag_clear(tp, ERROR_PROCESSED); 9298 9299 val = 0; 9300 if (tg3_flag(tp, 5780_CLASS)) 9301 val = tr32(MEMARB_MODE); 9302 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); 9303 9304 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) { 9305 tg3_stop_fw(tp); 9306 tw32(0x5000, 0x400); 9307 } 9308 9309 if (tg3_flag(tp, IS_SSB_CORE)) { 9310 /* 9311 * BCM4785: In order to avoid repercussions from using 9312 * potentially defective internal ROM, stop the Rx RISC CPU, 9313 * which is not required. 9314 */ 9315 tg3_stop_fw(tp); 9316 tg3_halt_cpu(tp, RX_CPU_BASE); 9317 } 9318 9319 err = tg3_poll_fw(tp); 9320 if (err) 9321 return err; 9322 9323 tw32(GRC_MODE, tp->grc_mode); 9324 9325 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) { 9326 val = tr32(0xc4); 9327 9328 tw32(0xc4, val | (1 << 15)); 9329 } 9330 9331 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 && 9332 tg3_asic_rev(tp) == ASIC_REV_5705) { 9333 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE; 9334 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) 9335 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN; 9336 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); 9337 } 9338 9339 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 9340 tp->mac_mode = MAC_MODE_PORT_MODE_TBI; 9341 val = tp->mac_mode; 9342 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { 9343 tp->mac_mode = MAC_MODE_PORT_MODE_GMII; 9344 val = tp->mac_mode; 9345 } else 9346 val = 0; 9347 9348 tw32_f(MAC_MODE, val); 9349 udelay(40); 9350 9351 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); 9352 9353 tg3_mdio_start(tp); 9354 9355 if (tg3_flag(tp, PCI_EXPRESS) && 9356 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 && 9357 tg3_asic_rev(tp) != ASIC_REV_5785 && 9358 !tg3_flag(tp, 57765_PLUS)) { 9359 val = tr32(0x7c00); 9360 9361 tw32(0x7c00, val | (1 << 25)); 9362 } 9363 9364 tg3_restore_clk(tp); 9365 9366 /* Increase the core clock speed to fix tx timeout issue for 5762 9367 * with 100Mbps link speed. 
9368 */ 9369 if (tg3_asic_rev(tp) == ASIC_REV_5762) { 9370 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); 9371 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val | 9372 TG3_CPMU_MAC_ORIDE_ENABLE); 9373 } 9374 9375 /* Reprobe ASF enable state. */ 9376 tg3_flag_clear(tp, ENABLE_ASF); 9377 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | 9378 TG3_PHYFLG_KEEP_LINK_ON_PWRDN); 9379 9380 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE); 9381 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); 9382 if (val == NIC_SRAM_DATA_SIG_MAGIC) { 9383 u32 nic_cfg; 9384 9385 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); 9386 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { 9387 tg3_flag_set(tp, ENABLE_ASF); 9388 tp->last_event_jiffies = jiffies; 9389 if (tg3_flag(tp, 5750_PLUS)) 9390 tg3_flag_set(tp, ASF_NEW_HANDSHAKE); 9391 9392 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg); 9393 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK) 9394 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK; 9395 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID) 9396 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN; 9397 } 9398 } 9399 9400 return 0; 9401 } 9402 9403 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *); 9404 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *); 9405 static void __tg3_set_rx_mode(struct net_device *); 9406 9407 /* tp->lock is held. */ 9408 static int tg3_halt(struct tg3 *tp, int kind, bool silent) 9409 { 9410 int err, i; 9411 9412 tg3_stop_fw(tp); 9413 9414 tg3_write_sig_pre_reset(tp, kind); 9415 9416 tg3_abort_hw(tp, silent); 9417 err = tg3_chip_reset(tp); 9418 9419 __tg3_set_mac_addr(tp, false); 9420 9421 tg3_write_sig_legacy(tp, kind); 9422 tg3_write_sig_post_reset(tp, kind); 9423 9424 if (tp->hw_stats) { 9425 /* Save the stats across chip resets... */ 9426 tg3_get_nstats(tp, &tp->net_stats_prev); 9427 tg3_get_estats(tp, &tp->estats_prev); 9428 9429 /* And make sure the next sample is new data */ 9430 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); 9431 9432 for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) { 9433 struct tg3_napi *tnapi = &tp->napi[i]; 9434 9435 tnapi->rx_dropped = 0; 9436 tnapi->tx_dropped = 0; 9437 } 9438 } 9439 9440 return err; 9441 } 9442 9443 static int tg3_set_mac_addr(struct net_device *dev, void *p) 9444 { 9445 struct tg3 *tp = netdev_priv(dev); 9446 struct sockaddr *addr = p; 9447 int err = 0; 9448 bool skip_mac_1 = false; 9449 9450 if (!is_valid_ether_addr(addr->sa_data)) 9451 return -EADDRNOTAVAIL; 9452 9453 eth_hw_addr_set(dev, addr->sa_data); 9454 9455 if (!netif_running(dev)) 9456 return 0; 9457 9458 if (tg3_flag(tp, ENABLE_ASF)) { 9459 u32 addr0_high, addr0_low, addr1_high, addr1_low; 9460 9461 addr0_high = tr32(MAC_ADDR_0_HIGH); 9462 addr0_low = tr32(MAC_ADDR_0_LOW); 9463 addr1_high = tr32(MAC_ADDR_1_HIGH); 9464 addr1_low = tr32(MAC_ADDR_1_LOW); 9465 9466 /* Skip MAC addr 1 if ASF is using it. */ 9467 if ((addr0_high != addr1_high || addr0_low != addr1_low) && 9468 !(addr1_high == 0 && addr1_low == 0)) 9469 skip_mac_1 = true; 9470 } 9471 spin_lock_bh(&tp->lock); 9472 __tg3_set_mac_addr(tp, skip_mac_1); 9473 __tg3_set_rx_mode(dev); 9474 spin_unlock_bh(&tp->lock); 9475 9476 return err; 9477 } 9478 9479 /* tp->lock is held. 
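 *
 * tg3_set_bdinfo() fills one TG3_BDINFO control block in NIC SRAM.
 * A sketch of the layout it writes, using only the offsets visible
 * in the body below:
 *
 *	TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH: DMA address, high half
 *	TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW:  DMA address, low half
 *	TG3_BDINFO_MAXLEN_FLAGS: (ring length << BDINFO_FLAGS_MAXLEN_SHIFT)
 *	                         | ring flags
 *	TG3_BDINFO_NIC_ADDR:     descriptor address in NIC SRAM
 *
 * On 5705-plus parts the NIC address word is not written.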
*/ 9480 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr, 9481 dma_addr_t mapping, u32 maxlen_flags, 9482 u32 nic_addr) 9483 { 9484 tg3_write_mem(tp, 9485 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH), 9486 ((u64) mapping >> 32)); 9487 tg3_write_mem(tp, 9488 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW), 9489 ((u64) mapping & 0xffffffff)); 9490 tg3_write_mem(tp, 9491 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS), 9492 maxlen_flags); 9493 9494 if (!tg3_flag(tp, 5705_PLUS)) 9495 tg3_write_mem(tp, 9496 (bdinfo_addr + TG3_BDINFO_NIC_ADDR), 9497 nic_addr); 9498 } 9499 9500 9501 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec) 9502 { 9503 int i = 0; 9504 9505 if (!tg3_flag(tp, ENABLE_TSS)) { 9506 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); 9507 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); 9508 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); 9509 } else { 9510 tw32(HOSTCC_TXCOL_TICKS, 0); 9511 tw32(HOSTCC_TXMAX_FRAMES, 0); 9512 tw32(HOSTCC_TXCOAL_MAXF_INT, 0); 9513 9514 for (; i < tp->txq_cnt; i++) { 9515 u32 reg; 9516 9517 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; 9518 tw32(reg, ec->tx_coalesce_usecs); 9519 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; 9520 tw32(reg, ec->tx_max_coalesced_frames); 9521 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18; 9522 tw32(reg, ec->tx_max_coalesced_frames_irq); 9523 } 9524 } 9525 9526 for (; i < tp->irq_max - 1; i++) { 9527 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); 9528 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); 9529 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); 9530 } 9531 } 9532 9533 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec) 9534 { 9535 int i = 0; 9536 u32 limit = tp->rxq_cnt; 9537 9538 if (!tg3_flag(tp, ENABLE_RSS)) { 9539 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); 9540 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); 9541 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); 9542 limit--; 9543 } else { 9544 tw32(HOSTCC_RXCOL_TICKS, 0); 9545 tw32(HOSTCC_RXMAX_FRAMES, 0); 9546 tw32(HOSTCC_RXCOAL_MAXF_INT, 0); 9547 } 9548 9549 for (; i < limit; i++) { 9550 u32 reg; 9551 9552 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18; 9553 tw32(reg, ec->rx_coalesce_usecs); 9554 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18; 9555 tw32(reg, ec->rx_max_coalesced_frames); 9556 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; 9557 tw32(reg, ec->rx_max_coalesced_frames_irq); 9558 } 9559 9560 for (; i < tp->irq_max - 1; i++) { 9561 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0); 9562 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); 9563 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); 9564 } 9565 } 9566 9567 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) 9568 { 9569 tg3_coal_tx_init(tp, ec); 9570 tg3_coal_rx_init(tp, ec); 9571 9572 if (!tg3_flag(tp, 5705_PLUS)) { 9573 u32 val = ec->stats_block_coalesce_usecs; 9574 9575 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); 9576 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); 9577 9578 if (!tp->link_up) 9579 val = 0; 9580 9581 tw32(HOSTCC_STAT_COAL_TICKS, val); 9582 } 9583 } 9584 9585 /* tp->lock is held. */ 9586 static void tg3_tx_rcbs_disable(struct tg3 *tp) 9587 { 9588 u32 txrcb, limit; 9589 9590 /* Disable all transmit rings but the first. 
*/ 9591 if (!tg3_flag(tp, 5705_PLUS)) 9592 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; 9593 else if (tg3_flag(tp, 5717_PLUS)) 9594 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4; 9595 else if (tg3_flag(tp, 57765_CLASS) || 9596 tg3_asic_rev(tp) == ASIC_REV_5762) 9597 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; 9598 else 9599 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; 9600 9601 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; 9602 txrcb < limit; txrcb += TG3_BDINFO_SIZE) 9603 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, 9604 BDINFO_FLAGS_DISABLED); 9605 } 9606 9607 /* tp->lock is held. */ 9608 static void tg3_tx_rcbs_init(struct tg3 *tp) 9609 { 9610 int i = 0; 9611 u32 txrcb = NIC_SRAM_SEND_RCB; 9612 9613 if (tg3_flag(tp, ENABLE_TSS)) 9614 i++; 9615 9616 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) { 9617 struct tg3_napi *tnapi = &tp->napi[i]; 9618 9619 if (!tnapi->tx_ring) 9620 continue; 9621 9622 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, 9623 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT), 9624 NIC_SRAM_TX_BUFFER_DESC); 9625 } 9626 } 9627 9628 /* tp->lock is held. */ 9629 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp) 9630 { 9631 u32 rxrcb, limit; 9632 9633 /* Disable all receive return rings but the first. */ 9634 if (tg3_flag(tp, 5717_PLUS)) 9635 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; 9636 else if (!tg3_flag(tp, 5705_PLUS)) 9637 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; 9638 else if (tg3_asic_rev(tp) == ASIC_REV_5755 || 9639 tg3_asic_rev(tp) == ASIC_REV_5762 || 9640 tg3_flag(tp, 57765_CLASS)) 9641 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; 9642 else 9643 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; 9644 9645 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; 9646 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE) 9647 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS, 9648 BDINFO_FLAGS_DISABLED); 9649 } 9650 9651 /* tp->lock is held. */ 9652 static void tg3_rx_ret_rcbs_init(struct tg3 *tp) 9653 { 9654 int i = 0; 9655 u32 rxrcb = NIC_SRAM_RCV_RET_RCB; 9656 9657 if (tg3_flag(tp, ENABLE_RSS)) 9658 i++; 9659 9660 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) { 9661 struct tg3_napi *tnapi = &tp->napi[i]; 9662 9663 if (!tnapi->rx_rcb) 9664 continue; 9665 9666 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, 9667 (tp->rx_ret_ring_mask + 1) << 9668 BDINFO_FLAGS_MAXLEN_SHIFT, 0); 9669 } 9670 } 9671 9672 /* tp->lock is held. */ 9673 static void tg3_rings_reset(struct tg3 *tp) 9674 { 9675 int i; 9676 u32 stblk; 9677 struct tg3_napi *tnapi = &tp->napi[0]; 9678 9679 tg3_tx_rcbs_disable(tp); 9680 9681 tg3_rx_ret_rcbs_disable(tp); 9682 9683 /* Disable interrupts */ 9684 tw32_mailbox_f(tp->napi[0].int_mbox, 1); 9685 tp->napi[0].chk_msi_cnt = 0; 9686 tp->napi[0].last_rx_cons = 0; 9687 tp->napi[0].last_tx_cons = 0; 9688 9689 /* Zero mailbox registers. 
*/ 9690 if (tg3_flag(tp, SUPPORT_MSIX)) { 9691 for (i = 1; i < tp->irq_max; i++) { 9692 tp->napi[i].tx_prod = 0; 9693 tp->napi[i].tx_cons = 0; 9694 if (tg3_flag(tp, ENABLE_TSS)) 9695 tw32_mailbox(tp->napi[i].prodmbox, 0); 9696 tw32_rx_mbox(tp->napi[i].consmbox, 0); 9697 tw32_mailbox_f(tp->napi[i].int_mbox, 1); 9698 tp->napi[i].chk_msi_cnt = 0; 9699 tp->napi[i].last_rx_cons = 0; 9700 tp->napi[i].last_tx_cons = 0; 9701 } 9702 if (!tg3_flag(tp, ENABLE_TSS)) 9703 tw32_mailbox(tp->napi[0].prodmbox, 0); 9704 } else { 9705 tp->napi[0].tx_prod = 0; 9706 tp->napi[0].tx_cons = 0; 9707 tw32_mailbox(tp->napi[0].prodmbox, 0); 9708 tw32_rx_mbox(tp->napi[0].consmbox, 0); 9709 } 9710 9711 /* Make sure the NIC-based send BD rings are disabled. */ 9712 if (!tg3_flag(tp, 5705_PLUS)) { 9713 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW; 9714 for (i = 0; i < 16; i++) 9715 tw32_tx_mbox(mbox + i * 8, 0); 9716 } 9717 9718 /* Clear status block in ram. */ 9719 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 9720 9721 /* Set status block DMA address */ 9722 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 9723 ((u64) tnapi->status_mapping >> 32)); 9724 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, 9725 ((u64) tnapi->status_mapping & 0xffffffff)); 9726 9727 stblk = HOSTCC_STATBLCK_RING1; 9728 9729 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) { 9730 u64 mapping = (u64)tnapi->status_mapping; 9731 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32); 9732 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff); 9733 stblk += 8; 9734 9735 /* Clear status block in ram. */ 9736 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 9737 } 9738 9739 tg3_tx_rcbs_init(tp); 9740 tg3_rx_ret_rcbs_init(tp); 9741 } 9742 9743 static void tg3_setup_rxbd_thresholds(struct tg3 *tp) 9744 { 9745 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh; 9746 9747 if (!tg3_flag(tp, 5750_PLUS) || 9748 tg3_flag(tp, 5780_CLASS) || 9749 tg3_asic_rev(tp) == ASIC_REV_5750 || 9750 tg3_asic_rev(tp) == ASIC_REV_5752 || 9751 tg3_flag(tp, 57765_PLUS)) 9752 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700; 9753 else if (tg3_asic_rev(tp) == ASIC_REV_5755 || 9754 tg3_asic_rev(tp) == ASIC_REV_5787) 9755 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755; 9756 else 9757 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906; 9758 9759 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post); 9760 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1); 9761 9762 val = min(nic_rep_thresh, host_rep_thresh); 9763 tw32(RCVBDI_STD_THRESH, val); 9764 9765 if (tg3_flag(tp, 57765_PLUS)) 9766 tw32(STD_REPLENISH_LWM, bdcache_maxcnt); 9767 9768 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) 9769 return; 9770 9771 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700; 9772 9773 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1); 9774 9775 val = min(bdcache_maxcnt / 2, host_rep_thresh); 9776 tw32(RCVBDI_JUMBO_THRESH, val); 9777 9778 if (tg3_flag(tp, 57765_PLUS)) 9779 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt); 9780 } 9781 9782 static inline u32 calc_crc(unsigned char *buf, int len) 9783 { 9784 u32 reg; 9785 u32 tmp; 9786 int j, k; 9787 9788 reg = 0xffffffff; 9789 9790 for (j = 0; j < len; j++) { 9791 reg ^= buf[j]; 9792 9793 for (k = 0; k < 8; k++) { 9794 tmp = reg & 0x01; 9795 9796 reg >>= 1; 9797 9798 if (tmp) 9799 reg ^= CRC32_POLY_LE; 9800 } 9801 } 9802 9803 return ~reg; 9804 } 9805 9806 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all) 9807 { 9808 /* accept or reject all multicast frames */ 9809 
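	/* MAC_HASH_REG_0..3 together form a 128-bit multicast hash
	 * filter. When not simply accepting or rejecting everything,
	 * __tg3_set_rx_mode() below selects one of the 128 bits per
	 * address from a CRC, roughly (restating the loop found there):
	 *
	 *	bit = ~calc_crc(ha->addr, ETH_ALEN) & 0x7f;
	 *	mc_filter[(bit & 0x60) >> 5] |= 1 << (bit & 0x1f);
	 */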
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}

static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
		rx_mode |= RX_MODE_PROMISC;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into the mac addr filter list */
		int i = 0;
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, dev) {
			__tg3_set_one_mac_addr(tp, ha->addr,
					       i + TG3_UCAST_ADDR_IDX(tp));
			i++;
		}
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}

static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}

static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->rxq_cnt == 1) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}

static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}

static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
	else
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
}

/* tp->lock is held.
*/ 9939 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) 9940 { 9941 u32 val, rdmac_mode; 9942 int i, err, limit; 9943 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; 9944 9945 tg3_disable_ints(tp); 9946 9947 tg3_stop_fw(tp); 9948 9949 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); 9950 9951 if (tg3_flag(tp, INIT_COMPLETE)) 9952 tg3_abort_hw(tp, 1); 9953 9954 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && 9955 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) { 9956 tg3_phy_pull_config(tp); 9957 tg3_eee_pull_config(tp, NULL); 9958 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 9959 } 9960 9961 /* Enable MAC control of LPI */ 9962 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) 9963 tg3_setup_eee(tp); 9964 9965 if (reset_phy) 9966 tg3_phy_reset(tp); 9967 9968 err = tg3_chip_reset(tp); 9969 if (err) 9970 return err; 9971 9972 tg3_write_sig_legacy(tp, RESET_KIND_INIT); 9973 9974 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) { 9975 val = tr32(TG3_CPMU_CTRL); 9976 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE); 9977 tw32(TG3_CPMU_CTRL, val); 9978 9979 val = tr32(TG3_CPMU_LSPD_10MB_CLK); 9980 val &= ~CPMU_LSPD_10MB_MACCLK_MASK; 9981 val |= CPMU_LSPD_10MB_MACCLK_6_25; 9982 tw32(TG3_CPMU_LSPD_10MB_CLK, val); 9983 9984 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD); 9985 val &= ~CPMU_LNK_AWARE_MACCLK_MASK; 9986 val |= CPMU_LNK_AWARE_MACCLK_6_25; 9987 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val); 9988 9989 val = tr32(TG3_CPMU_HST_ACC); 9990 val &= ~CPMU_HST_ACC_MACCLK_MASK; 9991 val |= CPMU_HST_ACC_MACCLK_6_25; 9992 tw32(TG3_CPMU_HST_ACC, val); 9993 } 9994 9995 if (tg3_asic_rev(tp) == ASIC_REV_57780) { 9996 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK; 9997 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN | 9998 PCIE_PWR_MGMT_L1_THRESH_4MS; 9999 tw32(PCIE_PWR_MGMT_THRESH, val); 10000 10001 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK; 10002 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS); 10003 10004 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR); 10005 10006 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; 10007 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); 10008 } 10009 10010 if (tg3_flag(tp, L1PLLPD_EN)) { 10011 u32 grc_mode = tr32(GRC_MODE); 10012 10013 /* Access the lower 1K of PL PCIE block registers. */ 10014 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; 10015 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); 10016 10017 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1); 10018 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1, 10019 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN); 10020 10021 tw32(GRC_MODE, grc_mode); 10022 } 10023 10024 if (tg3_flag(tp, 57765_CLASS)) { 10025 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) { 10026 u32 grc_mode = tr32(GRC_MODE); 10027 10028 /* Access the lower 1K of PL PCIE block registers. */ 10029 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; 10030 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); 10031 10032 val = tr32(TG3_PCIE_TLDLPL_PORT + 10033 TG3_PCIE_PL_LO_PHYCTL5); 10034 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5, 10035 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ); 10036 10037 tw32(GRC_MODE, grc_mode); 10038 } 10039 10040 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) { 10041 u32 grc_mode; 10042 10043 /* Fix transmit hangs */ 10044 val = tr32(TG3_CPMU_PADRNG_CTL); 10045 val |= TG3_CPMU_PADRNG_CTL_RDIV2; 10046 tw32(TG3_CPMU_PADRNG_CTL, val); 10047 10048 grc_mode = tr32(GRC_MODE); 10049 10050 /* Access the lower 1K of DL PCIE block registers. 
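			 *
			 * GRC_MODE_PCIE_PORT_MASK selects which 1K window of
			 * the PCIE core is visible at TG3_PCIE_TLDLPL_PORT.
			 * The pattern, here as in the PL_SEL blocks above, is
			 * select window, touch register, restore GRC_MODE:
			 *
			 *	tw32(GRC_MODE, (grc_mode &
			 *	     ~GRC_MODE_PCIE_PORT_MASK) |
			 *	     GRC_MODE_PCIE_DL_SEL);
			 *	... access TG3_PCIE_TLDLPL_PORT + offset ...
			 *	tw32(GRC_MODE, grc_mode);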
			 */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}

	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon. This bit has no effect on any
	 * other revision. But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
		/* Enable some hw fixes. */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to set up the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
		    tg3_asic_rev(tp) != ASIC_REV_5762)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
		   tg3_asic_rev(tp) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive. For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
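	 *
	 * For TCP, the transmit-side seed left in the checksum field is
	 * the pseudo-header sum taken over a zero length, as the TSO
	 * path earlier in this file computes it (illustrative excerpt):
	 *
	 *	tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
	 *					 0, IPPROTO_TCP, 0);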
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
	if (tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (tg3_flag(tp, PTP_CAPABLE))
		val |= GRC_MODE_TIME_SYNC_ENABLE;

	tw32(GRC_MODE, tp->grc_mode | val);

	/* On one of the AMD platforms, MRRS is restricted to 4000 because
	 * of a south bridge limitation. As a workaround, the driver sets
	 * MRRS to 2048 instead of the default 4096.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
	}

	/* Setup the timer prescaler register. The clock is always 66 MHz,
	 * so dividing by (65 + 1) yields a 1 MHz timer tick.
	 */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);

	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing. */
	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (tg3_asic_rev(tp) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);

	/* Initialize TG3_BDINFO's at:
	 *   RCVDBDI_STD_BD:  standard eth size rx ring
	 *
RCVDBDI_JUMBO_BD: jumbo frame rx ring 10241 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work) 10242 * 10243 * like so: 10244 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring 10245 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) | 10246 * ring attribute flags 10247 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM 10248 * 10249 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries. 10250 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries. 10251 * 10252 * The size of each ring is fixed in the firmware, but the location is 10253 * configurable. 10254 */ 10255 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 10256 ((u64) tpr->rx_std_mapping >> 32)); 10257 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 10258 ((u64) tpr->rx_std_mapping & 0xffffffff)); 10259 if (!tg3_flag(tp, 5717_PLUS)) 10260 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, 10261 NIC_SRAM_RX_BUFFER_DESC); 10262 10263 /* Disable the mini ring */ 10264 if (!tg3_flag(tp, 5705_PLUS)) 10265 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, 10266 BDINFO_FLAGS_DISABLED); 10267 10268 /* Program the jumbo buffer descriptor ring control 10269 * blocks on those devices that have them. 10270 */ 10271 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 10272 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) { 10273 10274 if (tg3_flag(tp, JUMBO_RING_ENABLE)) { 10275 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 10276 ((u64) tpr->rx_jmb_mapping >> 32)); 10277 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 10278 ((u64) tpr->rx_jmb_mapping & 0xffffffff)); 10279 val = TG3_RX_JMB_RING_SIZE(tp) << 10280 BDINFO_FLAGS_MAXLEN_SHIFT; 10281 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 10282 val | BDINFO_FLAGS_USE_EXT_RECV); 10283 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) || 10284 tg3_flag(tp, 57765_CLASS) || 10285 tg3_asic_rev(tp) == ASIC_REV_5762) 10286 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, 10287 NIC_SRAM_RX_JUMBO_BUFFER_DESC); 10288 } else { 10289 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 10290 BDINFO_FLAGS_DISABLED); 10291 } 10292 10293 if (tg3_flag(tp, 57765_PLUS)) { 10294 val = TG3_RX_STD_RING_SIZE(tp); 10295 val <<= BDINFO_FLAGS_MAXLEN_SHIFT; 10296 val |= (TG3_RX_STD_DMA_SZ << 2); 10297 } else 10298 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT; 10299 } else 10300 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT; 10301 10302 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); 10303 10304 tpr->rx_std_prod_idx = tp->rx_pending; 10305 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); 10306 10307 tpr->rx_jmb_prod_idx = 10308 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0; 10309 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); 10310 10311 tg3_rings_reset(tp); 10312 10313 /* Initialize MAC address and backoff seed. */ 10314 __tg3_set_mac_addr(tp, false); 10315 10316 /* MTU + ethernet header + FCS + optional VLAN tag */ 10317 tw32(MAC_RX_MTU_SIZE, 10318 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); 10319 10320 /* The slot time is changed by tg3_setup_phy if we 10321 * run at gigabit with half duplex. 
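	 * (The value built below packs the IPG/CRS, IPG and slot-time
	 * fields of MAC_TX_LENGTHS; on 5720/5762 the jumbo-frame-length
	 * and count-down fields are preserved from the current register
	 * value.)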
10322 */ 10323 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | 10324 (6 << TX_LENGTHS_IPG_SHIFT) | 10325 (32 << TX_LENGTHS_SLOT_TIME_SHIFT); 10326 10327 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10328 tg3_asic_rev(tp) == ASIC_REV_5762) 10329 val |= tr32(MAC_TX_LENGTHS) & 10330 (TX_LENGTHS_JMB_FRM_LEN_MSK | 10331 TX_LENGTHS_CNT_DWN_VAL_MSK); 10332 10333 tw32(MAC_TX_LENGTHS, val); 10334 10335 /* Receive rules. */ 10336 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); 10337 tw32(RCVLPC_CONFIG, 0x0181); 10338 10339 /* Calculate RDMAC_MODE setting early, we need it to determine 10340 * the RCVLPC_STATE_ENABLE mask. 10341 */ 10342 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB | 10343 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB | 10344 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB | 10345 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | 10346 RDMAC_MODE_LNGREAD_ENAB); 10347 10348 if (tg3_asic_rev(tp) == ASIC_REV_5717) 10349 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; 10350 10351 if (tg3_asic_rev(tp) == ASIC_REV_5784 || 10352 tg3_asic_rev(tp) == ASIC_REV_5785 || 10353 tg3_asic_rev(tp) == ASIC_REV_57780) 10354 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB | 10355 RDMAC_MODE_MBUF_RBD_CRPT_ENAB | 10356 RDMAC_MODE_MBUF_SBD_CRPT_ENAB; 10357 10358 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 10359 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 10360 if (tg3_flag(tp, TSO_CAPABLE)) { 10361 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; 10362 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 10363 !tg3_flag(tp, IS_5788)) { 10364 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 10365 } 10366 } 10367 10368 if (tg3_flag(tp, PCI_EXPRESS)) 10369 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 10370 10371 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 10372 tp->dma_limit = 0; 10373 if (tp->dev->mtu <= ETH_DATA_LEN) { 10374 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR; 10375 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K; 10376 } 10377 } 10378 10379 if (tg3_flag(tp, HW_TSO_1) || 10380 tg3_flag(tp, HW_TSO_2) || 10381 tg3_flag(tp, HW_TSO_3)) 10382 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; 10383 10384 if (tg3_flag(tp, 57765_PLUS) || 10385 tg3_asic_rev(tp) == ASIC_REV_5785 || 10386 tg3_asic_rev(tp) == ASIC_REV_57780) 10387 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; 10388 10389 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10390 tg3_asic_rev(tp) == ASIC_REV_5762) 10391 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET; 10392 10393 if (tg3_asic_rev(tp) == ASIC_REV_5761 || 10394 tg3_asic_rev(tp) == ASIC_REV_5784 || 10395 tg3_asic_rev(tp) == ASIC_REV_5785 || 10396 tg3_asic_rev(tp) == ASIC_REV_57780 || 10397 tg3_flag(tp, 57765_PLUS)) { 10398 u32 tgtreg; 10399 10400 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10401 tgtreg = TG3_RDMA_RSRVCTRL_REG2; 10402 else 10403 tgtreg = TG3_RDMA_RSRVCTRL_REG; 10404 10405 val = tr32(tgtreg); 10406 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 10407 tg3_asic_rev(tp) == ASIC_REV_5762) { 10408 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK | 10409 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK | 10410 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK); 10411 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B | 10412 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K | 10413 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K; 10414 } 10415 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 10416 } 10417 10418 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 10419 tg3_asic_rev(tp) == ASIC_REV_5720 || 10420 tg3_asic_rev(tp) == ASIC_REV_5762) { 10421 u32 tgtreg; 10422 10423 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10424 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2; 10425 else 10426 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL; 
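
		/* Read-modify-write: the tw32() below ORs in the
		 * _BLEN_BD_4K and _BLEN_LSO_4K bits, i.e. 4K read DMA
		 * burst lengths for buffer descriptor and LSO reads.
		 */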
10427 10428 val = tr32(tgtreg); 10429 tw32(tgtreg, val | 10430 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K | 10431 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K); 10432 } 10433 10434 /* Receive/send statistics. */ 10435 if (tg3_flag(tp, 5750_PLUS)) { 10436 val = tr32(RCVLPC_STATS_ENABLE); 10437 val &= ~RCVLPC_STATSENAB_DACK_FIX; 10438 tw32(RCVLPC_STATS_ENABLE, val); 10439 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && 10440 tg3_flag(tp, TSO_CAPABLE)) { 10441 val = tr32(RCVLPC_STATS_ENABLE); 10442 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; 10443 tw32(RCVLPC_STATS_ENABLE, val); 10444 } else { 10445 tw32(RCVLPC_STATS_ENABLE, 0xffffff); 10446 } 10447 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE); 10448 tw32(SNDDATAI_STATSENAB, 0xffffff); 10449 tw32(SNDDATAI_STATSCTRL, 10450 (SNDDATAI_SCTRL_ENABLE | 10451 SNDDATAI_SCTRL_FASTUPD)); 10452 10453 /* Setup host coalescing engine. */ 10454 tw32(HOSTCC_MODE, 0); 10455 for (i = 0; i < 2000; i++) { 10456 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE)) 10457 break; 10458 udelay(10); 10459 } 10460 10461 __tg3_set_coalesce(tp, &tp->coal); 10462 10463 if (!tg3_flag(tp, 5705_PLUS)) { 10464 /* Status/statistics block address. See tg3_timer, 10465 * the tg3_periodic_fetch_stats call there, and 10466 * tg3_get_stats to see how this works for 5705/5750 chips. 10467 */ 10468 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 10469 ((u64) tp->stats_mapping >> 32)); 10470 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, 10471 ((u64) tp->stats_mapping & 0xffffffff)); 10472 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK); 10473 10474 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK); 10475 10476 /* Clear statistics and status block memory areas */ 10477 for (i = NIC_SRAM_STATS_BLK; 10478 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE; 10479 i += sizeof(u32)) { 10480 tg3_write_mem(tp, i, 0); 10481 udelay(40); 10482 } 10483 } 10484 10485 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode); 10486 10487 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); 10488 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); 10489 if (!tg3_flag(tp, 5705_PLUS)) 10490 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); 10491 10492 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { 10493 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 10494 /* reset to prevent losing 1st rx packet intermittently */ 10495 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 10496 udelay(10); 10497 } 10498 10499 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | 10500 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | 10501 MAC_MODE_FHDE_ENABLE; 10502 if (tg3_flag(tp, ENABLE_APE)) 10503 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 10504 if (!tg3_flag(tp, 5705_PLUS) && 10505 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 10506 tg3_asic_rev(tp) != ASIC_REV_5700) 10507 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 10508 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); 10509 udelay(40); 10510 10511 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). 10512 * If TG3_FLAG_IS_NIC is zero, we should read the 10513 * register to preserve the GPIO settings for LOMs. The GPIOs, 10514 * whether used as inputs or outputs, are set by boot code after 10515 * reset. 
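	 * (Hence the merge below: for LOMs the GPIO bits currently in
	 * GRC_LOCAL_CTRL are folded into tp->grc_local_ctrl rather than
	 * overwritten, and GPIO1 is then forced high where EEPROM write
	 * protection is in effect.)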
10516 */ 10517 if (!tg3_flag(tp, IS_NIC)) { 10518 u32 gpio_mask; 10519 10520 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | 10521 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 | 10522 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2; 10523 10524 if (tg3_asic_rev(tp) == ASIC_REV_5752) 10525 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 | 10526 GRC_LCLCTRL_GPIO_OUTPUT3; 10527 10528 if (tg3_asic_rev(tp) == ASIC_REV_5755) 10529 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL; 10530 10531 tp->grc_local_ctrl &= ~gpio_mask; 10532 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; 10533 10534 /* GPIO1 must be driven high for eeprom write protect */ 10535 if (tg3_flag(tp, EEPROM_WRITE_PROT)) 10536 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | 10537 GRC_LCLCTRL_GPIO_OUTPUT1); 10538 } 10539 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 10540 udelay(100); 10541 10542 if (tg3_flag(tp, USING_MSIX)) { 10543 val = tr32(MSGINT_MODE); 10544 val |= MSGINT_MODE_ENABLE; 10545 if (tp->irq_cnt > 1) 10546 val |= MSGINT_MODE_MULTIVEC_EN; 10547 if (!tg3_flag(tp, 1SHOT_MSI)) 10548 val |= MSGINT_MODE_ONE_SHOT_DISABLE; 10549 tw32(MSGINT_MODE, val); 10550 } 10551 10552 if (!tg3_flag(tp, 5705_PLUS)) { 10553 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); 10554 udelay(40); 10555 } 10556 10557 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB | 10558 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB | 10559 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB | 10560 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | 10561 WDMAC_MODE_LNGREAD_ENAB); 10562 10563 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 10564 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 10565 if (tg3_flag(tp, TSO_CAPABLE) && 10566 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 || 10567 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) { 10568 /* nothing */ 10569 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 10570 !tg3_flag(tp, IS_5788)) { 10571 val |= WDMAC_MODE_RX_ACCEL; 10572 } 10573 } 10574 10575 /* Enable host coalescing bug fix */ 10576 if (tg3_flag(tp, 5755_PLUS)) 10577 val |= WDMAC_MODE_STATUS_TAG_FIX; 10578 10579 if (tg3_asic_rev(tp) == ASIC_REV_5785) 10580 val |= WDMAC_MODE_BURST_ALL_DATA; 10581 10582 tw32_f(WDMAC_MODE, val); 10583 udelay(40); 10584 10585 if (tg3_flag(tp, PCIX_MODE)) { 10586 u16 pcix_cmd; 10587 10588 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 10589 &pcix_cmd); 10590 if (tg3_asic_rev(tp) == ASIC_REV_5703) { 10591 pcix_cmd &= ~PCI_X_CMD_MAX_READ; 10592 pcix_cmd |= PCI_X_CMD_READ_2K; 10593 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) { 10594 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ); 10595 pcix_cmd |= PCI_X_CMD_READ_2K; 10596 } 10597 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 10598 pcix_cmd); 10599 } 10600 10601 tw32_f(RDMAC_MODE, rdmac_mode); 10602 udelay(40); 10603 10604 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 10605 tg3_asic_rev(tp) == ASIC_REV_5720) { 10606 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) { 10607 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp)) 10608 break; 10609 } 10610 if (i < TG3_NUM_RDMA_CHANNELS) { 10611 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); 10612 val |= tg3_lso_rd_dma_workaround_bit(tp); 10613 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); 10614 tg3_flag_set(tp, 5719_5720_RDMA_BUG); 10615 } 10616 } 10617 10618 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); 10619 if (!tg3_flag(tp, 5705_PLUS)) 10620 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); 10621 10622 if (tg3_asic_rev(tp) == ASIC_REV_5761) 10623 tw32(SNDDATAC_MODE, 10624 SNDDATAC_MODE_ENABLE | 
SNDDATAC_MODE_CDELAY); 10625 else 10626 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE); 10627 10628 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); 10629 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); 10630 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ; 10631 if (tg3_flag(tp, LRG_PROD_RING_CAP)) 10632 val |= RCVDBDI_MODE_LRG_RING_SZ; 10633 tw32(RCVDBDI_MODE, val); 10634 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); 10635 if (tg3_flag(tp, HW_TSO_1) || 10636 tg3_flag(tp, HW_TSO_2) || 10637 tg3_flag(tp, HW_TSO_3)) 10638 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); 10639 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; 10640 if (tg3_flag(tp, ENABLE_TSS)) 10641 val |= SNDBDI_MODE_MULTI_TXQ_EN; 10642 tw32(SNDBDI_MODE, val); 10643 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); 10644 10645 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { 10646 err = tg3_load_5701_a0_firmware_fix(tp); 10647 if (err) 10648 return err; 10649 } 10650 10651 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 10652 /* Ignore any errors for the firmware download. If download 10653 * fails, the device will operate with EEE disabled 10654 */ 10655 tg3_load_57766_firmware(tp); 10656 } 10657 10658 if (tg3_flag(tp, TSO_CAPABLE)) { 10659 err = tg3_load_tso_firmware(tp); 10660 if (err) 10661 return err; 10662 } 10663 10664 tp->tx_mode = TX_MODE_ENABLE; 10665 10666 if (tg3_flag(tp, 5755_PLUS) || 10667 tg3_asic_rev(tp) == ASIC_REV_5906) 10668 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX; 10669 10670 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 10671 tg3_asic_rev(tp) == ASIC_REV_5762) { 10672 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE; 10673 tp->tx_mode &= ~val; 10674 tp->tx_mode |= tr32(MAC_TX_MODE) & val; 10675 } 10676 10677 tw32_f(MAC_TX_MODE, tp->tx_mode); 10678 udelay(100); 10679 10680 if (tg3_flag(tp, ENABLE_RSS)) { 10681 u32 rss_key[10]; 10682 10683 tg3_rss_write_indir_tbl(tp); 10684 10685 netdev_rss_key_fill(rss_key, 10 * sizeof(u32)); 10686 10687 for (i = 0; i < 10 ; i++) 10688 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]); 10689 } 10690 10691 tp->rx_mode = RX_MODE_ENABLE; 10692 if (tg3_flag(tp, 5755_PLUS)) 10693 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; 10694 10695 if (tg3_asic_rev(tp) == ASIC_REV_5762) 10696 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX; 10697 10698 if (tg3_flag(tp, ENABLE_RSS)) 10699 tp->rx_mode |= RX_MODE_RSS_ENABLE | 10700 RX_MODE_RSS_ITBL_HASH_BITS_7 | 10701 RX_MODE_RSS_IPV6_HASH_EN | 10702 RX_MODE_RSS_TCP_IPV6_HASH_EN | 10703 RX_MODE_RSS_IPV4_HASH_EN | 10704 RX_MODE_RSS_TCP_IPV4_HASH_EN; 10705 10706 tw32_f(MAC_RX_MODE, tp->rx_mode); 10707 udelay(10); 10708 10709 tw32(MAC_LED_CTRL, tp->led_ctrl); 10710 10711 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 10712 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 10713 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 10714 udelay(10); 10715 } 10716 tw32_f(MAC_RX_MODE, tp->rx_mode); 10717 udelay(10); 10718 10719 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 10720 if ((tg3_asic_rev(tp) == ASIC_REV_5704) && 10721 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) { 10722 /* Set drive transmission level to 1.2V */ 10723 /* only if the signal pre-emphasis bit is not set */ 10724 val = tr32(MAC_SERDES_CFG); 10725 val &= 0xfffff000; 10726 val |= 0x880; 10727 tw32(MAC_SERDES_CFG, val); 10728 } 10729 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) 10730 tw32(MAC_SERDES_CFG, 0x616000); 10731 } 10732 10733 /* Prevent chip from dropping frames when flow control 10734 * is enabled. 
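	 * (Presumably in units of max-sized rx frames: the watermark
	 * written to MAC_LOW_WMARK_MAX_RX_FRAME below is 1 for
	 * 57765-class parts and 2 for everything else.)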
10735 */ 10736 if (tg3_flag(tp, 57765_CLASS)) 10737 val = 1; 10738 else 10739 val = 2; 10740 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val); 10741 10742 if (tg3_asic_rev(tp) == ASIC_REV_5704 && 10743 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 10744 /* Use hardware link auto-negotiation */ 10745 tg3_flag_set(tp, HW_AUTONEG); 10746 } 10747 10748 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 10749 tg3_asic_rev(tp) == ASIC_REV_5714) { 10750 u32 tmp; 10751 10752 tmp = tr32(SERDES_RX_CTRL); 10753 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT); 10754 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT; 10755 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT; 10756 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 10757 } 10758 10759 if (!tg3_flag(tp, USE_PHYLIB)) { 10760 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 10761 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; 10762 10763 err = tg3_setup_phy(tp, false); 10764 if (err) 10765 return err; 10766 10767 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 10768 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { 10769 u32 tmp; 10770 10771 /* Clear CRC stats. */ 10772 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { 10773 tg3_writephy(tp, MII_TG3_TEST1, 10774 tmp | MII_TG3_TEST1_CRC_EN); 10775 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp); 10776 } 10777 } 10778 } 10779 10780 __tg3_set_rx_mode(tp->dev); 10781 10782 /* Initialize receive rules. */ 10783 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK); 10784 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK); 10785 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK); 10786 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); 10787 10788 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) 10789 limit = 8; 10790 else 10791 limit = 16; 10792 if (tg3_flag(tp, ENABLE_ASF)) 10793 limit -= 4; 10794 switch (limit) { 10795 case 16: 10796 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); 10797 fallthrough; 10798 case 15: 10799 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); 10800 fallthrough; 10801 case 14: 10802 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); 10803 fallthrough; 10804 case 13: 10805 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); 10806 fallthrough; 10807 case 12: 10808 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); 10809 fallthrough; 10810 case 11: 10811 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); 10812 fallthrough; 10813 case 10: 10814 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); 10815 fallthrough; 10816 case 9: 10817 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); 10818 fallthrough; 10819 case 8: 10820 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); 10821 fallthrough; 10822 case 7: 10823 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); 10824 fallthrough; 10825 case 6: 10826 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); 10827 fallthrough; 10828 case 5: 10829 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0); 10830 fallthrough; 10831 case 4: 10832 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ 10833 case 3: 10834 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */ 10835 case 2: 10836 case 1: 10837 10838 default: 10839 break; 10840 } 10841 10842 if (tg3_flag(tp, ENABLE_APE)) 10843 /* Write our heartbeat update interval to APE. */ 10844 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, 10845 APE_HOST_HEARTBEAT_INT_5SEC); 10846 10847 tg3_write_sig_post_reset(tp, RESET_KIND_INIT); 10848 10849 return 0; 10850 } 10851 10852 /* Called at device open time to get the chip ready for 10853 * packet processing. Invoked with tp->lock held. 
10854 */ 10855 static int tg3_init_hw(struct tg3 *tp, bool reset_phy) 10856 { 10857 /* Chip may have been just powered on. If so, the boot code may still 10858 * be running initialization. Wait for it to finish to avoid races in 10859 * accessing the hardware. 10860 */ 10861 tg3_enable_register_access(tp); 10862 tg3_poll_fw(tp); 10863 10864 tg3_switch_clocks(tp); 10865 10866 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 10867 10868 return tg3_reset_hw(tp, reset_phy); 10869 } 10870 10871 #ifdef CONFIG_TIGON3_HWMON 10872 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir) 10873 { 10874 u32 off, len = TG3_OCIR_LEN; 10875 int i; 10876 10877 for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) { 10878 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len); 10879 10880 if (ocir->signature != TG3_OCIR_SIG_MAGIC || 10881 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE)) 10882 memset(ocir, 0, len); 10883 } 10884 } 10885 10886 /* sysfs attributes for hwmon */ 10887 static ssize_t tg3_show_temp(struct device *dev, 10888 struct device_attribute *devattr, char *buf) 10889 { 10890 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 10891 struct tg3 *tp = dev_get_drvdata(dev); 10892 u32 temperature; 10893 10894 spin_lock_bh(&tp->lock); 10895 tg3_ape_scratchpad_read(tp, &temperature, attr->index, 10896 sizeof(temperature)); 10897 spin_unlock_bh(&tp->lock); 10898 return sprintf(buf, "%u\n", temperature * 1000); 10899 } 10900 10901 10902 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL, 10903 TG3_TEMP_SENSOR_OFFSET); 10904 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL, 10905 TG3_TEMP_CAUTION_OFFSET); 10906 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL, 10907 TG3_TEMP_MAX_OFFSET); 10908 10909 static struct attribute *tg3_attrs[] = { 10910 &sensor_dev_attr_temp1_input.dev_attr.attr, 10911 &sensor_dev_attr_temp1_crit.dev_attr.attr, 10912 &sensor_dev_attr_temp1_max.dev_attr.attr, 10913 NULL 10914 }; 10915 ATTRIBUTE_GROUPS(tg3); 10916 10917 static void tg3_hwmon_close(struct tg3 *tp) 10918 { 10919 if (tp->hwmon_dev) { 10920 hwmon_device_unregister(tp->hwmon_dev); 10921 tp->hwmon_dev = NULL; 10922 } 10923 } 10924 10925 static void tg3_hwmon_open(struct tg3 *tp) 10926 { 10927 int i; 10928 u32 size = 0; 10929 struct pci_dev *pdev = tp->pdev; 10930 struct tg3_ocir ocirs[TG3_SD_NUM_RECS]; 10931 10932 tg3_sd_scan_scratchpad(tp, ocirs); 10933 10934 for (i = 0; i < TG3_SD_NUM_RECS; i++) { 10935 if (!ocirs[i].src_data_length) 10936 continue; 10937 10938 size += ocirs[i].src_hdr_length; 10939 size += ocirs[i].src_data_length; 10940 } 10941 10942 if (!size) 10943 return; 10944 10945 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3", 10946 tp, tg3_groups); 10947 if (IS_ERR(tp->hwmon_dev)) { 10948 tp->hwmon_dev = NULL; 10949 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n"); 10950 } 10951 } 10952 #else 10953 static inline void tg3_hwmon_close(struct tg3 *tp) { } 10954 static inline void tg3_hwmon_open(struct tg3 *tp) { } 10955 #endif /* CONFIG_TIGON3_HWMON */ 10956 10957 10958 #define TG3_STAT_ADD32(PSTAT, REG) \ 10959 do { u32 __val = tr32(REG); \ 10960 (PSTAT)->low += __val; \ 10961 if ((PSTAT)->low < __val) \ 10962 (PSTAT)->high += 1; \ 10963 } while (0) 10964 10965 static void tg3_periodic_fetch_stats(struct tg3 *tp) 10966 { 10967 struct tg3_hw_stats *sp = tp->hw_stats; 10968 10969 if (!tp->link_up) 10970 return; 10971 10972 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS); 10973 
TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS); 10974 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT); 10975 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT); 10976 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS); 10977 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS); 10978 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS); 10979 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED); 10980 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL); 10981 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL); 10982 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST); 10983 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST); 10984 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST); 10985 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) && 10986 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low + 10987 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) { 10988 u32 val; 10989 10990 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); 10991 val &= ~tg3_lso_rd_dma_workaround_bit(tp); 10992 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); 10993 tg3_flag_clear(tp, 5719_5720_RDMA_BUG); 10994 } 10995 10996 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS); 10997 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS); 10998 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST); 10999 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST); 11000 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST); 11001 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS); 11002 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS); 11003 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD); 11004 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD); 11005 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD); 11006 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED); 11007 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG); 11008 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS); 11009 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); 11010 11011 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); 11012 if (tg3_asic_rev(tp) != ASIC_REV_5717 && 11013 tg3_asic_rev(tp) != ASIC_REV_5762 && 11014 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 && 11015 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) { 11016 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); 11017 } else { 11018 u32 val = tr32(HOSTCC_FLOW_ATTN); 11019 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 
		      1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}

static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}

static void tg3_timer(struct timer_list *t)
{
	struct tg3 *tp = from_timer(tp, t, timer);

	spin_lock(&tp->lock);

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
		spin_unlock(&tp->lock);
		goto restart_timer;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because, when using non-tagged
		 * IRQ status, the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second.
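	 * tp->timer_counter reloads from tp->timer_multiplier, which
	 * tg3_timer_init() computes as HZ / timer_offset (the number of
	 * timer firings per second), so the block below runs on every
	 * timer_multiplier-th firing.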
*/ 11095 if (!--tp->timer_counter) { 11096 if (tg3_flag(tp, 5705_PLUS)) 11097 tg3_periodic_fetch_stats(tp); 11098 11099 if (tp->setlpicnt && !--tp->setlpicnt) 11100 tg3_phy_eee_enable(tp); 11101 11102 if (tg3_flag(tp, USE_LINKCHG_REG)) { 11103 u32 mac_stat; 11104 int phy_event; 11105 11106 mac_stat = tr32(MAC_STATUS); 11107 11108 phy_event = 0; 11109 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) { 11110 if (mac_stat & MAC_STATUS_MI_INTERRUPT) 11111 phy_event = 1; 11112 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED) 11113 phy_event = 1; 11114 11115 if (phy_event) 11116 tg3_setup_phy(tp, false); 11117 } else if (tg3_flag(tp, POLL_SERDES)) { 11118 u32 mac_stat = tr32(MAC_STATUS); 11119 int need_setup = 0; 11120 11121 if (tp->link_up && 11122 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) { 11123 need_setup = 1; 11124 } 11125 if (!tp->link_up && 11126 (mac_stat & (MAC_STATUS_PCS_SYNCED | 11127 MAC_STATUS_SIGNAL_DET))) { 11128 need_setup = 1; 11129 } 11130 if (need_setup) { 11131 if (!tp->serdes_counter) { 11132 tw32_f(MAC_MODE, 11133 (tp->mac_mode & 11134 ~MAC_MODE_PORT_MODE_MASK)); 11135 udelay(40); 11136 tw32_f(MAC_MODE, tp->mac_mode); 11137 udelay(40); 11138 } 11139 tg3_setup_phy(tp, false); 11140 } 11141 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 11142 tg3_flag(tp, 5780_CLASS)) { 11143 tg3_serdes_parallel_detect(tp); 11144 } else if (tg3_flag(tp, POLL_CPMU_LINK)) { 11145 u32 cpmu = tr32(TG3_CPMU_STATUS); 11146 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) == 11147 TG3_CPMU_STATUS_LINK_MASK); 11148 11149 if (link_up != tp->link_up) 11150 tg3_setup_phy(tp, false); 11151 } 11152 11153 tp->timer_counter = tp->timer_multiplier; 11154 } 11155 11156 /* Heartbeat is only sent once every 2 seconds. 11157 * 11158 * The heartbeat is to tell the ASF firmware that the host 11159 * driver is still alive. In the event that the OS crashes, 11160 * ASF needs to reset the hardware to free up the FIFO space 11161 * that may be filled with rx packets destined for the host. 11162 * If the FIFO is full, ASF will no longer function properly. 11163 * 11164 * Unintended resets have been reported on real time kernels 11165 * where the timer doesn't run on time. Netpoll will also have 11166 * same problem. 11167 * 11168 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware 11169 * to check the ring condition when the heartbeat is expiring 11170 * before doing the reset. This will prevent most unintended 11171 * resets. 
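	 *
	 * (Timing note: tp->asf_counter reloads from tp->asf_multiplier,
	 * which tg3_timer_init() sets to (HZ / timer_offset) *
	 * TG3_FW_UPDATE_FREQ_SEC, i.e. one heartbeat every
	 * TG3_FW_UPDATE_FREQ_SEC seconds.)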
11172 */ 11173 if (!--tp->asf_counter) { 11174 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { 11175 tg3_wait_for_event_ack(tp); 11176 11177 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, 11178 FWCMD_NICDRV_ALIVE3); 11179 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); 11180 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 11181 TG3_FW_UPDATE_TIMEOUT_SEC); 11182 11183 tg3_generate_fw_event(tp); 11184 } 11185 tp->asf_counter = tp->asf_multiplier; 11186 } 11187 11188 /* Update the APE heartbeat every 5 seconds.*/ 11189 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL); 11190 11191 spin_unlock(&tp->lock); 11192 11193 restart_timer: 11194 tp->timer.expires = jiffies + tp->timer_offset; 11195 add_timer(&tp->timer); 11196 } 11197 11198 static void tg3_timer_init(struct tg3 *tp) 11199 { 11200 if (tg3_flag(tp, TAGGED_STATUS) && 11201 tg3_asic_rev(tp) != ASIC_REV_5717 && 11202 !tg3_flag(tp, 57765_CLASS)) 11203 tp->timer_offset = HZ; 11204 else 11205 tp->timer_offset = HZ / 10; 11206 11207 BUG_ON(tp->timer_offset > HZ); 11208 11209 tp->timer_multiplier = (HZ / tp->timer_offset); 11210 tp->asf_multiplier = (HZ / tp->timer_offset) * 11211 TG3_FW_UPDATE_FREQ_SEC; 11212 11213 timer_setup(&tp->timer, tg3_timer, 0); 11214 } 11215 11216 static void tg3_timer_start(struct tg3 *tp) 11217 { 11218 tp->asf_counter = tp->asf_multiplier; 11219 tp->timer_counter = tp->timer_multiplier; 11220 11221 tp->timer.expires = jiffies + tp->timer_offset; 11222 add_timer(&tp->timer); 11223 } 11224 11225 static void tg3_timer_stop(struct tg3 *tp) 11226 { 11227 del_timer_sync(&tp->timer); 11228 } 11229 11230 /* Restart hardware after configuration changes, self-test, etc. 11231 * Invoked with tp->lock held. 11232 */ 11233 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy) 11234 __releases(tp->lock) 11235 __acquires(tp->lock) 11236 { 11237 int err; 11238 11239 err = tg3_init_hw(tp, reset_phy); 11240 if (err) { 11241 netdev_err(tp->dev, 11242 "Failed to re-initialize device, aborting\n"); 11243 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11244 tg3_full_unlock(tp); 11245 tg3_timer_stop(tp); 11246 tp->irq_sync = 0; 11247 tg3_napi_enable(tp); 11248 dev_close(tp->dev); 11249 tg3_full_lock(tp, 0); 11250 } 11251 return err; 11252 } 11253 11254 static void tg3_reset_task(struct work_struct *work) 11255 { 11256 struct tg3 *tp = container_of(work, struct tg3, reset_task); 11257 int err; 11258 11259 rtnl_lock(); 11260 tg3_full_lock(tp, 0); 11261 11262 if (tp->pcierr_recovery || !netif_running(tp->dev)) { 11263 tg3_flag_clear(tp, RESET_TASK_PENDING); 11264 tg3_full_unlock(tp); 11265 rtnl_unlock(); 11266 return; 11267 } 11268 11269 tg3_full_unlock(tp); 11270 11271 tg3_phy_stop(tp); 11272 11273 tg3_netif_stop(tp); 11274 11275 tg3_full_lock(tp, 1); 11276 11277 if (tg3_flag(tp, TX_RECOVERY_PENDING)) { 11278 tp->write32_tx_mbox = tg3_write32_tx_mbox; 11279 tp->write32_rx_mbox = tg3_write_flush_reg32; 11280 tg3_flag_set(tp, MBOX_WRITE_REORDER); 11281 tg3_flag_clear(tp, TX_RECOVERY_PENDING); 11282 } 11283 11284 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); 11285 err = tg3_init_hw(tp, true); 11286 if (err) { 11287 tg3_full_unlock(tp); 11288 tp->irq_sync = 0; 11289 tg3_napi_enable(tp); 11290 /* Clear this flag so that tg3_reset_task_cancel() will not 11291 * call cancel_work_sync() and wait forever. 
11292 */ 11293 tg3_flag_clear(tp, RESET_TASK_PENDING); 11294 dev_close(tp->dev); 11295 goto out; 11296 } 11297 11298 tg3_netif_start(tp); 11299 tg3_full_unlock(tp); 11300 tg3_phy_start(tp); 11301 tg3_flag_clear(tp, RESET_TASK_PENDING); 11302 out: 11303 rtnl_unlock(); 11304 } 11305 11306 static int tg3_request_irq(struct tg3 *tp, int irq_num) 11307 { 11308 irq_handler_t fn; 11309 unsigned long flags; 11310 char *name; 11311 struct tg3_napi *tnapi = &tp->napi[irq_num]; 11312 11313 if (tp->irq_cnt == 1) 11314 name = tp->dev->name; 11315 else { 11316 name = &tnapi->irq_lbl[0]; 11317 if (tnapi->tx_buffers && tnapi->rx_rcb) 11318 snprintf(name, IFNAMSIZ, 11319 "%s-txrx-%d", tp->dev->name, irq_num); 11320 else if (tnapi->tx_buffers) 11321 snprintf(name, IFNAMSIZ, 11322 "%s-tx-%d", tp->dev->name, irq_num); 11323 else if (tnapi->rx_rcb) 11324 snprintf(name, IFNAMSIZ, 11325 "%s-rx-%d", tp->dev->name, irq_num); 11326 else 11327 snprintf(name, IFNAMSIZ, 11328 "%s-%d", tp->dev->name, irq_num); 11329 name[IFNAMSIZ-1] = 0; 11330 } 11331 11332 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { 11333 fn = tg3_msi; 11334 if (tg3_flag(tp, 1SHOT_MSI)) 11335 fn = tg3_msi_1shot; 11336 flags = 0; 11337 } else { 11338 fn = tg3_interrupt; 11339 if (tg3_flag(tp, TAGGED_STATUS)) 11340 fn = tg3_interrupt_tagged; 11341 flags = IRQF_SHARED; 11342 } 11343 11344 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); 11345 } 11346 11347 static int tg3_test_interrupt(struct tg3 *tp) 11348 { 11349 struct tg3_napi *tnapi = &tp->napi[0]; 11350 struct net_device *dev = tp->dev; 11351 int err, i, intr_ok = 0; 11352 u32 val; 11353 11354 if (!netif_running(dev)) 11355 return -ENODEV; 11356 11357 tg3_disable_ints(tp); 11358 11359 free_irq(tnapi->irq_vec, tnapi); 11360 11361 /* 11362 * Turn off MSI one shot mode. Otherwise this test has no 11363 * observable way to know whether the interrupt was delivered. 11364 */ 11365 if (tg3_flag(tp, 57765_PLUS)) { 11366 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; 11367 tw32(MSGINT_MODE, val); 11368 } 11369 11370 err = request_irq(tnapi->irq_vec, tg3_test_isr, 11371 IRQF_SHARED, dev->name, tnapi); 11372 if (err) 11373 return err; 11374 11375 tnapi->hw_status->status &= ~SD_STATUS_UPDATED; 11376 tg3_enable_ints(tp); 11377 11378 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 11379 tnapi->coal_now); 11380 11381 for (i = 0; i < 5; i++) { 11382 u32 int_mbox, misc_host_ctrl; 11383 11384 int_mbox = tr32_mailbox(tnapi->int_mbox); 11385 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 11386 11387 if ((int_mbox != 0) || 11388 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) { 11389 intr_ok = 1; 11390 break; 11391 } 11392 11393 if (tg3_flag(tp, 57765_PLUS) && 11394 tnapi->hw_status->status_tag != tnapi->last_tag) 11395 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); 11396 11397 msleep(10); 11398 } 11399 11400 tg3_disable_ints(tp); 11401 11402 free_irq(tnapi->irq_vec, tnapi); 11403 11404 err = tg3_request_irq(tp, 0); 11405 11406 if (err) 11407 return err; 11408 11409 if (intr_ok) { 11410 /* Reenable MSI one shot mode. 
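		 * (It was turned off at the top of this function because
		 * otherwise the test had no observable way to see the
		 * interrupt; only chips flagged 1SHOT_MSI get it turned
		 * back on.)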
*/ 11411 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) { 11412 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; 11413 tw32(MSGINT_MODE, val); 11414 } 11415 return 0; 11416 } 11417 11418 return -EIO; 11419 } 11420 11421 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is 11422 * successfully restored 11423 */ 11424 static int tg3_test_msi(struct tg3 *tp) 11425 { 11426 int err; 11427 u16 pci_cmd; 11428 11429 if (!tg3_flag(tp, USING_MSI)) 11430 return 0; 11431 11432 /* Turn off SERR reporting in case MSI terminates with Master 11433 * Abort. 11434 */ 11435 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 11436 pci_write_config_word(tp->pdev, PCI_COMMAND, 11437 pci_cmd & ~PCI_COMMAND_SERR); 11438 11439 err = tg3_test_interrupt(tp); 11440 11441 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 11442 11443 if (!err) 11444 return 0; 11445 11446 /* other failures */ 11447 if (err != -EIO) 11448 return err; 11449 11450 /* MSI test failed, go back to INTx mode */ 11451 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching " 11452 "to INTx mode. Please report this failure to the PCI " 11453 "maintainer and include system chipset information\n"); 11454 11455 free_irq(tp->napi[0].irq_vec, &tp->napi[0]); 11456 11457 pci_disable_msi(tp->pdev); 11458 11459 tg3_flag_clear(tp, USING_MSI); 11460 tp->napi[0].irq_vec = tp->pdev->irq; 11461 11462 err = tg3_request_irq(tp, 0); 11463 if (err) 11464 return err; 11465 11466 /* Need to reset the chip because the MSI cycle may have terminated 11467 * with Master Abort. 11468 */ 11469 tg3_full_lock(tp, 1); 11470 11471 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11472 err = tg3_init_hw(tp, true); 11473 11474 tg3_full_unlock(tp); 11475 11476 if (err) 11477 free_irq(tp->napi[0].irq_vec, &tp->napi[0]); 11478 11479 return err; 11480 } 11481 11482 static int tg3_request_firmware(struct tg3 *tp) 11483 { 11484 const struct tg3_firmware_hdr *fw_hdr; 11485 11486 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) { 11487 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n", 11488 tp->fw_needed); 11489 return -ENOENT; 11490 } 11491 11492 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; 11493 11494 /* Firmware blob starts with version numbers, followed by 11495 * start address and _full_ length including BSS sections 11496 * (which must be longer than the actual data, of course 11497 */ 11498 11499 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */ 11500 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) { 11501 netdev_err(tp->dev, "bogus length %d in \"%s\"\n", 11502 tp->fw_len, tp->fw_needed); 11503 release_firmware(tp->fw); 11504 tp->fw = NULL; 11505 return -EINVAL; 11506 } 11507 11508 /* We no longer need firmware; we have it. */ 11509 tp->fw_needed = NULL; 11510 return 0; 11511 } 11512 11513 static u32 tg3_irq_count(struct tg3 *tp) 11514 { 11515 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt); 11516 11517 if (irq_cnt > 1) { 11518 /* We want as many rx rings enabled as there are cpus. 11519 * In multiqueue MSI-X mode, the first MSI-X vector 11520 * only deals with link interrupts, etc, so we add 11521 * one to the number of vectors we are requesting. 
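		 * (e.g. rxq_cnt = 4 asks for five vectors, capped at
		 * tp->irq_max: vector 0 for link/misc events, vectors 1-4
		 * for the rings.)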
11522 */ 11523 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max); 11524 } 11525 11526 return irq_cnt; 11527 } 11528 11529 static bool tg3_enable_msix(struct tg3 *tp) 11530 { 11531 int i, rc; 11532 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS]; 11533 11534 tp->txq_cnt = tp->txq_req; 11535 tp->rxq_cnt = tp->rxq_req; 11536 if (!tp->rxq_cnt) 11537 tp->rxq_cnt = netif_get_num_default_rss_queues(); 11538 if (tp->rxq_cnt > tp->rxq_max) 11539 tp->rxq_cnt = tp->rxq_max; 11540 11541 /* Disable multiple TX rings by default. Simple round-robin hardware 11542 * scheduling of the TX rings can cause starvation of rings with 11543 * small packets when other rings have TSO or jumbo packets. 11544 */ 11545 if (!tp->txq_req) 11546 tp->txq_cnt = 1; 11547 11548 tp->irq_cnt = tg3_irq_count(tp); 11549 11550 for (i = 0; i < tp->irq_max; i++) { 11551 msix_ent[i].entry = i; 11552 msix_ent[i].vector = 0; 11553 } 11554 11555 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt); 11556 if (rc < 0) { 11557 return false; 11558 } else if (rc < tp->irq_cnt) { 11559 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n", 11560 tp->irq_cnt, rc); 11561 tp->irq_cnt = rc; 11562 tp->rxq_cnt = max(rc - 1, 1); 11563 if (tp->txq_cnt) 11564 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max); 11565 } 11566 11567 for (i = 0; i < tp->irq_max; i++) 11568 tp->napi[i].irq_vec = msix_ent[i].vector; 11569 11570 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) { 11571 pci_disable_msix(tp->pdev); 11572 return false; 11573 } 11574 11575 if (tp->irq_cnt == 1) 11576 return true; 11577 11578 tg3_flag_set(tp, ENABLE_RSS); 11579 11580 if (tp->txq_cnt > 1) 11581 tg3_flag_set(tp, ENABLE_TSS); 11582 11583 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt); 11584 11585 return true; 11586 } 11587 11588 static void tg3_ints_init(struct tg3 *tp) 11589 { 11590 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) && 11591 !tg3_flag(tp, TAGGED_STATUS)) { 11592 /* All MSI supporting chips should support tagged 11593 * status. Assert that this is the case. 11594 */ 11595 netdev_warn(tp->dev, 11596 "MSI without TAGGED_STATUS? 
Not using MSI\n"); 11597 goto defcfg; 11598 } 11599 11600 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp)) 11601 tg3_flag_set(tp, USING_MSIX); 11602 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0) 11603 tg3_flag_set(tp, USING_MSI); 11604 11605 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { 11606 u32 msi_mode = tr32(MSGINT_MODE); 11607 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) 11608 msi_mode |= MSGINT_MODE_MULTIVEC_EN; 11609 if (!tg3_flag(tp, 1SHOT_MSI)) 11610 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE; 11611 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); 11612 } 11613 defcfg: 11614 if (!tg3_flag(tp, USING_MSIX)) { 11615 tp->irq_cnt = 1; 11616 tp->napi[0].irq_vec = tp->pdev->irq; 11617 } 11618 11619 if (tp->irq_cnt == 1) { 11620 tp->txq_cnt = 1; 11621 tp->rxq_cnt = 1; 11622 netif_set_real_num_tx_queues(tp->dev, 1); 11623 netif_set_real_num_rx_queues(tp->dev, 1); 11624 } 11625 } 11626 11627 static void tg3_ints_fini(struct tg3 *tp) 11628 { 11629 if (tg3_flag(tp, USING_MSIX)) 11630 pci_disable_msix(tp->pdev); 11631 else if (tg3_flag(tp, USING_MSI)) 11632 pci_disable_msi(tp->pdev); 11633 tg3_flag_clear(tp, USING_MSI); 11634 tg3_flag_clear(tp, USING_MSIX); 11635 tg3_flag_clear(tp, ENABLE_RSS); 11636 tg3_flag_clear(tp, ENABLE_TSS); 11637 } 11638 11639 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, 11640 bool init) 11641 { 11642 struct net_device *dev = tp->dev; 11643 int i, err; 11644 11645 /* 11646 * Setup interrupts first so we know how 11647 * many NAPI resources to allocate 11648 */ 11649 tg3_ints_init(tp); 11650 11651 tg3_rss_check_indir_tbl(tp); 11652 11653 /* The placement of this call is tied 11654 * to the setup and use of Host TX descriptors. 11655 */ 11656 err = tg3_alloc_consistent(tp); 11657 if (err) 11658 goto out_ints_fini; 11659 11660 tg3_napi_init(tp); 11661 11662 tg3_napi_enable(tp); 11663 11664 for (i = 0; i < tp->irq_cnt; i++) { 11665 err = tg3_request_irq(tp, i); 11666 if (err) { 11667 for (i--; i >= 0; i--) { 11668 struct tg3_napi *tnapi = &tp->napi[i]; 11669 11670 free_irq(tnapi->irq_vec, tnapi); 11671 } 11672 goto out_napi_fini; 11673 } 11674 } 11675 11676 tg3_full_lock(tp, 0); 11677 11678 if (init) 11679 tg3_ape_driver_state_change(tp, RESET_KIND_INIT); 11680 11681 err = tg3_init_hw(tp, reset_phy); 11682 if (err) { 11683 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11684 tg3_free_rings(tp); 11685 } 11686 11687 tg3_full_unlock(tp); 11688 11689 if (err) 11690 goto out_free_irq; 11691 11692 if (test_irq && tg3_flag(tp, USING_MSI)) { 11693 err = tg3_test_msi(tp); 11694 11695 if (err) { 11696 tg3_full_lock(tp, 0); 11697 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11698 tg3_free_rings(tp); 11699 tg3_full_unlock(tp); 11700 11701 goto out_napi_fini; 11702 } 11703 11704 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) { 11705 u32 val = tr32(PCIE_TRANSACTION_CFG); 11706 11707 tw32(PCIE_TRANSACTION_CFG, 11708 val | PCIE_TRANS_CFG_1SHOT_MSI); 11709 } 11710 } 11711 11712 tg3_phy_start(tp); 11713 11714 tg3_hwmon_open(tp); 11715 11716 tg3_full_lock(tp, 0); 11717 11718 tg3_timer_start(tp); 11719 tg3_flag_set(tp, INIT_COMPLETE); 11720 tg3_enable_ints(tp); 11721 11722 tg3_ptp_resume(tp); 11723 11724 tg3_full_unlock(tp); 11725 11726 netif_tx_start_all_queues(dev); 11727 11728 /* 11729 * Reset loopback feature if it was turned on while the device was down 11730 * make sure that it's installed properly now. 
11731 */ 11732 if (dev->features & NETIF_F_LOOPBACK) 11733 tg3_set_loopback(dev, dev->features); 11734 11735 return 0; 11736 11737 out_free_irq: 11738 for (i = tp->irq_cnt - 1; i >= 0; i--) { 11739 struct tg3_napi *tnapi = &tp->napi[i]; 11740 free_irq(tnapi->irq_vec, tnapi); 11741 } 11742 11743 out_napi_fini: 11744 tg3_napi_disable(tp); 11745 tg3_napi_fini(tp); 11746 tg3_free_consistent(tp); 11747 11748 out_ints_fini: 11749 tg3_ints_fini(tp); 11750 11751 return err; 11752 } 11753 11754 static void tg3_stop(struct tg3 *tp) 11755 { 11756 int i; 11757 11758 tg3_reset_task_cancel(tp); 11759 tg3_netif_stop(tp); 11760 11761 tg3_timer_stop(tp); 11762 11763 tg3_hwmon_close(tp); 11764 11765 tg3_phy_stop(tp); 11766 11767 tg3_full_lock(tp, 1); 11768 11769 tg3_disable_ints(tp); 11770 11771 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11772 tg3_free_rings(tp); 11773 tg3_flag_clear(tp, INIT_COMPLETE); 11774 11775 tg3_full_unlock(tp); 11776 11777 for (i = tp->irq_cnt - 1; i >= 0; i--) { 11778 struct tg3_napi *tnapi = &tp->napi[i]; 11779 free_irq(tnapi->irq_vec, tnapi); 11780 } 11781 11782 tg3_ints_fini(tp); 11783 11784 tg3_napi_fini(tp); 11785 11786 tg3_free_consistent(tp); 11787 } 11788 11789 static int tg3_open(struct net_device *dev) 11790 { 11791 struct tg3 *tp = netdev_priv(dev); 11792 int err; 11793 11794 if (tp->pcierr_recovery) { 11795 netdev_err(dev, "Failed to open device. PCI error recovery " 11796 "in progress\n"); 11797 return -EAGAIN; 11798 } 11799 11800 if (tp->fw_needed) { 11801 err = tg3_request_firmware(tp); 11802 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 11803 if (err) { 11804 netdev_warn(tp->dev, "EEE capability disabled\n"); 11805 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; 11806 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { 11807 netdev_warn(tp->dev, "EEE capability restored\n"); 11808 tp->phy_flags |= TG3_PHYFLG_EEE_CAP; 11809 } 11810 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { 11811 if (err) 11812 return err; 11813 } else if (err) { 11814 netdev_warn(tp->dev, "TSO capability disabled\n"); 11815 tg3_flag_clear(tp, TSO_CAPABLE); 11816 } else if (!tg3_flag(tp, TSO_CAPABLE)) { 11817 netdev_notice(tp->dev, "TSO capability restored\n"); 11818 tg3_flag_set(tp, TSO_CAPABLE); 11819 } 11820 } 11821 11822 tg3_carrier_off(tp); 11823 11824 err = tg3_power_up(tp); 11825 if (err) 11826 return err; 11827 11828 tg3_full_lock(tp, 0); 11829 11830 tg3_disable_ints(tp); 11831 tg3_flag_clear(tp, INIT_COMPLETE); 11832 11833 tg3_full_unlock(tp); 11834 11835 err = tg3_start(tp, 11836 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN), 11837 true, true); 11838 if (err) { 11839 tg3_frob_aux_power(tp, false); 11840 pci_set_power_state(tp->pdev, PCI_D3hot); 11841 } 11842 11843 return err; 11844 } 11845 11846 static int tg3_close(struct net_device *dev) 11847 { 11848 struct tg3 *tp = netdev_priv(dev); 11849 11850 if (tp->pcierr_recovery) { 11851 netdev_err(dev, "Failed to close device. 
PCI error recovery " 11852 "in progress\n"); 11853 return -EAGAIN; 11854 } 11855 11856 tg3_stop(tp); 11857 11858 if (pci_device_is_present(tp->pdev)) { 11859 tg3_power_down_prepare(tp); 11860 11861 tg3_carrier_off(tp); 11862 } 11863 return 0; 11864 } 11865 11866 static inline u64 get_stat64(tg3_stat64_t *val) 11867 { 11868 return ((u64)val->high << 32) | ((u64)val->low); 11869 } 11870 11871 static u64 tg3_calc_crc_errors(struct tg3 *tp) 11872 { 11873 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11874 11875 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 11876 (tg3_asic_rev(tp) == ASIC_REV_5700 || 11877 tg3_asic_rev(tp) == ASIC_REV_5701)) { 11878 u32 val; 11879 11880 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) { 11881 tg3_writephy(tp, MII_TG3_TEST1, 11882 val | MII_TG3_TEST1_CRC_EN); 11883 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val); 11884 } else 11885 val = 0; 11886 11887 tp->phy_crc_errors += val; 11888 11889 return tp->phy_crc_errors; 11890 } 11891 11892 return get_stat64(&hw_stats->rx_fcs_errors); 11893 } 11894 11895 #define ESTAT_ADD(member) \ 11896 estats->member = old_estats->member + \ 11897 get_stat64(&hw_stats->member) 11898 11899 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats) 11900 { 11901 struct tg3_ethtool_stats *old_estats = &tp->estats_prev; 11902 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11903 11904 ESTAT_ADD(rx_octets); 11905 ESTAT_ADD(rx_fragments); 11906 ESTAT_ADD(rx_ucast_packets); 11907 ESTAT_ADD(rx_mcast_packets); 11908 ESTAT_ADD(rx_bcast_packets); 11909 ESTAT_ADD(rx_fcs_errors); 11910 ESTAT_ADD(rx_align_errors); 11911 ESTAT_ADD(rx_xon_pause_rcvd); 11912 ESTAT_ADD(rx_xoff_pause_rcvd); 11913 ESTAT_ADD(rx_mac_ctrl_rcvd); 11914 ESTAT_ADD(rx_xoff_entered); 11915 ESTAT_ADD(rx_frame_too_long_errors); 11916 ESTAT_ADD(rx_jabbers); 11917 ESTAT_ADD(rx_undersize_packets); 11918 ESTAT_ADD(rx_in_length_errors); 11919 ESTAT_ADD(rx_out_length_errors); 11920 ESTAT_ADD(rx_64_or_less_octet_packets); 11921 ESTAT_ADD(rx_65_to_127_octet_packets); 11922 ESTAT_ADD(rx_128_to_255_octet_packets); 11923 ESTAT_ADD(rx_256_to_511_octet_packets); 11924 ESTAT_ADD(rx_512_to_1023_octet_packets); 11925 ESTAT_ADD(rx_1024_to_1522_octet_packets); 11926 ESTAT_ADD(rx_1523_to_2047_octet_packets); 11927 ESTAT_ADD(rx_2048_to_4095_octet_packets); 11928 ESTAT_ADD(rx_4096_to_8191_octet_packets); 11929 ESTAT_ADD(rx_8192_to_9022_octet_packets); 11930 11931 ESTAT_ADD(tx_octets); 11932 ESTAT_ADD(tx_collisions); 11933 ESTAT_ADD(tx_xon_sent); 11934 ESTAT_ADD(tx_xoff_sent); 11935 ESTAT_ADD(tx_flow_control); 11936 ESTAT_ADD(tx_mac_errors); 11937 ESTAT_ADD(tx_single_collisions); 11938 ESTAT_ADD(tx_mult_collisions); 11939 ESTAT_ADD(tx_deferred); 11940 ESTAT_ADD(tx_excessive_collisions); 11941 ESTAT_ADD(tx_late_collisions); 11942 ESTAT_ADD(tx_collide_2times); 11943 ESTAT_ADD(tx_collide_3times); 11944 ESTAT_ADD(tx_collide_4times); 11945 ESTAT_ADD(tx_collide_5times); 11946 ESTAT_ADD(tx_collide_6times); 11947 ESTAT_ADD(tx_collide_7times); 11948 ESTAT_ADD(tx_collide_8times); 11949 ESTAT_ADD(tx_collide_9times); 11950 ESTAT_ADD(tx_collide_10times); 11951 ESTAT_ADD(tx_collide_11times); 11952 ESTAT_ADD(tx_collide_12times); 11953 ESTAT_ADD(tx_collide_13times); 11954 ESTAT_ADD(tx_collide_14times); 11955 ESTAT_ADD(tx_collide_15times); 11956 ESTAT_ADD(tx_ucast_packets); 11957 ESTAT_ADD(tx_mcast_packets); 11958 ESTAT_ADD(tx_bcast_packets); 11959 ESTAT_ADD(tx_carrier_sense_errors); 11960 ESTAT_ADD(tx_discards); 11961 ESTAT_ADD(tx_errors); 11962 11963 ESTAT_ADD(dma_writeq_full); 11964 
ESTAT_ADD(dma_write_prioq_full); 11965 ESTAT_ADD(rxbds_empty); 11966 ESTAT_ADD(rx_discards); 11967 ESTAT_ADD(rx_errors); 11968 ESTAT_ADD(rx_threshold_hit); 11969 11970 ESTAT_ADD(dma_readq_full); 11971 ESTAT_ADD(dma_read_prioq_full); 11972 ESTAT_ADD(tx_comp_queue_full); 11973 11974 ESTAT_ADD(ring_set_send_prod_index); 11975 ESTAT_ADD(ring_status_update); 11976 ESTAT_ADD(nic_irqs); 11977 ESTAT_ADD(nic_avoided_irqs); 11978 ESTAT_ADD(nic_tx_threshold_hit); 11979 11980 ESTAT_ADD(mbuf_lwm_thresh_hit); 11981 } 11982 11983 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats) 11984 { 11985 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev; 11986 struct tg3_hw_stats *hw_stats = tp->hw_stats; 11987 unsigned long rx_dropped; 11988 unsigned long tx_dropped; 11989 int i; 11990 11991 stats->rx_packets = old_stats->rx_packets + 11992 get_stat64(&hw_stats->rx_ucast_packets) + 11993 get_stat64(&hw_stats->rx_mcast_packets) + 11994 get_stat64(&hw_stats->rx_bcast_packets); 11995 11996 stats->tx_packets = old_stats->tx_packets + 11997 get_stat64(&hw_stats->tx_ucast_packets) + 11998 get_stat64(&hw_stats->tx_mcast_packets) + 11999 get_stat64(&hw_stats->tx_bcast_packets); 12000 12001 stats->rx_bytes = old_stats->rx_bytes + 12002 get_stat64(&hw_stats->rx_octets); 12003 stats->tx_bytes = old_stats->tx_bytes + 12004 get_stat64(&hw_stats->tx_octets); 12005 12006 stats->rx_errors = old_stats->rx_errors + 12007 get_stat64(&hw_stats->rx_errors); 12008 stats->tx_errors = old_stats->tx_errors + 12009 get_stat64(&hw_stats->tx_errors) + 12010 get_stat64(&hw_stats->tx_mac_errors) + 12011 get_stat64(&hw_stats->tx_carrier_sense_errors) + 12012 get_stat64(&hw_stats->tx_discards); 12013 12014 stats->multicast = old_stats->multicast + 12015 get_stat64(&hw_stats->rx_mcast_packets); 12016 stats->collisions = old_stats->collisions + 12017 get_stat64(&hw_stats->tx_collisions); 12018 12019 stats->rx_length_errors = old_stats->rx_length_errors + 12020 get_stat64(&hw_stats->rx_frame_too_long_errors) + 12021 get_stat64(&hw_stats->rx_undersize_packets); 12022 12023 stats->rx_frame_errors = old_stats->rx_frame_errors + 12024 get_stat64(&hw_stats->rx_align_errors); 12025 stats->tx_aborted_errors = old_stats->tx_aborted_errors + 12026 get_stat64(&hw_stats->tx_discards); 12027 stats->tx_carrier_errors = old_stats->tx_carrier_errors + 12028 get_stat64(&hw_stats->tx_carrier_sense_errors); 12029 12030 stats->rx_crc_errors = old_stats->rx_crc_errors + 12031 tg3_calc_crc_errors(tp); 12032 12033 stats->rx_missed_errors = old_stats->rx_missed_errors + 12034 get_stat64(&hw_stats->rx_discards); 12035 12036 /* Aggregate per-queue counters. The per-queue counters are updated 12037 * by a single writer, race-free. The result computed by this loop 12038 * might not be 100% accurate (counters can be updated in the middle of 12039 * the loop) but the next tg3_get_nstats() will recompute the current 12040 * value so it is acceptable. 12041 * 12042 * Note that these counters wrap around at 4G on 32bit machines. 
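	 * (The rx_dropped/tx_dropped locals below are unsigned long,
	 * which is 32 bits wide on those machines; hence the 4G wrap.)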
12043 */ 12044 rx_dropped = (unsigned long)(old_stats->rx_dropped); 12045 tx_dropped = (unsigned long)(old_stats->tx_dropped); 12046 12047 for (i = 0; i < tp->irq_cnt; i++) { 12048 struct tg3_napi *tnapi = &tp->napi[i]; 12049 12050 rx_dropped += tnapi->rx_dropped; 12051 tx_dropped += tnapi->tx_dropped; 12052 } 12053 12054 stats->rx_dropped = rx_dropped; 12055 stats->tx_dropped = tx_dropped; 12056 } 12057 12058 static int tg3_get_regs_len(struct net_device *dev) 12059 { 12060 return TG3_REG_BLK_SIZE; 12061 } 12062 12063 static void tg3_get_regs(struct net_device *dev, 12064 struct ethtool_regs *regs, void *_p) 12065 { 12066 struct tg3 *tp = netdev_priv(dev); 12067 12068 regs->version = 0; 12069 12070 memset(_p, 0, TG3_REG_BLK_SIZE); 12071 12072 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 12073 return; 12074 12075 tg3_full_lock(tp, 0); 12076 12077 tg3_dump_legacy_regs(tp, (u32 *)_p); 12078 12079 tg3_full_unlock(tp); 12080 } 12081 12082 static int tg3_get_eeprom_len(struct net_device *dev) 12083 { 12084 struct tg3 *tp = netdev_priv(dev); 12085 12086 return tp->nvram_size; 12087 } 12088 12089 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 12090 { 12091 struct tg3 *tp = netdev_priv(dev); 12092 int ret, cpmu_restore = 0; 12093 u8 *pd; 12094 u32 i, offset, len, b_offset, b_count, cpmu_val = 0; 12095 __be32 val; 12096 12097 if (tg3_flag(tp, NO_NVRAM)) 12098 return -EINVAL; 12099 12100 offset = eeprom->offset; 12101 len = eeprom->len; 12102 eeprom->len = 0; 12103 12104 eeprom->magic = TG3_EEPROM_MAGIC; 12105 12106 /* Override clock, link aware and link idle modes */ 12107 if (tg3_flag(tp, CPMU_PRESENT)) { 12108 cpmu_val = tr32(TG3_CPMU_CTRL); 12109 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE | 12110 CPMU_CTRL_LINK_IDLE_MODE)) { 12111 tw32(TG3_CPMU_CTRL, cpmu_val & 12112 ~(CPMU_CTRL_LINK_AWARE_MODE | 12113 CPMU_CTRL_LINK_IDLE_MODE)); 12114 cpmu_restore = 1; 12115 } 12116 } 12117 tg3_override_clk(tp); 12118 12119 if (offset & 3) { 12120 /* adjustments to start on required 4 byte boundary */ 12121 b_offset = offset & 3; 12122 b_count = 4 - b_offset; 12123 if (b_count > len) { 12124 /* i.e. 
offset=1 len=2 */ 12125 b_count = len; 12126 } 12127 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); 12128 if (ret) 12129 goto eeprom_done; 12130 memcpy(data, ((char *)&val) + b_offset, b_count); 12131 len -= b_count; 12132 offset += b_count; 12133 eeprom->len += b_count; 12134 } 12135 12136 /* read bytes up to the last 4 byte boundary */ 12137 pd = &data[eeprom->len]; 12138 for (i = 0; i < (len - (len & 3)); i += 4) { 12139 ret = tg3_nvram_read_be32(tp, offset + i, &val); 12140 if (ret) { 12141 if (i) 12142 i -= 4; 12143 eeprom->len += i; 12144 goto eeprom_done; 12145 } 12146 memcpy(pd + i, &val, 4); 12147 if (need_resched()) { 12148 if (signal_pending(current)) { 12149 eeprom->len += i; 12150 ret = -EINTR; 12151 goto eeprom_done; 12152 } 12153 cond_resched(); 12154 } 12155 } 12156 eeprom->len += i; 12157 12158 if (len & 3) { 12159 /* read last bytes not ending on 4 byte boundary */ 12160 pd = &data[eeprom->len]; 12161 b_count = len & 3; 12162 b_offset = offset + len - b_count; 12163 ret = tg3_nvram_read_be32(tp, b_offset, &val); 12164 if (ret) 12165 goto eeprom_done; 12166 memcpy(pd, &val, b_count); 12167 eeprom->len += b_count; 12168 } 12169 ret = 0; 12170 12171 eeprom_done: 12172 /* Restore clock, link aware and link idle modes */ 12173 tg3_restore_clk(tp); 12174 if (cpmu_restore) 12175 tw32(TG3_CPMU_CTRL, cpmu_val); 12176 12177 return ret; 12178 } 12179 12180 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 12181 { 12182 struct tg3 *tp = netdev_priv(dev); 12183 int ret; 12184 u32 offset, len, b_offset, odd_len; 12185 u8 *buf; 12186 __be32 start = 0, end; 12187 12188 if (tg3_flag(tp, NO_NVRAM) || 12189 eeprom->magic != TG3_EEPROM_MAGIC) 12190 return -EINVAL; 12191 12192 offset = eeprom->offset; 12193 len = eeprom->len; 12194 12195 if ((b_offset = (offset & 3))) { 12196 /* adjustments to start on required 4 byte boundary */ 12197 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start); 12198 if (ret) 12199 return ret; 12200 len += b_offset; 12201 offset &= ~3; 12202 if (len < 4) 12203 len = 4; 12204 } 12205 12206 odd_len = 0; 12207 if (len & 3) { 12208 /* adjustments to end on required 4 byte boundary */ 12209 odd_len = 1; 12210 len = (len + 3) & ~3; 12211 ret = tg3_nvram_read_be32(tp, offset+len-4, &end); 12212 if (ret) 12213 return ret; 12214 } 12215 12216 buf = data; 12217 if (b_offset || odd_len) { 12218 buf = kmalloc(len, GFP_KERNEL); 12219 if (!buf) 12220 return -ENOMEM; 12221 if (b_offset) 12222 memcpy(buf, &start, 4); 12223 if (odd_len) 12224 memcpy(buf+len-4, &end, 4); 12225 memcpy(buf + b_offset, data, eeprom->len); 12226 } 12227 12228 ret = tg3_nvram_write_block(tp, offset, len, buf); 12229 12230 if (buf != data) 12231 kfree(buf); 12232 12233 return ret; 12234 } 12235 12236 static int tg3_get_link_ksettings(struct net_device *dev, 12237 struct ethtool_link_ksettings *cmd) 12238 { 12239 struct tg3 *tp = netdev_priv(dev); 12240 u32 supported, advertising; 12241 12242 if (tg3_flag(tp, USE_PHYLIB)) { 12243 struct phy_device *phydev; 12244 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12245 return -EAGAIN; 12246 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12247 phy_ethtool_ksettings_get(phydev, cmd); 12248 12249 return 0; 12250 } 12251 12252 supported = (SUPPORTED_Autoneg); 12253 12254 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) 12255 supported |= (SUPPORTED_1000baseT_Half | 12256 SUPPORTED_1000baseT_Full); 12257 12258 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 12259 supported |= (SUPPORTED_100baseT_Half | 12260 
SUPPORTED_100baseT_Full | 12261 SUPPORTED_10baseT_Half | 12262 SUPPORTED_10baseT_Full | 12263 SUPPORTED_TP); 12264 cmd->base.port = PORT_TP; 12265 } else { 12266 supported |= SUPPORTED_FIBRE; 12267 cmd->base.port = PORT_FIBRE; 12268 } 12269 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 12270 supported); 12271 12272 advertising = tp->link_config.advertising; 12273 if (tg3_flag(tp, PAUSE_AUTONEG)) { 12274 if (tp->link_config.flowctrl & FLOW_CTRL_RX) { 12275 if (tp->link_config.flowctrl & FLOW_CTRL_TX) { 12276 advertising |= ADVERTISED_Pause; 12277 } else { 12278 advertising |= ADVERTISED_Pause | 12279 ADVERTISED_Asym_Pause; 12280 } 12281 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) { 12282 advertising |= ADVERTISED_Asym_Pause; 12283 } 12284 } 12285 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 12286 advertising); 12287 12288 if (netif_running(dev) && tp->link_up) { 12289 cmd->base.speed = tp->link_config.active_speed; 12290 cmd->base.duplex = tp->link_config.active_duplex; 12291 ethtool_convert_legacy_u32_to_link_mode( 12292 cmd->link_modes.lp_advertising, 12293 tp->link_config.rmt_adv); 12294 12295 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { 12296 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE) 12297 cmd->base.eth_tp_mdix = ETH_TP_MDI_X; 12298 else 12299 cmd->base.eth_tp_mdix = ETH_TP_MDI; 12300 } 12301 } else { 12302 cmd->base.speed = SPEED_UNKNOWN; 12303 cmd->base.duplex = DUPLEX_UNKNOWN; 12304 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; 12305 } 12306 cmd->base.phy_address = tp->phy_addr; 12307 cmd->base.autoneg = tp->link_config.autoneg; 12308 return 0; 12309 } 12310 12311 static int tg3_set_link_ksettings(struct net_device *dev, 12312 const struct ethtool_link_ksettings *cmd) 12313 { 12314 struct tg3 *tp = netdev_priv(dev); 12315 u32 speed = cmd->base.speed; 12316 u32 advertising; 12317 12318 if (tg3_flag(tp, USE_PHYLIB)) { 12319 struct phy_device *phydev; 12320 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12321 return -EAGAIN; 12322 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12323 return phy_ethtool_ksettings_set(phydev, cmd); 12324 } 12325 12326 if (cmd->base.autoneg != AUTONEG_ENABLE && 12327 cmd->base.autoneg != AUTONEG_DISABLE) 12328 return -EINVAL; 12329 12330 if (cmd->base.autoneg == AUTONEG_DISABLE && 12331 cmd->base.duplex != DUPLEX_FULL && 12332 cmd->base.duplex != DUPLEX_HALF) 12333 return -EINVAL; 12334 12335 ethtool_convert_link_mode_to_legacy_u32(&advertising, 12336 cmd->link_modes.advertising); 12337 12338 if (cmd->base.autoneg == AUTONEG_ENABLE) { 12339 u32 mask = ADVERTISED_Autoneg | 12340 ADVERTISED_Pause | 12341 ADVERTISED_Asym_Pause; 12342 12343 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) 12344 mask |= ADVERTISED_1000baseT_Half | 12345 ADVERTISED_1000baseT_Full; 12346 12347 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 12348 mask |= ADVERTISED_100baseT_Half | 12349 ADVERTISED_100baseT_Full | 12350 ADVERTISED_10baseT_Half | 12351 ADVERTISED_10baseT_Full | 12352 ADVERTISED_TP; 12353 else 12354 mask |= ADVERTISED_FIBRE; 12355 12356 if (advertising & ~mask) 12357 return -EINVAL; 12358 12359 mask &= (ADVERTISED_1000baseT_Half | 12360 ADVERTISED_1000baseT_Full | 12361 ADVERTISED_100baseT_Half | 12362 ADVERTISED_100baseT_Full | 12363 ADVERTISED_10baseT_Half | 12364 ADVERTISED_10baseT_Full); 12365 12366 advertising &= mask; 12367 } else { 12368 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) { 12369 if (speed != SPEED_1000) 12370 return -EINVAL; 12371 12372 if (cmd->base.duplex != DUPLEX_FULL) 12373 return 
-EINVAL; 12374 } else { 12375 if (speed != SPEED_100 && 12376 speed != SPEED_10) 12377 return -EINVAL; 12378 } 12379 } 12380 12381 tg3_full_lock(tp, 0); 12382 12383 tp->link_config.autoneg = cmd->base.autoneg; 12384 if (cmd->base.autoneg == AUTONEG_ENABLE) { 12385 tp->link_config.advertising = (advertising | 12386 ADVERTISED_Autoneg); 12387 tp->link_config.speed = SPEED_UNKNOWN; 12388 tp->link_config.duplex = DUPLEX_UNKNOWN; 12389 } else { 12390 tp->link_config.advertising = 0; 12391 tp->link_config.speed = speed; 12392 tp->link_config.duplex = cmd->base.duplex; 12393 } 12394 12395 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 12396 12397 tg3_warn_mgmt_link_flap(tp); 12398 12399 if (netif_running(dev)) 12400 tg3_setup_phy(tp, true); 12401 12402 tg3_full_unlock(tp); 12403 12404 return 0; 12405 } 12406 12407 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 12408 { 12409 struct tg3 *tp = netdev_priv(dev); 12410 12411 strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); 12412 strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version)); 12413 strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info)); 12414 } 12415 12416 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 12417 { 12418 struct tg3 *tp = netdev_priv(dev); 12419 12420 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev)) 12421 wol->supported = WAKE_MAGIC; 12422 else 12423 wol->supported = 0; 12424 wol->wolopts = 0; 12425 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev)) 12426 wol->wolopts = WAKE_MAGIC; 12427 memset(&wol->sopass, 0, sizeof(wol->sopass)); 12428 } 12429 12430 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 12431 { 12432 struct tg3 *tp = netdev_priv(dev); 12433 struct device *dp = &tp->pdev->dev; 12434 12435 if (wol->wolopts & ~WAKE_MAGIC) 12436 return -EINVAL; 12437 if ((wol->wolopts & WAKE_MAGIC) && 12438 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp))) 12439 return -EINVAL; 12440 12441 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC); 12442 12443 if (device_may_wakeup(dp)) 12444 tg3_flag_set(tp, WOL_ENABLE); 12445 else 12446 tg3_flag_clear(tp, WOL_ENABLE); 12447 12448 return 0; 12449 } 12450 12451 static u32 tg3_get_msglevel(struct net_device *dev) 12452 { 12453 struct tg3 *tp = netdev_priv(dev); 12454 return tp->msg_enable; 12455 } 12456 12457 static void tg3_set_msglevel(struct net_device *dev, u32 value) 12458 { 12459 struct tg3 *tp = netdev_priv(dev); 12460 tp->msg_enable = value; 12461 } 12462 12463 static int tg3_nway_reset(struct net_device *dev) 12464 { 12465 struct tg3 *tp = netdev_priv(dev); 12466 int r; 12467 12468 if (!netif_running(dev)) 12469 return -EAGAIN; 12470 12471 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 12472 return -EINVAL; 12473 12474 tg3_warn_mgmt_link_flap(tp); 12475 12476 if (tg3_flag(tp, USE_PHYLIB)) { 12477 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 12478 return -EAGAIN; 12479 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); 12480 } else { 12481 u32 bmcr; 12482 12483 spin_lock_bh(&tp->lock); 12484 r = -EINVAL; 12485 tg3_readphy(tp, MII_BMCR, &bmcr); 12486 if (!tg3_readphy(tp, MII_BMCR, &bmcr) && 12487 ((bmcr & BMCR_ANENABLE) || 12488 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) { 12489 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | 12490 BMCR_ANENABLE); 12491 r = 0; 12492 } 12493 spin_unlock_bh(&tp->lock); 12494 } 12495 12496 return r; 12497 } 12498 12499 static void tg3_get_ringparam(struct net_device *dev, 12500 
struct ethtool_ringparam *ering, 12501 struct kernel_ethtool_ringparam *kernel_ering, 12502 struct netlink_ext_ack *extack) 12503 { 12504 struct tg3 *tp = netdev_priv(dev); 12505 12506 ering->rx_max_pending = tp->rx_std_ring_mask; 12507 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12508 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask; 12509 else 12510 ering->rx_jumbo_max_pending = 0; 12511 12512 ering->tx_max_pending = TG3_TX_RING_SIZE - 1; 12513 12514 ering->rx_pending = tp->rx_pending; 12515 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12516 ering->rx_jumbo_pending = tp->rx_jumbo_pending; 12517 else 12518 ering->rx_jumbo_pending = 0; 12519 12520 ering->tx_pending = tp->napi[0].tx_pending; 12521 } 12522 12523 static int tg3_set_ringparam(struct net_device *dev, 12524 struct ethtool_ringparam *ering, 12525 struct kernel_ethtool_ringparam *kernel_ering, 12526 struct netlink_ext_ack *extack) 12527 { 12528 struct tg3 *tp = netdev_priv(dev); 12529 int i, irq_sync = 0, err = 0; 12530 bool reset_phy = false; 12531 12532 if ((ering->rx_pending > tp->rx_std_ring_mask) || 12533 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || 12534 (ering->tx_pending > TG3_TX_RING_SIZE - 1) || 12535 (ering->tx_pending <= MAX_SKB_FRAGS) || 12536 (tg3_flag(tp, TSO_BUG) && 12537 (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) 12538 return -EINVAL; 12539 12540 if (netif_running(dev)) { 12541 tg3_phy_stop(tp); 12542 tg3_netif_stop(tp); 12543 irq_sync = 1; 12544 } 12545 12546 tg3_full_lock(tp, irq_sync); 12547 12548 tp->rx_pending = ering->rx_pending; 12549 12550 if (tg3_flag(tp, MAX_RXPEND_64) && 12551 tp->rx_pending > 63) 12552 tp->rx_pending = 63; 12553 12554 if (tg3_flag(tp, JUMBO_RING_ENABLE)) 12555 tp->rx_jumbo_pending = ering->rx_jumbo_pending; 12556 12557 for (i = 0; i < tp->irq_max; i++) 12558 tp->napi[i].tx_pending = ering->tx_pending; 12559 12560 if (netif_running(dev)) { 12561 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 12562 /* Reset PHY to avoid PHY lock up */ 12563 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 12564 tg3_asic_rev(tp) == ASIC_REV_5719 || 12565 tg3_asic_rev(tp) == ASIC_REV_5720) 12566 reset_phy = true; 12567 12568 err = tg3_restart_hw(tp, reset_phy); 12569 if (!err) 12570 tg3_netif_start(tp); 12571 } 12572 12573 tg3_full_unlock(tp); 12574 12575 if (irq_sync && !err) 12576 tg3_phy_start(tp); 12577 12578 return err; 12579 } 12580 12581 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 12582 { 12583 struct tg3 *tp = netdev_priv(dev); 12584 12585 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG); 12586 12587 if (tp->link_config.flowctrl & FLOW_CTRL_RX) 12588 epause->rx_pause = 1; 12589 else 12590 epause->rx_pause = 0; 12591 12592 if (tp->link_config.flowctrl & FLOW_CTRL_TX) 12593 epause->tx_pause = 1; 12594 else 12595 epause->tx_pause = 0; 12596 } 12597 12598 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 12599 { 12600 struct tg3 *tp = netdev_priv(dev); 12601 int err = 0; 12602 bool reset_phy = false; 12603 12604 if (tp->link_config.autoneg == AUTONEG_ENABLE) 12605 tg3_warn_mgmt_link_flap(tp); 12606 12607 if (tg3_flag(tp, USE_PHYLIB)) { 12608 struct phy_device *phydev; 12609 12610 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); 12611 12612 if (!phy_validate_pause(phydev, epause)) 12613 return -EINVAL; 12614 12615 tp->link_config.flowctrl = 0; 12616 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause); 12617 if (epause->rx_pause) { 12618 tp->link_config.flowctrl |= FLOW_CTRL_RX; 12619 12620 if (epause->tx_pause) { 12621 
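/* Both rx_pause and tx_pause were requested: symmetric flow control. */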
tp->link_config.flowctrl |= FLOW_CTRL_TX; 12622 } 12623 } else if (epause->tx_pause) { 12624 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12625 } 12626 12627 if (epause->autoneg) 12628 tg3_flag_set(tp, PAUSE_AUTONEG); 12629 else 12630 tg3_flag_clear(tp, PAUSE_AUTONEG); 12631 12632 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { 12633 if (phydev->autoneg) { 12634 /* phy_set_asym_pause() will 12635 * renegotiate the link to inform our 12636 * link partner of our flow control 12637 * settings, even if the flow control 12638 * is forced. Let tg3_adjust_link() 12639 * do the final flow control setup. 12640 */ 12641 return 0; 12642 } 12643 12644 if (!epause->autoneg) 12645 tg3_setup_flow_control(tp, 0, 0); 12646 } 12647 } else { 12648 int irq_sync = 0; 12649 12650 if (netif_running(dev)) { 12651 tg3_netif_stop(tp); 12652 irq_sync = 1; 12653 } 12654 12655 tg3_full_lock(tp, irq_sync); 12656 12657 if (epause->autoneg) 12658 tg3_flag_set(tp, PAUSE_AUTONEG); 12659 else 12660 tg3_flag_clear(tp, PAUSE_AUTONEG); 12661 if (epause->rx_pause) 12662 tp->link_config.flowctrl |= FLOW_CTRL_RX; 12663 else 12664 tp->link_config.flowctrl &= ~FLOW_CTRL_RX; 12665 if (epause->tx_pause) 12666 tp->link_config.flowctrl |= FLOW_CTRL_TX; 12667 else 12668 tp->link_config.flowctrl &= ~FLOW_CTRL_TX; 12669 12670 if (netif_running(dev)) { 12671 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 12672 /* Reset PHY to avoid PHY lock up */ 12673 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 12674 tg3_asic_rev(tp) == ASIC_REV_5719 || 12675 tg3_asic_rev(tp) == ASIC_REV_5720) 12676 reset_phy = true; 12677 12678 err = tg3_restart_hw(tp, reset_phy); 12679 if (!err) 12680 tg3_netif_start(tp); 12681 } 12682 12683 tg3_full_unlock(tp); 12684 } 12685 12686 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; 12687 12688 return err; 12689 } 12690 12691 static int tg3_get_sset_count(struct net_device *dev, int sset) 12692 { 12693 switch (sset) { 12694 case ETH_SS_TEST: 12695 return TG3_NUM_TEST; 12696 case ETH_SS_STATS: 12697 return TG3_NUM_STATS; 12698 default: 12699 return -EOPNOTSUPP; 12700 } 12701 } 12702 12703 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, 12704 u32 *rules __always_unused) 12705 { 12706 struct tg3 *tp = netdev_priv(dev); 12707 12708 if (!tg3_flag(tp, SUPPORT_MSIX)) 12709 return -EOPNOTSUPP; 12710 12711 switch (info->cmd) { 12712 case ETHTOOL_GRXRINGS: 12713 if (netif_running(tp->dev)) 12714 info->data = tp->rxq_cnt; 12715 else { 12716 info->data = num_online_cpus(); 12717 if (info->data > TG3_RSS_MAX_NUM_QS) 12718 info->data = TG3_RSS_MAX_NUM_QS; 12719 } 12720 12721 return 0; 12722 12723 default: 12724 return -EOPNOTSUPP; 12725 } 12726 } 12727 12728 static u32 tg3_get_rxfh_indir_size(struct net_device *dev) 12729 { 12730 u32 size = 0; 12731 struct tg3 *tp = netdev_priv(dev); 12732 12733 if (tg3_flag(tp, SUPPORT_MSIX)) 12734 size = TG3_RSS_INDIR_TBL_SIZE; 12735 12736 return size; 12737 } 12738 12739 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) 12740 { 12741 struct tg3 *tp = netdev_priv(dev); 12742 int i; 12743 12744 if (hfunc) 12745 *hfunc = ETH_RSS_HASH_TOP; 12746 if (!indir) 12747 return 0; 12748 12749 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) 12750 indir[i] = tp->rss_ind_tbl[i]; 12751 12752 return 0; 12753 } 12754 12755 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, 12756 const u8 hfunc) 12757 { 12758 struct tg3 *tp = netdev_priv(dev); 12759 size_t i; 12760 12761 /* We require at least one supported parameter to be changed and no 12762 * change 
in any of the unsupported parameters 12763 */ 12764 if (key || 12765 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) 12766 return -EOPNOTSUPP; 12767 12768 if (!indir) 12769 return 0; 12770 12771 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) 12772 tp->rss_ind_tbl[i] = indir[i]; 12773 12774 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS)) 12775 return 0; 12776 12777 /* It is legal to write the indirection 12778 * table while the device is running. 12779 */ 12780 tg3_full_lock(tp, 0); 12781 tg3_rss_write_indir_tbl(tp); 12782 tg3_full_unlock(tp); 12783 12784 return 0; 12785 } 12786 12787 static void tg3_get_channels(struct net_device *dev, 12788 struct ethtool_channels *channel) 12789 { 12790 struct tg3 *tp = netdev_priv(dev); 12791 u32 deflt_qs = netif_get_num_default_rss_queues(); 12792 12793 channel->max_rx = tp->rxq_max; 12794 channel->max_tx = tp->txq_max; 12795 12796 if (netif_running(dev)) { 12797 channel->rx_count = tp->rxq_cnt; 12798 channel->tx_count = tp->txq_cnt; 12799 } else { 12800 if (tp->rxq_req) 12801 channel->rx_count = tp->rxq_req; 12802 else 12803 channel->rx_count = min(deflt_qs, tp->rxq_max); 12804 12805 if (tp->txq_req) 12806 channel->tx_count = tp->txq_req; 12807 else 12808 channel->tx_count = min(deflt_qs, tp->txq_max); 12809 } 12810 } 12811 12812 static int tg3_set_channels(struct net_device *dev, 12813 struct ethtool_channels *channel) 12814 { 12815 struct tg3 *tp = netdev_priv(dev); 12816 12817 if (!tg3_flag(tp, SUPPORT_MSIX)) 12818 return -EOPNOTSUPP; 12819 12820 if (channel->rx_count > tp->rxq_max || 12821 channel->tx_count > tp->txq_max) 12822 return -EINVAL; 12823 12824 tp->rxq_req = channel->rx_count; 12825 tp->txq_req = channel->tx_count; 12826 12827 if (!netif_running(dev)) 12828 return 0; 12829 12830 tg3_stop(tp); 12831 12832 tg3_carrier_off(tp); 12833 12834 tg3_start(tp, true, false, false); 12835 12836 return 0; 12837 } 12838 12839 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 12840 { 12841 switch (stringset) { 12842 case ETH_SS_STATS: 12843 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); 12844 break; 12845 case ETH_SS_TEST: 12846 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys)); 12847 break; 12848 default: 12849 WARN_ON(1); /* we need a WARN() */ 12850 break; 12851 } 12852 } 12853 12854 static int tg3_set_phys_id(struct net_device *dev, 12855 enum ethtool_phys_id_state state) 12856 { 12857 struct tg3 *tp = netdev_priv(dev); 12858 12859 switch (state) { 12860 case ETHTOOL_ID_ACTIVE: 12861 return 1; /* cycle on/off once per second */ 12862 12863 case ETHTOOL_ID_ON: 12864 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | 12865 LED_CTRL_1000MBPS_ON | 12866 LED_CTRL_100MBPS_ON | 12867 LED_CTRL_10MBPS_ON | 12868 LED_CTRL_TRAFFIC_OVERRIDE | 12869 LED_CTRL_TRAFFIC_BLINK | 12870 LED_CTRL_TRAFFIC_LED); 12871 break; 12872 12873 case ETHTOOL_ID_OFF: 12874 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | 12875 LED_CTRL_TRAFFIC_OVERRIDE); 12876 break; 12877 12878 case ETHTOOL_ID_INACTIVE: 12879 tw32(MAC_LED_CTRL, tp->led_ctrl); 12880 break; 12881 } 12882 12883 return 0; 12884 } 12885 12886 static void tg3_get_ethtool_stats(struct net_device *dev, 12887 struct ethtool_stats *estats, u64 *tmp_stats) 12888 { 12889 struct tg3 *tp = netdev_priv(dev); 12890 12891 if (tp->hw_stats) 12892 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats); 12893 else 12894 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats)); 12895 } 12896 12897 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen) 12898 {
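/* Locate and read the VPD image. For TG3_EEPROM_MAGIC parts, walk the
 * NVM directory for an EXTVPD entry, falling back to the fixed
 * TG3_NVM_VPD_OFF window; otherwise defer to the PCI VPD capability
 * via pci_vpd_alloc(). The caller must kfree() the returned buffer.
 */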
12899 int i; 12900 __be32 *buf; 12901 u32 offset = 0, len = 0; 12902 u32 magic, val; 12903 12904 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic)) 12905 return NULL; 12906 12907 if (magic == TG3_EEPROM_MAGIC) { 12908 for (offset = TG3_NVM_DIR_START; 12909 offset < TG3_NVM_DIR_END; 12910 offset += TG3_NVM_DIRENT_SIZE) { 12911 if (tg3_nvram_read(tp, offset, &val)) 12912 return NULL; 12913 12914 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == 12915 TG3_NVM_DIRTYPE_EXTVPD) 12916 break; 12917 } 12918 12919 if (offset != TG3_NVM_DIR_END) { 12920 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4; 12921 if (tg3_nvram_read(tp, offset + 4, &offset)) 12922 return NULL; 12923 12924 offset = tg3_nvram_logical_addr(tp, offset); 12925 } 12926 12927 if (!offset || !len) { 12928 offset = TG3_NVM_VPD_OFF; 12929 len = TG3_NVM_VPD_LEN; 12930 } 12931 12932 buf = kmalloc(len, GFP_KERNEL); 12933 if (!buf) 12934 return NULL; 12935 12936 for (i = 0; i < len; i += 4) { 12937 /* The data is in little-endian format in NVRAM. 12938 * Use the big-endian read routines to preserve 12939 * the byte order as it exists in NVRAM. 12940 */ 12941 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4])) 12942 goto error; 12943 } 12944 *vpdlen = len; 12945 } else { 12946 buf = pci_vpd_alloc(tp->pdev, vpdlen); 12947 if (IS_ERR(buf)) 12948 return NULL; 12949 } 12950 12951 return buf; 12952 12953 error: 12954 kfree(buf); 12955 return NULL; 12956 } 12957 12958 #define NVRAM_TEST_SIZE 0x100 12959 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14 12960 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18 12961 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c 12962 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20 12963 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24 12964 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50 12965 #define NVRAM_SELFBOOT_HW_SIZE 0x20 12966 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c 12967 12968 static int tg3_test_nvram(struct tg3 *tp) 12969 { 12970 u32 csum, magic; 12971 __be32 *buf; 12972 int i, j, k, err = 0, size; 12973 unsigned int len; 12974 12975 if (tg3_flag(tp, NO_NVRAM)) 12976 return 0; 12977 12978 if (tg3_nvram_read(tp, 0, &magic) != 0) 12979 return -EIO; 12980 12981 if (magic == TG3_EEPROM_MAGIC) 12982 size = NVRAM_TEST_SIZE; 12983 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) { 12984 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) == 12985 TG3_EEPROM_SB_FORMAT_1) { 12986 switch (magic & TG3_EEPROM_SB_REVISION_MASK) { 12987 case TG3_EEPROM_SB_REVISION_0: 12988 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE; 12989 break; 12990 case TG3_EEPROM_SB_REVISION_2: 12991 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE; 12992 break; 12993 case TG3_EEPROM_SB_REVISION_3: 12994 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE; 12995 break; 12996 case TG3_EEPROM_SB_REVISION_4: 12997 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE; 12998 break; 12999 case TG3_EEPROM_SB_REVISION_5: 13000 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE; 13001 break; 13002 case TG3_EEPROM_SB_REVISION_6: 13003 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE; 13004 break; 13005 default: 13006 return -EIO; 13007 } 13008 } else 13009 return 0; 13010 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) 13011 size = NVRAM_SELFBOOT_HW_SIZE; 13012 else 13013 return -EIO; 13014 13015 buf = kmalloc(size, GFP_KERNEL); 13016 if (buf == NULL) 13017 return -ENOMEM; 13018 13019 err = -EIO; 13020 for (i = 0, j = 0; i < size; i += 4, j++) { 13021 err = tg3_nvram_read_be32(tp, i, &buf[j]); 13022 if (err) 13023 break; 13024 } 13025 if (i < size) 13026 goto out; 13027 13028 /* Selfboot format */ 13029 magic = be32_to_cpu(buf[0]); 13030 if 
((magic & TG3_EEPROM_MAGIC_FW_MSK) == 13031 TG3_EEPROM_MAGIC_FW) { 13032 u8 *buf8 = (u8 *) buf, csum8 = 0; 13033 13034 if ((magic & TG3_EEPROM_SB_REVISION_MASK) == 13035 TG3_EEPROM_SB_REVISION_2) { 13036 /* For rev 2, the csum doesn't include the MBA. */ 13037 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++) 13038 csum8 += buf8[i]; 13039 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++) 13040 csum8 += buf8[i]; 13041 } else { 13042 for (i = 0; i < size; i++) 13043 csum8 += buf8[i]; 13044 } 13045 13046 if (csum8 == 0) { 13047 err = 0; 13048 goto out; 13049 } 13050 13051 err = -EIO; 13052 goto out; 13053 } 13054 13055 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == 13056 TG3_EEPROM_MAGIC_HW) { 13057 u8 data[NVRAM_SELFBOOT_DATA_SIZE]; 13058 u8 parity[NVRAM_SELFBOOT_DATA_SIZE]; 13059 u8 *buf8 = (u8 *) buf; 13060 13061 /* Separate the parity bits and the data bytes. */ 13062 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) { 13063 if ((i == 0) || (i == 8)) { 13064 int l; 13065 u8 msk; 13066 13067 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1) 13068 parity[k++] = buf8[i] & msk; 13069 i++; 13070 } else if (i == 16) { 13071 int l; 13072 u8 msk; 13073 13074 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1) 13075 parity[k++] = buf8[i] & msk; 13076 i++; 13077 13078 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1) 13079 parity[k++] = buf8[i] & msk; 13080 i++; 13081 } 13082 data[j++] = buf8[i]; 13083 } 13084 13085 err = -EIO; 13086 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) { 13087 u8 hw8 = hweight8(data[i]); 13088 13089 if ((hw8 & 0x1) && parity[i]) 13090 goto out; 13091 else if (!(hw8 & 0x1) && !parity[i]) 13092 goto out; 13093 } 13094 err = 0; 13095 goto out; 13096 } 13097 13098 err = -EIO; 13099 13100 /* Bootstrap checksum at offset 0x10 */ 13101 csum = calc_crc((unsigned char *) buf, 0x10); 13102 if (csum != le32_to_cpu(buf[0x10/4])) 13103 goto out; 13104 13105 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ 13106 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); 13107 if (csum != le32_to_cpu(buf[0xfc/4])) 13108 goto out; 13109 13110 kfree(buf); 13111 13112 buf = tg3_vpd_readblock(tp, &len); 13113 if (!buf) 13114 return -ENOMEM; 13115 13116 err = pci_vpd_check_csum(buf, len); 13117 /* go on if no checksum found */ 13118 if (err == 1) 13119 err = 0; 13120 out: 13121 kfree(buf); 13122 return err; 13123 } 13124 13125 #define TG3_SERDES_TIMEOUT_SEC 2 13126 #define TG3_COPPER_TIMEOUT_SEC 6 13127 13128 static int tg3_test_link(struct tg3 *tp) 13129 { 13130 int i, max; 13131 13132 if (!netif_running(tp->dev)) 13133 return -ENODEV; 13134 13135 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) 13136 max = TG3_SERDES_TIMEOUT_SEC; 13137 else 13138 max = TG3_COPPER_TIMEOUT_SEC; 13139 13140 for (i = 0; i < max; i++) { 13141 if (tp->link_up) 13142 return 0; 13143 13144 if (msleep_interruptible(1000)) 13145 break; 13146 } 13147 13148 return -EIO; 13149 } 13150 13151 /* Only test the commonly used registers */ 13152 static int tg3_test_registers(struct tg3 *tp) 13153 { 13154 int i, is_5705, is_5750; 13155 u32 offset, read_mask, write_mask, val, save_val, read_val; 13156 static struct { 13157 u16 offset; 13158 u16 flags; 13159 #define TG3_FL_5705 0x1 13160 #define TG3_FL_NOT_5705 0x2 13161 #define TG3_FL_NOT_5788 0x4 13162 #define TG3_FL_NOT_5750 0x8 13163 u32 read_mask; 13164 u32 write_mask; 13165 } reg_tbl[] = { 13166 /* MAC Control Registers */ 13167 { MAC_MODE, TG3_FL_NOT_5705, 13168 0x00000000, 0x00ef6f8c }, 13169 { MAC_MODE, TG3_FL_5705, 13170 0x00000000, 0x01ef6b8c }, 13171 { 
MAC_STATUS, TG3_FL_NOT_5705, 13172 0x03800107, 0x00000000 }, 13173 { MAC_STATUS, TG3_FL_5705, 13174 0x03800100, 0x00000000 }, 13175 { MAC_ADDR_0_HIGH, 0x0000, 13176 0x00000000, 0x0000ffff }, 13177 { MAC_ADDR_0_LOW, 0x0000, 13178 0x00000000, 0xffffffff }, 13179 { MAC_RX_MTU_SIZE, 0x0000, 13180 0x00000000, 0x0000ffff }, 13181 { MAC_TX_MODE, 0x0000, 13182 0x00000000, 0x00000070 }, 13183 { MAC_TX_LENGTHS, 0x0000, 13184 0x00000000, 0x00003fff }, 13185 { MAC_RX_MODE, TG3_FL_NOT_5705, 13186 0x00000000, 0x000007fc }, 13187 { MAC_RX_MODE, TG3_FL_5705, 13188 0x00000000, 0x000007dc }, 13189 { MAC_HASH_REG_0, 0x0000, 13190 0x00000000, 0xffffffff }, 13191 { MAC_HASH_REG_1, 0x0000, 13192 0x00000000, 0xffffffff }, 13193 { MAC_HASH_REG_2, 0x0000, 13194 0x00000000, 0xffffffff }, 13195 { MAC_HASH_REG_3, 0x0000, 13196 0x00000000, 0xffffffff }, 13197 13198 /* Receive Data and Receive BD Initiator Control Registers. */ 13199 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705, 13200 0x00000000, 0xffffffff }, 13201 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705, 13202 0x00000000, 0xffffffff }, 13203 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705, 13204 0x00000000, 0x00000003 }, 13205 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705, 13206 0x00000000, 0xffffffff }, 13207 { RCVDBDI_STD_BD+0, 0x0000, 13208 0x00000000, 0xffffffff }, 13209 { RCVDBDI_STD_BD+4, 0x0000, 13210 0x00000000, 0xffffffff }, 13211 { RCVDBDI_STD_BD+8, 0x0000, 13212 0x00000000, 0xffff0002 }, 13213 { RCVDBDI_STD_BD+0xc, 0x0000, 13214 0x00000000, 0xffffffff }, 13215 13216 /* Receive BD Initiator Control Registers. */ 13217 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705, 13218 0x00000000, 0xffffffff }, 13219 { RCVBDI_STD_THRESH, TG3_FL_5705, 13220 0x00000000, 0x000003ff }, 13221 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, 13222 0x00000000, 0xffffffff }, 13223 13224 /* Host Coalescing Control Registers. 
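 * On 5705-class parts the tick and max-frame fields are narrower
 * (0x3ff and 0xff write masks) than on the older chips.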
*/ 13225 { HOSTCC_MODE, TG3_FL_NOT_5705, 13226 0x00000000, 0x00000004 }, 13227 { HOSTCC_MODE, TG3_FL_5705, 13228 0x00000000, 0x000000f6 }, 13229 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705, 13230 0x00000000, 0xffffffff }, 13231 { HOSTCC_RXCOL_TICKS, TG3_FL_5705, 13232 0x00000000, 0x000003ff }, 13233 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705, 13234 0x00000000, 0xffffffff }, 13235 { HOSTCC_TXCOL_TICKS, TG3_FL_5705, 13236 0x00000000, 0x000003ff }, 13237 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705, 13238 0x00000000, 0xffffffff }, 13239 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 13240 0x00000000, 0x000000ff }, 13241 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705, 13242 0x00000000, 0xffffffff }, 13243 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 13244 0x00000000, 0x000000ff }, 13245 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, 13246 0x00000000, 0xffffffff }, 13247 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, 13248 0x00000000, 0xffffffff }, 13249 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, 13250 0x00000000, 0xffffffff }, 13251 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 13252 0x00000000, 0x000000ff }, 13253 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, 13254 0x00000000, 0xffffffff }, 13255 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 13256 0x00000000, 0x000000ff }, 13257 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, 13258 0x00000000, 0xffffffff }, 13259 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705, 13260 0x00000000, 0xffffffff }, 13261 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705, 13262 0x00000000, 0xffffffff }, 13263 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, 13264 0x00000000, 0xffffffff }, 13265 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, 13266 0x00000000, 0xffffffff }, 13267 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, 13268 0xffffffff, 0x00000000 }, 13269 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, 13270 0xffffffff, 0x00000000 }, 13271 13272 /* Buffer Manager Control Registers. 
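 * The DMA descriptor pool registers are read-only here (write mask
 * zero) and are skipped on 5705-class parts.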
*/ 13273 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750, 13274 0x00000000, 0x007fff80 }, 13275 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750, 13276 0x00000000, 0x007fffff }, 13277 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000, 13278 0x00000000, 0x0000003f }, 13279 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000, 13280 0x00000000, 0x000001ff }, 13281 { BUFMGR_MB_HIGH_WATER, 0x0000, 13282 0x00000000, 0x000001ff }, 13283 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705, 13284 0xffffffff, 0x00000000 }, 13285 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705, 13286 0xffffffff, 0x00000000 }, 13287 13288 /* Mailbox Registers */ 13289 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000, 13290 0x00000000, 0x000001ff }, 13291 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705, 13292 0x00000000, 0x000001ff }, 13293 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000, 13294 0x00000000, 0x000007ff }, 13295 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000, 13296 0x00000000, 0x000001ff }, 13297 13298 { 0xffff, 0x0000, 0x00000000, 0x00000000 }, 13299 }; 13300 13301 is_5705 = is_5750 = 0; 13302 if (tg3_flag(tp, 5705_PLUS)) { 13303 is_5705 = 1; 13304 if (tg3_flag(tp, 5750_PLUS)) 13305 is_5750 = 1; 13306 } 13307 13308 for (i = 0; reg_tbl[i].offset != 0xffff; i++) { 13309 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705)) 13310 continue; 13311 13312 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) 13313 continue; 13314 13315 if (tg3_flag(tp, IS_5788) && 13316 (reg_tbl[i].flags & TG3_FL_NOT_5788)) 13317 continue; 13318 13319 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750)) 13320 continue; 13321 13322 offset = (u32) reg_tbl[i].offset; 13323 read_mask = reg_tbl[i].read_mask; 13324 write_mask = reg_tbl[i].write_mask; 13325 13326 /* Save the original register content */ 13327 save_val = tr32(offset); 13328 13329 /* Determine the read-only value. */ 13330 read_val = save_val & read_mask; 13331 13332 /* Write zero to the register, then make sure the read-only bits 13333 * are not changed and the read/write bits are all zeros. 13334 */ 13335 tw32(offset, 0); 13336 13337 val = tr32(offset); 13338 13339 /* Test the read-only and read/write bits. */ 13340 if (((val & read_mask) != read_val) || (val & write_mask)) 13341 goto out; 13342 13343 /* Write ones to all the bits defined by RdMask and WrMask, then 13344 * make sure the read-only bits are not changed and the 13345 * read/write bits are all ones. 13346 */ 13347 tw32(offset, read_mask | write_mask); 13348 13349 val = tr32(offset); 13350 13351 /* Test the read-only bits. */ 13352 if ((val & read_mask) != read_val) 13353 goto out; 13354 13355 /* Test the read/write bits. 
*/ 13356 if ((val & write_mask) != write_mask) 13357 goto out; 13358 13359 tw32(offset, save_val); 13360 } 13361 13362 return 0; 13363 13364 out: 13365 if (netif_msg_hw(tp)) 13366 netdev_err(tp->dev, 13367 "Register test failed at offset %x\n", offset); 13368 tw32(offset, save_val); 13369 return -EIO; 13370 } 13371 13372 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) 13373 { 13374 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; 13375 int i; 13376 u32 j; 13377 13378 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) { 13379 for (j = 0; j < len; j += 4) { 13380 u32 val; 13381 13382 tg3_write_mem(tp, offset + j, test_pattern[i]); 13383 tg3_read_mem(tp, offset + j, &val); 13384 if (val != test_pattern[i]) 13385 return -EIO; 13386 } 13387 } 13388 return 0; 13389 } 13390 13391 static int tg3_test_memory(struct tg3 *tp) 13392 { 13393 static struct mem_entry { 13394 u32 offset; 13395 u32 len; 13396 } mem_tbl_570x[] = { 13397 { 0x00000000, 0x00b50}, 13398 { 0x00002000, 0x1c000}, 13399 { 0xffffffff, 0x00000} 13400 }, mem_tbl_5705[] = { 13401 { 0x00000100, 0x0000c}, 13402 { 0x00000200, 0x00008}, 13403 { 0x00004000, 0x00800}, 13404 { 0x00006000, 0x01000}, 13405 { 0x00008000, 0x02000}, 13406 { 0x00010000, 0x0e000}, 13407 { 0xffffffff, 0x00000} 13408 }, mem_tbl_5755[] = { 13409 { 0x00000200, 0x00008}, 13410 { 0x00004000, 0x00800}, 13411 { 0x00006000, 0x00800}, 13412 { 0x00008000, 0x02000}, 13413 { 0x00010000, 0x0c000}, 13414 { 0xffffffff, 0x00000} 13415 }, mem_tbl_5906[] = { 13416 { 0x00000200, 0x00008}, 13417 { 0x00004000, 0x00400}, 13418 { 0x00006000, 0x00400}, 13419 { 0x00008000, 0x01000}, 13420 { 0x00010000, 0x01000}, 13421 { 0xffffffff, 0x00000} 13422 }, mem_tbl_5717[] = { 13423 { 0x00000200, 0x00008}, 13424 { 0x00010000, 0x0a000}, 13425 { 0x00020000, 0x13c00}, 13426 { 0xffffffff, 0x00000} 13427 }, mem_tbl_57765[] = { 13428 { 0x00000200, 0x00008}, 13429 { 0x00004000, 0x00800}, 13430 { 0x00006000, 0x09800}, 13431 { 0x00010000, 0x0a000}, 13432 { 0xffffffff, 0x00000} 13433 }; 13434 struct mem_entry *mem_tbl; 13435 int err = 0; 13436 int i; 13437 13438 if (tg3_flag(tp, 5717_PLUS)) 13439 mem_tbl = mem_tbl_5717; 13440 else if (tg3_flag(tp, 57765_CLASS) || 13441 tg3_asic_rev(tp) == ASIC_REV_5762) 13442 mem_tbl = mem_tbl_57765; 13443 else if (tg3_flag(tp, 5755_PLUS)) 13444 mem_tbl = mem_tbl_5755; 13445 else if (tg3_asic_rev(tp) == ASIC_REV_5906) 13446 mem_tbl = mem_tbl_5906; 13447 else if (tg3_flag(tp, 5705_PLUS)) 13448 mem_tbl = mem_tbl_5705; 13449 else 13450 mem_tbl = mem_tbl_570x; 13451 13452 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { 13453 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len); 13454 if (err) 13455 break; 13456 } 13457 13458 return err; 13459 } 13460 13461 #define TG3_TSO_MSS 500 13462 13463 #define TG3_TSO_IP_HDR_LEN 20 13464 #define TG3_TSO_TCP_HDR_LEN 20 13465 #define TG3_TSO_TCP_OPT_LEN 12 13466 13467 static const u8 tg3_tso_header[] = { 13468 0x08, 0x00, 13469 0x45, 0x00, 0x00, 0x00, 13470 0x00, 0x00, 0x40, 0x00, 13471 0x40, 0x06, 0x00, 0x00, 13472 0x0a, 0x00, 0x00, 0x01, 13473 0x0a, 0x00, 0x00, 0x02, 13474 0x0d, 0x00, 0xe0, 0x00, 13475 0x00, 0x00, 0x01, 0x00, 13476 0x00, 0x00, 0x02, 0x00, 13477 0x80, 0x10, 0x10, 0x00, 13478 0x14, 0x09, 0x00, 0x00, 13479 0x01, 0x01, 0x08, 0x0a, 13480 0x11, 0x11, 0x11, 0x11, 13481 0x11, 0x11, 0x11, 0x11, 13482 }; 13483 13484 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) 13485 { 13486 u32 rx_start_idx, rx_idx, tx_idx, opaque_key; 13487 u32 base_flags = 0, mss = 0, 
desc_idx, coal_now, data_off, val; 13488 u32 budget; 13489 struct sk_buff *skb; 13490 u8 *tx_data, *rx_data; 13491 dma_addr_t map; 13492 int num_pkts, tx_len, rx_len, i, err; 13493 struct tg3_rx_buffer_desc *desc; 13494 struct tg3_napi *tnapi, *rnapi; 13495 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; 13496 13497 tnapi = &tp->napi[0]; 13498 rnapi = &tp->napi[0]; 13499 if (tp->irq_cnt > 1) { 13500 if (tg3_flag(tp, ENABLE_RSS)) 13501 rnapi = &tp->napi[1]; 13502 if (tg3_flag(tp, ENABLE_TSS)) 13503 tnapi = &tp->napi[1]; 13504 } 13505 coal_now = tnapi->coal_now | rnapi->coal_now; 13506 13507 err = -EIO; 13508 13509 tx_len = pktsz; 13510 skb = netdev_alloc_skb(tp->dev, tx_len); 13511 if (!skb) 13512 return -ENOMEM; 13513 13514 tx_data = skb_put(skb, tx_len); 13515 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN); 13516 memset(tx_data + ETH_ALEN, 0x0, 8); 13517 13518 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN); 13519 13520 if (tso_loopback) { 13521 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN]; 13522 13523 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN + 13524 TG3_TSO_TCP_OPT_LEN; 13525 13526 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header, 13527 sizeof(tg3_tso_header)); 13528 mss = TG3_TSO_MSS; 13529 13530 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header); 13531 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS); 13532 13533 /* Set the total length field in the IP header */ 13534 iph->tot_len = htons((u16)(mss + hdr_len)); 13535 13536 base_flags = (TXD_FLAG_CPU_PRE_DMA | 13537 TXD_FLAG_CPU_POST_DMA); 13538 13539 if (tg3_flag(tp, HW_TSO_1) || 13540 tg3_flag(tp, HW_TSO_2) || 13541 tg3_flag(tp, HW_TSO_3)) { 13542 struct tcphdr *th; 13543 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN; 13544 th = (struct tcphdr *)&tx_data[val]; 13545 th->check = 0; 13546 } else 13547 base_flags |= TXD_FLAG_TCPUDP_CSUM; 13548 13549 if (tg3_flag(tp, HW_TSO_3)) { 13550 mss |= (hdr_len & 0xc) << 12; 13551 if (hdr_len & 0x10) 13552 base_flags |= 0x00000010; 13553 base_flags |= (hdr_len & 0x3e0) << 5; 13554 } else if (tg3_flag(tp, HW_TSO_2)) 13555 mss |= hdr_len << 9; 13556 else if (tg3_flag(tp, HW_TSO_1) || 13557 tg3_asic_rev(tp) == ASIC_REV_5705) { 13558 mss |= (TG3_TSO_TCP_OPT_LEN << 9); 13559 } else { 13560 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10); 13561 } 13562 13563 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header); 13564 } else { 13565 num_pkts = 1; 13566 data_off = ETH_HLEN; 13567 13568 if (tg3_flag(tp, USE_JUMBO_BDFLAG) && 13569 tx_len > VLAN_ETH_FRAME_LEN) 13570 base_flags |= TXD_FLAG_JMB_PKT; 13571 } 13572 13573 for (i = data_off; i < tx_len; i++) 13574 tx_data[i] = (u8) (i & 0xff); 13575 13576 map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE); 13577 if (dma_mapping_error(&tp->pdev->dev, map)) { 13578 dev_kfree_skb(skb); 13579 return -EIO; 13580 } 13581 13582 val = tnapi->tx_prod; 13583 tnapi->tx_buffers[val].skb = skb; 13584 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map); 13585 13586 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 13587 rnapi->coal_now); 13588 13589 udelay(10); 13590 13591 rx_start_idx = rnapi->hw_status->idx[0].rx_producer; 13592 13593 budget = tg3_tx_avail(tnapi); 13594 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len, 13595 base_flags | TXD_FLAG_END, mss, 0)) { 13596 tnapi->tx_buffers[val].skb = NULL; 13597 dev_kfree_skb(skb); 13598 return -EIO; 13599 } 13600 13601 tnapi->tx_prod++; 13602 13603 /* Sync BD data before updating mailbox */ 13604 wmb(); 13605 13606 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); 13607 
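/* Read the mailbox back so that the posted write is flushed to the
 * chip before the status polling below begins.
 */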
tr32_mailbox(tnapi->prodmbox); 13608 13609 udelay(10); 13610 13611 /* 350 usec to allow enough time on some 10/100 Mbps devices. */ 13612 for (i = 0; i < 35; i++) { 13613 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 13614 coal_now); 13615 13616 udelay(10); 13617 13618 tx_idx = tnapi->hw_status->idx[0].tx_consumer; 13619 rx_idx = rnapi->hw_status->idx[0].rx_producer; 13620 if ((tx_idx == tnapi->tx_prod) && 13621 (rx_idx == (rx_start_idx + num_pkts))) 13622 break; 13623 } 13624 13625 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1); 13626 dev_kfree_skb(skb); 13627 13628 if (tx_idx != tnapi->tx_prod) 13629 goto out; 13630 13631 if (rx_idx != rx_start_idx + num_pkts) 13632 goto out; 13633 13634 val = data_off; 13635 while (rx_idx != rx_start_idx) { 13636 desc = &rnapi->rx_rcb[rx_start_idx++]; 13637 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 13638 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 13639 13640 if ((desc->err_vlan & RXD_ERR_MASK) != 0 && 13641 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) 13642 goto out; 13643 13644 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) 13645 - ETH_FCS_LEN; 13646 13647 if (!tso_loopback) { 13648 if (rx_len != tx_len) 13649 goto out; 13650 13651 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) { 13652 if (opaque_key != RXD_OPAQUE_RING_STD) 13653 goto out; 13654 } else { 13655 if (opaque_key != RXD_OPAQUE_RING_JUMBO) 13656 goto out; 13657 } 13658 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && 13659 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK) 13660 >> RXD_TCPCSUM_SHIFT != 0xffff) { 13661 goto out; 13662 } 13663 13664 if (opaque_key == RXD_OPAQUE_RING_STD) { 13665 rx_data = tpr->rx_std_buffers[desc_idx].data; 13666 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], 13667 mapping); 13668 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 13669 rx_data = tpr->rx_jmb_buffers[desc_idx].data; 13670 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx], 13671 mapping); 13672 } else 13673 goto out; 13674 13675 dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len, 13676 DMA_FROM_DEVICE); 13677 13678 rx_data += TG3_RX_OFFSET(tp); 13679 for (i = data_off; i < rx_len; i++, val++) { 13680 if (*(rx_data + i) != (u8) (val & 0xff)) 13681 goto out; 13682 } 13683 } 13684 13685 err = 0; 13686 13687 /* tg3_free_rings will unmap and free the rx_data */ 13688 out: 13689 return err; 13690 } 13691 13692 #define TG3_STD_LOOPBACK_FAILED 1 13693 #define TG3_JMB_LOOPBACK_FAILED 2 13694 #define TG3_TSO_LOOPBACK_FAILED 4 13695 #define TG3_LOOPBACK_FAILED \ 13696 (TG3_STD_LOOPBACK_FAILED | \ 13697 TG3_JMB_LOOPBACK_FAILED | \ 13698 TG3_TSO_LOOPBACK_FAILED) 13699 13700 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk) 13701 { 13702 int err = -EIO; 13703 u32 eee_cap; 13704 u32 jmb_pkt_sz = 9000; 13705 13706 if (tp->dma_limit) 13707 jmb_pkt_sz = tp->dma_limit - ETH_HLEN; 13708 13709 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP; 13710 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; 13711 13712 if (!netif_running(tp->dev)) { 13713 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13714 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13715 if (do_extlpbk) 13716 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13717 goto done; 13718 } 13719 13720 err = tg3_reset_hw(tp, true); 13721 if (err) { 13722 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13723 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13724 if (do_extlpbk) 13725 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13726 goto done; 13727 } 13728 13729 if (tg3_flag(tp, ENABLE_RSS)) { 13730 int 
i; 13731 13732 /* Reroute all rx packets to the 1st queue */ 13733 for (i = MAC_RSS_INDIR_TBL_0; 13734 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4) 13735 tw32(i, 0x0); 13736 } 13737 13738 /* HW errata - mac loopback fails in some cases on 5780. 13739 * Normal traffic and PHY loopback are not affected by 13740 * errata. Also, the MAC loopback test is deprecated for 13741 * all newer ASIC revisions. 13742 */ 13743 if (tg3_asic_rev(tp) != ASIC_REV_5780 && 13744 !tg3_flag(tp, CPMU_PRESENT)) { 13745 tg3_mac_loopback(tp, true); 13746 13747 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13748 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED; 13749 13750 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13751 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13752 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED; 13753 13754 tg3_mac_loopback(tp, false); 13755 } 13756 13757 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 13758 !tg3_flag(tp, USE_PHYLIB)) { 13759 int i; 13760 13761 tg3_phy_lpbk_set(tp, 0, false); 13762 13763 /* Wait for link */ 13764 for (i = 0; i < 100; i++) { 13765 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) 13766 break; 13767 mdelay(1); 13768 } 13769 13770 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13771 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED; 13772 if (tg3_flag(tp, TSO_CAPABLE) && 13773 tg3_run_loopback(tp, ETH_FRAME_LEN, true)) 13774 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED; 13775 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13776 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13777 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED; 13778 13779 if (do_extlpbk) { 13780 tg3_phy_lpbk_set(tp, 0, true); 13781 13782 /* All link indications report up, but the hardware 13783 * isn't really ready for about 20 msec. Double it 13784 * to be sure. 13785 */ 13786 mdelay(40); 13787 13788 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 13789 data[TG3_EXT_LOOPB_TEST] |= 13790 TG3_STD_LOOPBACK_FAILED; 13791 if (tg3_flag(tp, TSO_CAPABLE) && 13792 tg3_run_loopback(tp, ETH_FRAME_LEN, true)) 13793 data[TG3_EXT_LOOPB_TEST] |= 13794 TG3_TSO_LOOPBACK_FAILED; 13795 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 13796 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 13797 data[TG3_EXT_LOOPB_TEST] |= 13798 TG3_JMB_LOOPBACK_FAILED; 13799 } 13800 13801 /* Re-enable gphy autopowerdown. */ 13802 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 13803 tg3_phy_toggle_apd(tp, true); 13804 } 13805 13806 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] | 13807 data[TG3_EXT_LOOPB_TEST]) ? 
-EIO : 0; 13808 13809 done: 13810 tp->phy_flags |= eee_cap; 13811 13812 return err; 13813 } 13814 13815 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, 13816 u64 *data) 13817 { 13818 struct tg3 *tp = netdev_priv(dev); 13819 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB; 13820 13821 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { 13822 if (tg3_power_up(tp)) { 13823 etest->flags |= ETH_TEST_FL_FAILED; 13824 memset(data, 1, sizeof(u64) * TG3_NUM_TEST); 13825 return; 13826 } 13827 tg3_ape_driver_state_change(tp, RESET_KIND_INIT); 13828 } 13829 13830 memset(data, 0, sizeof(u64) * TG3_NUM_TEST); 13831 13832 if (tg3_test_nvram(tp) != 0) { 13833 etest->flags |= ETH_TEST_FL_FAILED; 13834 data[TG3_NVRAM_TEST] = 1; 13835 } 13836 if (!doextlpbk && tg3_test_link(tp)) { 13837 etest->flags |= ETH_TEST_FL_FAILED; 13838 data[TG3_LINK_TEST] = 1; 13839 } 13840 if (etest->flags & ETH_TEST_FL_OFFLINE) { 13841 int err, err2 = 0, irq_sync = 0; 13842 13843 if (netif_running(dev)) { 13844 tg3_phy_stop(tp); 13845 tg3_netif_stop(tp); 13846 irq_sync = 1; 13847 } 13848 13849 tg3_full_lock(tp, irq_sync); 13850 tg3_halt(tp, RESET_KIND_SUSPEND, 1); 13851 err = tg3_nvram_lock(tp); 13852 tg3_halt_cpu(tp, RX_CPU_BASE); 13853 if (!tg3_flag(tp, 5705_PLUS)) 13854 tg3_halt_cpu(tp, TX_CPU_BASE); 13855 if (!err) 13856 tg3_nvram_unlock(tp); 13857 13858 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 13859 tg3_phy_reset(tp); 13860 13861 if (tg3_test_registers(tp) != 0) { 13862 etest->flags |= ETH_TEST_FL_FAILED; 13863 data[TG3_REGISTER_TEST] = 1; 13864 } 13865 13866 if (tg3_test_memory(tp) != 0) { 13867 etest->flags |= ETH_TEST_FL_FAILED; 13868 data[TG3_MEMORY_TEST] = 1; 13869 } 13870 13871 if (doextlpbk) 13872 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; 13873 13874 if (tg3_test_loopback(tp, data, doextlpbk)) 13875 etest->flags |= ETH_TEST_FL_FAILED; 13876 13877 tg3_full_unlock(tp); 13878 13879 if (tg3_test_interrupt(tp) != 0) { 13880 etest->flags |= ETH_TEST_FL_FAILED; 13881 data[TG3_INTERRUPT_TEST] = 1; 13882 } 13883 13884 tg3_full_lock(tp, 0); 13885 13886 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 13887 if (netif_running(dev)) { 13888 tg3_flag_set(tp, INIT_COMPLETE); 13889 err2 = tg3_restart_hw(tp, true); 13890 if (!err2) 13891 tg3_netif_start(tp); 13892 } 13893 13894 tg3_full_unlock(tp); 13895 13896 if (irq_sync && !err2) 13897 tg3_phy_start(tp); 13898 } 13899 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 13900 tg3_power_down_prepare(tp); 13901 13902 } 13903 13904 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) 13905 { 13906 struct tg3 *tp = netdev_priv(dev); 13907 struct hwtstamp_config stmpconf; 13908 13909 if (!tg3_flag(tp, PTP_CAPABLE)) 13910 return -EOPNOTSUPP; 13911 13912 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf))) 13913 return -EFAULT; 13914 13915 if (stmpconf.tx_type != HWTSTAMP_TX_ON && 13916 stmpconf.tx_type != HWTSTAMP_TX_OFF) 13917 return -ERANGE; 13918 13919 switch (stmpconf.rx_filter) { 13920 case HWTSTAMP_FILTER_NONE: 13921 tp->rxptpctl = 0; 13922 break; 13923 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 13924 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13925 TG3_RX_PTP_CTL_ALL_V1_EVENTS; 13926 break; 13927 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 13928 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13929 TG3_RX_PTP_CTL_SYNC_EVNT; 13930 break; 13931 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 13932 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | 13933 TG3_RX_PTP_CTL_DELAY_REQ; 13934 break; 13935 case HWTSTAMP_FILTER_PTP_V2_EVENT: 13936 tp->rxptpctl = 
TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13937 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13938 break; 13939 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 13940 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13941 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13942 break; 13943 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 13944 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13945 TG3_RX_PTP_CTL_ALL_V2_EVENTS; 13946 break; 13947 case HWTSTAMP_FILTER_PTP_V2_SYNC: 13948 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13949 TG3_RX_PTP_CTL_SYNC_EVNT; 13950 break; 13951 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 13952 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13953 TG3_RX_PTP_CTL_SYNC_EVNT; 13954 break; 13955 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 13956 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13957 TG3_RX_PTP_CTL_SYNC_EVNT; 13958 break; 13959 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 13960 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | 13961 TG3_RX_PTP_CTL_DELAY_REQ; 13962 break; 13963 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 13964 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | 13965 TG3_RX_PTP_CTL_DELAY_REQ; 13966 break; 13967 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 13968 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | 13969 TG3_RX_PTP_CTL_DELAY_REQ; 13970 break; 13971 default: 13972 return -ERANGE; 13973 } 13974 13975 if (netif_running(dev) && tp->rxptpctl) 13976 tw32(TG3_RX_PTP_CTL, 13977 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); 13978 13979 if (stmpconf.tx_type == HWTSTAMP_TX_ON) 13980 tg3_flag_set(tp, TX_TSTAMP_EN); 13981 else 13982 tg3_flag_clear(tp, TX_TSTAMP_EN); 13983 13984 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 13985 -EFAULT : 0; 13986 } 13987 13988 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) 13989 { 13990 struct tg3 *tp = netdev_priv(dev); 13991 struct hwtstamp_config stmpconf; 13992 13993 if (!tg3_flag(tp, PTP_CAPABLE)) 13994 return -EOPNOTSUPP; 13995 13996 stmpconf.flags = 0; 13997 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ? 
13998 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF); 13999 14000 switch (tp->rxptpctl) { 14001 case 0: 14002 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE; 14003 break; 14004 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS: 14005 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 14006 break; 14007 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 14008 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; 14009 break; 14010 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ: 14011 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; 14012 break; 14013 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 14014 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 14015 break; 14016 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 14017 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; 14018 break; 14019 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: 14020 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; 14021 break; 14022 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 14023 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; 14024 break; 14025 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 14026 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC; 14027 break; 14028 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT: 14029 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; 14030 break; 14031 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ: 14032 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; 14033 break; 14034 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ: 14035 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ; 14036 break; 14037 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ: 14038 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; 14039 break; 14040 default: 14041 WARN_ON_ONCE(1); 14042 return -ERANGE; 14043 } 14044 14045 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 

static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		fallthrough;
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_readphy(tp, data->phy_id & 0x1f,
				    data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_writephy(tp, data->phy_id & 0x1f,
				     data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_set(dev, ifr);

	case SIOCGHWTSTAMP:
		return tg3_hwtstamp_get(dev, ifr);

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}

static int tg3_get_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}

static int tg3_set_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (!ec->rx_coalesce_usecs) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (!ec->tx_coalesce_usecs) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
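
/* Illustrative sketch, not part of the driver: the ethtool knobs that feed
 * tg3_set_coalesce() above. Interface name and values are hypothetical;
 * zero usecs values and anything above the MAX_* bounds are rejected
 * with -EINVAL.
 *
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5 tx-usecs 72 tx-frames 53
 *	ethtool -c eth0		# reads back via tg3_get_coalesce()
 */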

static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev, "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	if (edata->advertised != tp->eee.advertised) {
		netdev_warn(tp->dev,
			    "Direct manipulation of EEE advertisement is not supported\n");
		return -EINVAL;
	}

	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
		netdev_warn(tp->dev,
			    "Maximal Tx Lpi timer supported is %#x(u)\n",
			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
		return -EINVAL;
	}

	tp->eee = *edata;

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(tp->dev)) {
		tg3_full_lock(tp, 0);
		tg3_setup_eee(tp);
		tg3_phy_reset(tp);
		tg3_full_unlock(tp);
	}

	return 0;
}

static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev,
			    "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	*edata = tp->eee;
	return 0;
}
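
/* Illustrative sketch, not part of the driver: EEE control from user space.
 * The tx-lpi-timer value is bounded by TG3_CPMU_DBTMR1_LNKIDLE_MAX above,
 * and the advertisement mask itself cannot be changed through this path.
 * Interface name and timer value are hypothetical.
 *
 *	ethtool --show-eee eth0
 *	ethtool --set-eee eth0 eee on tx-lpi on tx-lpi-timer 2047
 */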

static const struct ethtool_ops tg3_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS_IRQ |
				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
				     ETHTOOL_COALESCE_STATS_BLOCK_USECS,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size	= tg3_get_rxfh_indir_size,
	.get_rxfh		= tg3_get_rxfh,
	.set_rxfh		= tg3_set_rxfh,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
	.get_eee		= tg3_get_eee,
	.set_eee		= tg3_set_eee,
	.get_link_ksettings	= tg3_get_link_ksettings,
	.set_link_ksettings	= tg3_set_link_ksettings,
};

static void tg3_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
		*stats = tp->net_stats_prev;
		spin_unlock_bh(&tp->lock);
		return;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);
}

static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}

static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}

static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;
	bool reset_phy = false;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_set_mtu(dev, tp, new_mtu);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		reset_phy = true;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
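
/* Illustrative sketch, not part of the driver: an MTU change such as
 *
 *	ip link set eth0 mtu 9000	# interface name is hypothetical
 *
 * ends up in tg3_change_mtu() above. Anything above ETH_DATA_LEN (1500)
 * enables the jumbo producer ring (or, on 5780-class parts, trades away
 * TSO), and on a running device the hardware is halted and restarted.
 */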

static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_eth_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};

static void tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
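
/* Worked example of the wrap-around probe above, under the assumption of a
 * 4KB part: reads at 0x10, 0x20, ... return arbitrary data until the probe
 * offset aliases back onto offset 0. With 12 address bits, the read at
 * 0x1000 wraps to offset 0 and returns the magic signature again, so the
 * loop exits with cursize == 0x1000 (4KB) as the detected size.
 */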

static void tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing. We want to operate on the
			 * 16-bit value at offset 0xf2. The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses. This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU. The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
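
/* Worked example for the swab16() above (hypothetical contents): if the
 * size word at 0xf2 is 512 (0x0200 KB), the read conventions described in
 * the comment leave 0x0002 in the low 16 bits. swab16(0x0002) == 0x0200,
 * and 0x0200 * 1024 == 524288 bytes, i.e. a 512KB part.
 */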

static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}

static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}

static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}

static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}

static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}

static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}

static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}

static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}


static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}

static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp, nv_status;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		switch (nvmpinstrp) {
		case FLASH_5762_MX25L_100:
		case FLASH_5762_MX25L_200:
		case FLASH_5762_MX25L_400:
		case FLASH_5762_MX25L_800:
		case FLASH_5762_MX25L_160_320:
			tp->nvram_pagesize = 4096;
			tp->nvram_jedecnum = JEDEC_MACRONIX;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
			tg3_flag_set(tp, FLASH);
			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
			tp->nvram_size =
				(1 << (nv_status >> AUTOSENSE_DEVID &
				       AUTOSENSE_DEVID_MASK)
					<< AUTOSENSE_SIZE_IN_MB);
			return;

		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}

/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tw32_f(GRC_EEPROM_ADDR,
	       (EEPROM_ADDR_FSM_RESET |
		(EEPROM_DEFAULT_CLOCK_PERIOD <<
		 EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	       tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}

struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}

static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default. */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
		u32 nic_phy_id, ver, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			if (tg3_flag(tp, 5717_PLUS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
						LED_CTRL_BLINK_RATE_MASK;

			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);

		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}

static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(10);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}

static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}

/* Read the gphy configuration from the OTP region of the chip. The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
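
/* Worked example for the merge above (hypothetical OTP contents): with
 * thalf_otp == 0xAAAABBBB and bhalf_otp == 0xCCCCDDDD, the result is
 * (0xBBBB << 16) | 0xCCCC == 0xBBBBCCCC, i.e. the low 16 bits of the
 * first read concatenated with the high 16 bits of the second.
 */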

static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
			adv |= ADVERTISED_1000baseT_Half;
		adv |= ADVERTISED_1000baseT_Full;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}

static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane. If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature? Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to a BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		tp->eee.supported = SUPPORTED_100baseT_Full |
				    SUPPORTED_1000baseT_Full;
		tp->eee.advertised = ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Full;
		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
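
/* Worked example for the ID packing in tg3_phy_probe() (hypothetical
 * register values): with MII_PHYSID1 == 0x0020 and MII_PHYSID2 == 0x60b0,
 *
 *	(0x0020 & 0xffff) << 10	== 0x00008000
 *	(0x60b0 & 0xfc00) << 16	== 0x60000000
 *	(0x60b0 & 0x03ff) << 0	== 0x000000b0
 *
 * so hw_phy_id == 0x600080b0; masking with TG3_PHY_ID_MASK strips the
 * low-order revision bits before the TG3_KNOWN_PHY_ID() lookup.
 */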

static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int len, vpdlen;
	int i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
					 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
	if (i < 0)
		goto partno;

	if (len != 4 || memcmp(vpd_data + i, "1028", 4))
		goto partno;

	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
					 PCI_VPD_RO_KEYWORD_VENDOR0, &len);
	if (i < 0)
		goto partno;

	memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
	snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);

partno:
	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
					 PCI_VPD_RO_KEYWORD_PARTNO, &len);
	if (i < 0)
		goto out_not_found;

	if (len > TG3_BPN_SIZE)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}

static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    val != 0)
		return 0;

	return 1;
}
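
/* Worked example for tg3_fw_img_is_valid() (hypothetical NVRAM contents):
 * an image header is accepted only when its first word satisfies
 * (val & 0xfc000000) == 0x0c000000 (0x0c123456 passes, 0x12345678 fails)
 * and the following word reads back as zero.
 */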

static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}

static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}

static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
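
/* Worked example for tg3_read_sb_ver() (hypothetical header word): with
 * major == 1, minor == 2 and build == 3 decoded from the EDH word, the
 * function appends " v1.02" and then the build letter 'a' + 3 - 1 == 'c',
 * yielding a version string of "sb v1.02c".
 */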
15940 15941 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI) 15942 break; 15943 } 15944 15945 if (offset == TG3_NVM_DIR_END) 15946 return; 15947 15948 if (!tg3_flag(tp, 5705_PLUS)) 15949 start = 0x08000000; 15950 else if (tg3_nvram_read(tp, offset - 4, &start)) 15951 return; 15952 15953 if (tg3_nvram_read(tp, offset + 4, &offset) || 15954 !tg3_fw_img_is_valid(tp, offset) || 15955 tg3_nvram_read(tp, offset + 8, &val)) 15956 return; 15957 15958 offset += val - start; 15959 15960 vlen = strlen(tp->fw_ver); 15961 15962 tp->fw_ver[vlen++] = ','; 15963 tp->fw_ver[vlen++] = ' '; 15964 15965 for (i = 0; i < 4; i++) { 15966 __be32 v; 15967 if (tg3_nvram_read_be32(tp, offset, &v)) 15968 return; 15969 15970 offset += sizeof(v); 15971 15972 if (vlen > TG3_VER_SIZE - sizeof(v)) { 15973 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen); 15974 break; 15975 } 15976 15977 memcpy(&tp->fw_ver[vlen], &v, sizeof(v)); 15978 vlen += sizeof(v); 15979 } 15980 } 15981 15982 static void tg3_probe_ncsi(struct tg3 *tp) 15983 { 15984 u32 apedata; 15985 15986 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); 15987 if (apedata != APE_SEG_SIG_MAGIC) 15988 return; 15989 15990 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); 15991 if (!(apedata & APE_FW_STATUS_READY)) 15992 return; 15993 15994 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) 15995 tg3_flag_set(tp, APE_HAS_NCSI); 15996 } 15997 15998 static void tg3_read_dash_ver(struct tg3 *tp) 15999 { 16000 int vlen; 16001 u32 apedata; 16002 char *fwtype; 16003 16004 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION); 16005 16006 if (tg3_flag(tp, APE_HAS_NCSI)) 16007 fwtype = "NCSI"; 16008 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725) 16009 fwtype = "SMASH"; 16010 else 16011 fwtype = "DASH"; 16012 16013 vlen = strlen(tp->fw_ver); 16014 16015 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d", 16016 fwtype, 16017 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT, 16018 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT, 16019 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT, 16020 (apedata & APE_FW_VERSION_BLDMSK)); 16021 } 16022 16023 static void tg3_read_otp_ver(struct tg3 *tp) 16024 { 16025 u32 val, val2; 16026 16027 if (tg3_asic_rev(tp) != ASIC_REV_5762) 16028 return; 16029 16030 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) && 16031 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) && 16032 TG3_OTP_MAGIC0_VALID(val)) { 16033 u64 val64 = (u64) val << 32 | val2; 16034 u32 ver = 0; 16035 int i, vlen; 16036 16037 for (i = 0; i < 7; i++) { 16038 if ((val64 & 0xff) == 0) 16039 break; 16040 ver = val64 & 0xff; 16041 val64 >>= 8; 16042 } 16043 vlen = strlen(tp->fw_ver); 16044 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver); 16045 } 16046 } 16047 16048 static void tg3_read_fw_ver(struct tg3 *tp) 16049 { 16050 u32 val; 16051 bool vpd_vers = false; 16052 16053 if (tp->fw_ver[0] != 0) 16054 vpd_vers = true; 16055 16056 if (tg3_flag(tp, NO_NVRAM)) { 16057 strcat(tp->fw_ver, "sb"); 16058 tg3_read_otp_ver(tp); 16059 return; 16060 } 16061 16062 if (tg3_nvram_read(tp, 0, &val)) 16063 return; 16064 16065 if (val == TG3_EEPROM_MAGIC) 16066 tg3_read_bc_ver(tp); 16067 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) 16068 tg3_read_sb_ver(tp, val); 16069 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) 16070 tg3_read_hwsb_ver(tp); 16071 16072 if (tg3_flag(tp, ENABLE_ASF)) { 16073 if (tg3_flag(tp, ENABLE_APE)) { 16074 tg3_probe_ncsi(tp); 16075 if 
(!vpd_vers) 16076 tg3_read_dash_ver(tp); 16077 } else if (!vpd_vers) { 16078 tg3_read_mgmtfw_ver(tp); 16079 } 16080 } 16081 16082 tp->fw_ver[TG3_VER_SIZE - 1] = 0; 16083 } 16084 16085 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) 16086 { 16087 if (tg3_flag(tp, LRG_PROD_RING_CAP)) 16088 return TG3_RX_RET_MAX_SIZE_5717; 16089 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) 16090 return TG3_RX_RET_MAX_SIZE_5700; 16091 else 16092 return TG3_RX_RET_MAX_SIZE_5705; 16093 } 16094 16095 static const struct pci_device_id tg3_write_reorder_chipsets[] = { 16096 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) }, 16097 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) }, 16098 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) }, 16099 { }, 16100 }; 16101 16102 static struct pci_dev *tg3_find_peer(struct tg3 *tp) 16103 { 16104 struct pci_dev *peer; 16105 unsigned int func, devnr = tp->pdev->devfn & ~7; 16106 16107 for (func = 0; func < 8; func++) { 16108 peer = pci_get_slot(tp->pdev->bus, devnr | func); 16109 if (peer && peer != tp->pdev) 16110 break; 16111 pci_dev_put(peer); 16112 } 16113 /* 5704 can be configured in single-port mode, set peer to 16114 * tp->pdev in that case. 16115 */ 16116 if (!peer) { 16117 peer = tp->pdev; 16118 return peer; 16119 } 16120 16121 /* 16122 * We don't need to keep the refcount elevated; there's no way 16123 * to remove one half of this device without removing the other 16124 */ 16125 pci_dev_put(peer); 16126 16127 return peer; 16128 } 16129 16130 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg) 16131 { 16132 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT; 16133 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) { 16134 u32 reg; 16135 16136 /* All devices that use the alternate 16137 * ASIC REV location have a CPMU. 16138 */ 16139 tg3_flag_set(tp, CPMU_PRESENT); 16140 16141 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 16142 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C || 16143 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 16144 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || 16145 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || 16146 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || 16147 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 || 16148 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || 16149 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || 16150 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || 16151 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) 16152 reg = TG3PCI_GEN2_PRODID_ASICREV; 16153 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 || 16154 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 || 16155 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 || 16156 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 || 16157 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || 16158 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 || 16159 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 || 16160 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 || 16161 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 || 16162 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786) 16163 reg = TG3PCI_GEN15_PRODID_ASICREV; 16164 else 16165 reg = TG3PCI_PRODID_ASICREV; 16166 16167 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id); 16168 } 16169 16170 /* Wrong chip ID in 5752 A0. This code can be removed later 16171 * as A0 is not in production. 
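 * The 5717 C0 fixup just below is the same idea: those parts are
 * simply treated as 5720 A0.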
16172 */ 16173 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW) 16174 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0; 16175 16176 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0) 16177 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0; 16178 16179 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16180 tg3_asic_rev(tp) == ASIC_REV_5719 || 16181 tg3_asic_rev(tp) == ASIC_REV_5720) 16182 tg3_flag_set(tp, 5717_PLUS); 16183 16184 if (tg3_asic_rev(tp) == ASIC_REV_57765 || 16185 tg3_asic_rev(tp) == ASIC_REV_57766) 16186 tg3_flag_set(tp, 57765_CLASS); 16187 16188 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) || 16189 tg3_asic_rev(tp) == ASIC_REV_5762) 16190 tg3_flag_set(tp, 57765_PLUS); 16191 16192 /* Intentionally exclude ASIC_REV_5906 */ 16193 if (tg3_asic_rev(tp) == ASIC_REV_5755 || 16194 tg3_asic_rev(tp) == ASIC_REV_5787 || 16195 tg3_asic_rev(tp) == ASIC_REV_5784 || 16196 tg3_asic_rev(tp) == ASIC_REV_5761 || 16197 tg3_asic_rev(tp) == ASIC_REV_5785 || 16198 tg3_asic_rev(tp) == ASIC_REV_57780 || 16199 tg3_flag(tp, 57765_PLUS)) 16200 tg3_flag_set(tp, 5755_PLUS); 16201 16202 if (tg3_asic_rev(tp) == ASIC_REV_5780 || 16203 tg3_asic_rev(tp) == ASIC_REV_5714) 16204 tg3_flag_set(tp, 5780_CLASS); 16205 16206 if (tg3_asic_rev(tp) == ASIC_REV_5750 || 16207 tg3_asic_rev(tp) == ASIC_REV_5752 || 16208 tg3_asic_rev(tp) == ASIC_REV_5906 || 16209 tg3_flag(tp, 5755_PLUS) || 16210 tg3_flag(tp, 5780_CLASS)) 16211 tg3_flag_set(tp, 5750_PLUS); 16212 16213 if (tg3_asic_rev(tp) == ASIC_REV_5705 || 16214 tg3_flag(tp, 5750_PLUS)) 16215 tg3_flag_set(tp, 5705_PLUS); 16216 } 16217 16218 static bool tg3_10_100_only_device(struct tg3 *tp, 16219 const struct pci_device_id *ent) 16220 { 16221 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK; 16222 16223 if ((tg3_asic_rev(tp) == ASIC_REV_5703 && 16224 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || 16225 (tp->phy_flags & TG3_PHYFLG_IS_FET)) 16226 return true; 16227 16228 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) { 16229 if (tg3_asic_rev(tp) == ASIC_REV_5705) { 16230 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100) 16231 return true; 16232 } else { 16233 return true; 16234 } 16235 } 16236 16237 return false; 16238 } 16239 16240 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) 16241 { 16242 u32 misc_ctrl_reg; 16243 u32 pci_state_reg, grc_misc_cfg; 16244 u32 val; 16245 u16 pci_cmd; 16246 int err; 16247 16248 /* Force memory write invalidate off. If we leave it on, 16249 * then on 5700_BX chips we have to enable a workaround. 16250 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary 16251 * to match the cacheline size. The Broadcom driver has this 16252 * workaround but turns MWI off all the time, so it never uses 16253 * it. This seems to suggest that the workaround is insufficient. 16254 */ 16255 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 16256 pci_cmd &= ~PCI_COMMAND_INVALIDATE; 16257 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 16258 16259 /* Important! -- Make sure register accesses are byteswapped 16260 * correctly. Also, for those chips that require it, make 16261 * sure that indirect register accesses are enabled before 16262 * the first operation.
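 * The swap and indirect-access control bits themselves were staged
 * into tp->misc_host_ctrl by tg3_init_one(); the code below only
 * merges in the chip revision field read back from the device.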
16263 */ 16264 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 16265 &misc_ctrl_reg); 16266 tp->misc_host_ctrl |= (misc_ctrl_reg & 16267 MISC_HOST_CTRL_CHIPREV); 16268 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 16269 tp->misc_host_ctrl); 16270 16271 tg3_detect_asic_rev(tp, misc_ctrl_reg); 16272 16273 /* If we have 5702/03 A1 or A2 on certain ICH chipsets, 16274 * we need to disable memory and use config. cycles 16275 * only to access all registers. The 5702/03 chips 16276 * can mistakenly decode the special cycles from the 16277 * ICH chipsets as memory write cycles, causing corruption 16278 * of register and memory space. Only certain ICH bridges 16279 * will drive special cycles with non-zero data during the 16280 * address phase which can fall within the 5703's address 16281 * range. This is not an ICH bug as the PCI spec allows 16282 * non-zero address during special cycles. However, only 16283 * these ICH bridges are known to drive non-zero addresses 16284 * during special cycles. 16285 * 16286 * Since special cycles do not cross PCI bridges, we only 16287 * enable this workaround if the 5703 is on the secondary 16288 * bus of these ICH bridges. 16289 */ 16290 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) || 16291 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) { 16292 static struct tg3_dev_id { 16293 u32 vendor; 16294 u32 device; 16295 u32 rev; 16296 } ich_chipsets[] = { 16297 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8, 16298 PCI_ANY_ID }, 16299 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8, 16300 PCI_ANY_ID }, 16301 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11, 16302 0xa }, 16303 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6, 16304 PCI_ANY_ID }, 16305 { }, 16306 }; 16307 struct tg3_dev_id *pci_id = &ich_chipsets[0]; 16308 struct pci_dev *bridge = NULL; 16309 16310 while (pci_id->vendor != 0) { 16311 bridge = pci_get_device(pci_id->vendor, pci_id->device, 16312 bridge); 16313 if (!bridge) { 16314 pci_id++; 16315 continue; 16316 } 16317 if (pci_id->rev != PCI_ANY_ID) { 16318 if (bridge->revision > pci_id->rev) 16319 continue; 16320 } 16321 if (bridge->subordinate && 16322 (bridge->subordinate->number == 16323 tp->pdev->bus->number)) { 16324 tg3_flag_set(tp, ICH_WORKAROUND); 16325 pci_dev_put(bridge); 16326 break; 16327 } 16328 } 16329 } 16330 16331 if (tg3_asic_rev(tp) == ASIC_REV_5701) { 16332 static struct tg3_dev_id { 16333 u32 vendor; 16334 u32 device; 16335 } bridge_chipsets[] = { 16336 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 }, 16337 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 }, 16338 { }, 16339 }; 16340 struct tg3_dev_id *pci_id = &bridge_chipsets[0]; 16341 struct pci_dev *bridge = NULL; 16342 16343 while (pci_id->vendor != 0) { 16344 bridge = pci_get_device(pci_id->vendor, 16345 pci_id->device, 16346 bridge); 16347 if (!bridge) { 16348 pci_id++; 16349 continue; 16350 } 16351 if (bridge->subordinate && 16352 (bridge->subordinate->number <= 16353 tp->pdev->bus->number) && 16354 (bridge->subordinate->busn_res.end >= 16355 tp->pdev->bus->number)) { 16356 tg3_flag_set(tp, 5701_DMA_BUG); 16357 pci_dev_put(bridge); 16358 break; 16359 } 16360 } 16361 } 16362 16363 /* The EPB bridge inside 5714, 5715, and 5780 cannot support 16364 * DMA addresses > 40-bit. This bridge may have other additional 16365 * 57xx devices behind it in some 4-port NIC designs for example. 16366 * Any tg3 device found behind the bridge will also need the 40-bit 16367 * DMA workaround. 
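 * The 40BIT_DMA_BUG flag set here is what later caps the DMA mask
 * at DMA_BIT_MASK(40) in tg3_init_one().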
16368 */ 16369 if (tg3_flag(tp, 5780_CLASS)) { 16370 tg3_flag_set(tp, 40BIT_DMA_BUG); 16371 tp->msi_cap = tp->pdev->msi_cap; 16372 } else { 16373 struct pci_dev *bridge = NULL; 16374 16375 do { 16376 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS, 16377 PCI_DEVICE_ID_SERVERWORKS_EPB, 16378 bridge); 16379 if (bridge && bridge->subordinate && 16380 (bridge->subordinate->number <= 16381 tp->pdev->bus->number) && 16382 (bridge->subordinate->busn_res.end >= 16383 tp->pdev->bus->number)) { 16384 tg3_flag_set(tp, 40BIT_DMA_BUG); 16385 pci_dev_put(bridge); 16386 break; 16387 } 16388 } while (bridge); 16389 } 16390 16391 if (tg3_asic_rev(tp) == ASIC_REV_5704 || 16392 tg3_asic_rev(tp) == ASIC_REV_5714) 16393 tp->pdev_peer = tg3_find_peer(tp); 16394 16395 /* Determine TSO capabilities */ 16396 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0) 16397 ; /* Do nothing. HW bug. */ 16398 else if (tg3_flag(tp, 57765_PLUS)) 16399 tg3_flag_set(tp, HW_TSO_3); 16400 else if (tg3_flag(tp, 5755_PLUS) || 16401 tg3_asic_rev(tp) == ASIC_REV_5906) 16402 tg3_flag_set(tp, HW_TSO_2); 16403 else if (tg3_flag(tp, 5750_PLUS)) { 16404 tg3_flag_set(tp, HW_TSO_1); 16405 tg3_flag_set(tp, TSO_BUG); 16406 if (tg3_asic_rev(tp) == ASIC_REV_5750 && 16407 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2) 16408 tg3_flag_clear(tp, TSO_BUG); 16409 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 && 16410 tg3_asic_rev(tp) != ASIC_REV_5701 && 16411 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 16412 tg3_flag_set(tp, FW_TSO); 16413 tg3_flag_set(tp, TSO_BUG); 16414 if (tg3_asic_rev(tp) == ASIC_REV_5705) 16415 tp->fw_needed = FIRMWARE_TG3TSO5; 16416 else 16417 tp->fw_needed = FIRMWARE_TG3TSO; 16418 } 16419 16420 /* Selectively allow TSO based on operating conditions */ 16421 if (tg3_flag(tp, HW_TSO_1) || 16422 tg3_flag(tp, HW_TSO_2) || 16423 tg3_flag(tp, HW_TSO_3) || 16424 tg3_flag(tp, FW_TSO)) { 16425 /* For firmware TSO, assume ASF is disabled. 16426 * We'll disable TSO later if we discover ASF 16427 * is enabled in tg3_get_eeprom_hw_cfg(). 
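 * (That re-check happens further down in this function, right
 * after the call to tg3_get_eeprom_hw_cfg().)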
16428 */ 16429 tg3_flag_set(tp, TSO_CAPABLE); 16430 } else { 16431 tg3_flag_clear(tp, TSO_CAPABLE); 16432 tg3_flag_clear(tp, TSO_BUG); 16433 tp->fw_needed = NULL; 16434 } 16435 16436 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) 16437 tp->fw_needed = FIRMWARE_TG3; 16438 16439 if (tg3_asic_rev(tp) == ASIC_REV_57766) 16440 tp->fw_needed = FIRMWARE_TG357766; 16441 16442 tp->irq_max = 1; 16443 16444 if (tg3_flag(tp, 5750_PLUS)) { 16445 tg3_flag_set(tp, SUPPORT_MSI); 16446 if (tg3_chip_rev(tp) == CHIPREV_5750_AX || 16447 tg3_chip_rev(tp) == CHIPREV_5750_BX || 16448 (tg3_asic_rev(tp) == ASIC_REV_5714 && 16449 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 && 16450 tp->pdev_peer == tp->pdev)) 16451 tg3_flag_clear(tp, SUPPORT_MSI); 16452 16453 if (tg3_flag(tp, 5755_PLUS) || 16454 tg3_asic_rev(tp) == ASIC_REV_5906) { 16455 tg3_flag_set(tp, 1SHOT_MSI); 16456 } 16457 16458 if (tg3_flag(tp, 57765_PLUS)) { 16459 tg3_flag_set(tp, SUPPORT_MSIX); 16460 tp->irq_max = TG3_IRQ_MAX_VECS; 16461 } 16462 } 16463 16464 tp->txq_max = 1; 16465 tp->rxq_max = 1; 16466 if (tp->irq_max > 1) { 16467 tp->rxq_max = TG3_RSS_MAX_NUM_QS; 16468 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS); 16469 16470 if (tg3_asic_rev(tp) == ASIC_REV_5719 || 16471 tg3_asic_rev(tp) == ASIC_REV_5720) 16472 tp->txq_max = tp->irq_max - 1; 16473 } 16474 16475 if (tg3_flag(tp, 5755_PLUS) || 16476 tg3_asic_rev(tp) == ASIC_REV_5906) 16477 tg3_flag_set(tp, SHORT_DMA_BUG); 16478 16479 if (tg3_asic_rev(tp) == ASIC_REV_5719) 16480 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K; 16481 16482 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16483 tg3_asic_rev(tp) == ASIC_REV_5719 || 16484 tg3_asic_rev(tp) == ASIC_REV_5720 || 16485 tg3_asic_rev(tp) == ASIC_REV_5762) 16486 tg3_flag_set(tp, LRG_PROD_RING_CAP); 16487 16488 if (tg3_flag(tp, 57765_PLUS) && 16489 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0) 16490 tg3_flag_set(tp, USE_JUMBO_BDFLAG); 16491 16492 if (!tg3_flag(tp, 5705_PLUS) || 16493 tg3_flag(tp, 5780_CLASS) || 16494 tg3_flag(tp, USE_JUMBO_BDFLAG)) 16495 tg3_flag_set(tp, JUMBO_CAPABLE); 16496 16497 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 16498 &pci_state_reg); 16499 16500 if (pci_is_pcie(tp->pdev)) { 16501 u16 lnkctl; 16502 16503 tg3_flag_set(tp, PCI_EXPRESS); 16504 16505 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl); 16506 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) { 16507 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 16508 tg3_flag_clear(tp, HW_TSO_2); 16509 tg3_flag_clear(tp, TSO_CAPABLE); 16510 } 16511 if (tg3_asic_rev(tp) == ASIC_REV_5784 || 16512 tg3_asic_rev(tp) == ASIC_REV_5761 || 16513 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 || 16514 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1) 16515 tg3_flag_set(tp, CLKREQ_BUG); 16516 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) { 16517 tg3_flag_set(tp, L1PLLPD_EN); 16518 } 16519 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) { 16520 /* BCM5785 devices are effectively PCIe devices, and should 16521 * follow PCIe codepaths, but do not have a PCIe capabilities 16522 * section. 
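 * Setting PCI_EXPRESS here keeps the 5785 off the PCI and PCI-X
 * workaround paths below.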
16523 */ 16524 tg3_flag_set(tp, PCI_EXPRESS); 16525 } else if (!tg3_flag(tp, 5705_PLUS) || 16526 tg3_flag(tp, 5780_CLASS)) { 16527 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX); 16528 if (!tp->pcix_cap) { 16529 dev_err(&tp->pdev->dev, 16530 "Cannot find PCI-X capability, aborting\n"); 16531 return -EIO; 16532 } 16533 16534 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE)) 16535 tg3_flag_set(tp, PCIX_MODE); 16536 } 16537 16538 /* If we have an AMD 762 or VIA K8T800 chipset, write 16539 * reordering to the mailbox registers done by the host 16540 * controller can cause major trouble. We read back from 16541 * every mailbox register write to force the writes to be 16542 * posted to the chip in order. 16543 */ 16544 if (pci_dev_present(tg3_write_reorder_chipsets) && 16545 !tg3_flag(tp, PCI_EXPRESS)) 16546 tg3_flag_set(tp, MBOX_WRITE_REORDER); 16547 16548 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, 16549 &tp->pci_cacheline_sz); 16550 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER, 16551 &tp->pci_lat_timer); 16552 if (tg3_asic_rev(tp) == ASIC_REV_5703 && 16553 tp->pci_lat_timer < 64) { 16554 tp->pci_lat_timer = 64; 16555 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, 16556 tp->pci_lat_timer); 16557 } 16558 16559 /* Important! -- It is critical that the PCI-X hw workaround 16560 * situation is decided before the first MMIO register access. 16561 */ 16562 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) { 16563 /* 5700 BX chips need to have their TX producer index 16564 * mailboxes written twice to work around a bug. 16565 */ 16566 tg3_flag_set(tp, TXD_MBOX_HWBUG); 16567 16568 /* If we are in PCI-X mode, enable the register write workaround. 16569 * 16570 * The workaround is to use indirect register accesses 16571 * for all chip writes not to mailbox registers. 16572 */ 16573 if (tg3_flag(tp, PCIX_MODE)) { 16574 u32 pm_reg; 16575 16576 tg3_flag_set(tp, PCIX_TARGET_HWBUG); 16577 16578 /* The chip can have its power management PCI config 16579 * space registers clobbered due to this bug. 16580 * So explicitly force the chip into D0 here. 16581 */ 16582 pci_read_config_dword(tp->pdev, 16583 tp->pdev->pm_cap + PCI_PM_CTRL, 16584 &pm_reg); 16585 pm_reg &= ~PCI_PM_CTRL_STATE_MASK; 16586 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */; 16587 pci_write_config_dword(tp->pdev, 16588 tp->pdev->pm_cap + PCI_PM_CTRL, 16589 pm_reg); 16590 16591 /* Also, force SERR#/PERR# in PCI command.
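 * (PCI_COMMAND_PARITY enables parity error response and
 * PCI_COMMAND_SERR enables SERR# reporting, so bus errors hit on
 * the workaround paths are reported instead of silently dropped.)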
*/ 16592 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 16593 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; 16594 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 16595 } 16596 } 16597 16598 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0) 16599 tg3_flag_set(tp, PCI_HIGH_SPEED); 16600 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0) 16601 tg3_flag_set(tp, PCI_32BIT); 16602 16603 /* Chip-specific fixup from Broadcom driver */ 16604 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) && 16605 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) { 16606 pci_state_reg |= PCISTATE_RETRY_SAME_DMA; 16607 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg); 16608 } 16609 16610 /* Default fast path register access methods */ 16611 tp->read32 = tg3_read32; 16612 tp->write32 = tg3_write32; 16613 tp->read32_mbox = tg3_read32; 16614 tp->write32_mbox = tg3_write32; 16615 tp->write32_tx_mbox = tg3_write32; 16616 tp->write32_rx_mbox = tg3_write32; 16617 16618 /* Various workaround register access methods */ 16619 if (tg3_flag(tp, PCIX_TARGET_HWBUG)) 16620 tp->write32 = tg3_write_indirect_reg32; 16621 else if (tg3_asic_rev(tp) == ASIC_REV_5701 || 16622 (tg3_flag(tp, PCI_EXPRESS) && 16623 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) { 16624 /* 16625 * Back to back register writes can cause problems on these 16626 * chips, the workaround is to read back all reg writes 16627 * except those to mailbox regs. 16628 * 16629 * See tg3_write_indirect_reg32(). 16630 */ 16631 tp->write32 = tg3_write_flush_reg32; 16632 } 16633 16634 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) { 16635 tp->write32_tx_mbox = tg3_write32_tx_mbox; 16636 if (tg3_flag(tp, MBOX_WRITE_REORDER)) 16637 tp->write32_rx_mbox = tg3_write_flush_reg32; 16638 } 16639 16640 if (tg3_flag(tp, ICH_WORKAROUND)) { 16641 tp->read32 = tg3_read_indirect_reg32; 16642 tp->write32 = tg3_write_indirect_reg32; 16643 tp->read32_mbox = tg3_read_indirect_mbox; 16644 tp->write32_mbox = tg3_write_indirect_mbox; 16645 tp->write32_tx_mbox = tg3_write_indirect_mbox; 16646 tp->write32_rx_mbox = tg3_write_indirect_mbox; 16647 16648 iounmap(tp->regs); 16649 tp->regs = NULL; 16650 16651 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 16652 pci_cmd &= ~PCI_COMMAND_MEMORY; 16653 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 16654 } 16655 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 16656 tp->read32_mbox = tg3_read32_mbox_5906; 16657 tp->write32_mbox = tg3_write32_mbox_5906; 16658 tp->write32_tx_mbox = tg3_write32_mbox_5906; 16659 tp->write32_rx_mbox = tg3_write32_mbox_5906; 16660 } 16661 16662 if (tp->write32 == tg3_write_indirect_reg32 || 16663 (tg3_flag(tp, PCIX_MODE) && 16664 (tg3_asic_rev(tp) == ASIC_REV_5700 || 16665 tg3_asic_rev(tp) == ASIC_REV_5701))) 16666 tg3_flag_set(tp, SRAM_USE_CONFIG); 16667 16668 /* The memory arbiter has to be enabled in order for SRAM accesses 16669 * to succeed. Normally on powerup the tg3 chip firmware will make 16670 * sure it is enabled, but other entities such as system netboot 16671 * code might disable it. 
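 * The read-modify-write below preserves any other MEMARB_MODE bits
 * that are already set while turning the arbiter on.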
16672 */ 16673 val = tr32(MEMARB_MODE); 16674 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); 16675 16676 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3; 16677 if (tg3_asic_rev(tp) == ASIC_REV_5704 || 16678 tg3_flag(tp, 5780_CLASS)) { 16679 if (tg3_flag(tp, PCIX_MODE)) { 16680 pci_read_config_dword(tp->pdev, 16681 tp->pcix_cap + PCI_X_STATUS, 16682 &val); 16683 tp->pci_fn = val & 0x7; 16684 } 16685 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16686 tg3_asic_rev(tp) == ASIC_REV_5719 || 16687 tg3_asic_rev(tp) == ASIC_REV_5720) { 16688 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val); 16689 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG) 16690 val = tr32(TG3_CPMU_STATUS); 16691 16692 if (tg3_asic_rev(tp) == ASIC_REV_5717) 16693 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0; 16694 else 16695 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >> 16696 TG3_CPMU_STATUS_FSHFT_5719; 16697 } 16698 16699 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) { 16700 tp->write32_tx_mbox = tg3_write_flush_reg32; 16701 tp->write32_rx_mbox = tg3_write_flush_reg32; 16702 } 16703 16704 /* Get eeprom hw config before calling tg3_set_power_state(). 16705 * In particular, the TG3_FLAG_IS_NIC flag must be 16706 * determined before calling tg3_set_power_state() so that 16707 * we know whether or not to switch out of Vaux power. 16708 * When the flag is set, it means that GPIO1 is used for eeprom 16709 * write protect and also implies that it is a LOM where GPIOs 16710 * are not used to switch power. 16711 */ 16712 tg3_get_eeprom_hw_cfg(tp); 16713 16714 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) { 16715 tg3_flag_clear(tp, TSO_CAPABLE); 16716 tg3_flag_clear(tp, TSO_BUG); 16717 tp->fw_needed = NULL; 16718 } 16719 16720 if (tg3_flag(tp, ENABLE_APE)) { 16721 /* Allow reads and writes to the 16722 * APE register and memory space. 16723 */ 16724 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR | 16725 PCISTATE_ALLOW_APE_SHMEM_WR | 16726 PCISTATE_ALLOW_APE_PSPACE_WR; 16727 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, 16728 pci_state_reg); 16729 16730 tg3_ape_lock_init(tp); 16731 tp->ape_hb_interval = 16732 msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC); 16733 } 16734 16735 /* Set up tp->grc_local_ctrl before calling 16736 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high 16737 * will bring 5700's external PHY out of reset. 16738 * It is also used as eeprom write protect on LOMs. 16739 */ 16740 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM; 16741 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 16742 tg3_flag(tp, EEPROM_WRITE_PROT)) 16743 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | 16744 GRC_LCLCTRL_GPIO_OUTPUT1); 16745 /* Unused GPIO3 must be driven as output on 5752 because there 16746 * are no pull-up resistors on unused GPIO pins. 16747 */ 16748 else if (tg3_asic_rev(tp) == ASIC_REV_5752) 16749 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; 16750 16751 if (tg3_asic_rev(tp) == ASIC_REV_5755 || 16752 tg3_asic_rev(tp) == ASIC_REV_57780 || 16753 tg3_flag(tp, 57765_CLASS)) 16754 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; 16755 16756 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 16757 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { 16758 /* Turn off the debug UART. */ 16759 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; 16760 if (tg3_flag(tp, IS_NIC)) 16761 /* Keep VMain power. 
*/ 16762 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | 16763 GRC_LCLCTRL_GPIO_OUTPUT0; 16764 } 16765 16766 if (tg3_asic_rev(tp) == ASIC_REV_5762) 16767 tp->grc_local_ctrl |= 16768 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL; 16769 16770 /* Switch out of Vaux if it is a NIC */ 16771 tg3_pwrsrc_switch_to_vmain(tp); 16772 16773 /* Derive initial jumbo mode from MTU assigned in 16774 * ether_setup() via the alloc_etherdev() call 16775 */ 16776 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS)) 16777 tg3_flag_set(tp, JUMBO_RING_ENABLE); 16778 16779 /* Determine WakeOnLan speed to use. */ 16780 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 16781 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 16782 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 || 16783 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) { 16784 tg3_flag_clear(tp, WOL_SPEED_100MB); 16785 } else { 16786 tg3_flag_set(tp, WOL_SPEED_100MB); 16787 } 16788 16789 if (tg3_asic_rev(tp) == ASIC_REV_5906) 16790 tp->phy_flags |= TG3_PHYFLG_IS_FET; 16791 16792 /* A few boards don't want Ethernet@WireSpeed phy feature */ 16793 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 16794 (tg3_asic_rev(tp) == ASIC_REV_5705 && 16795 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) && 16796 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) || 16797 (tp->phy_flags & TG3_PHYFLG_IS_FET) || 16798 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 16799 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED; 16800 16801 if (tg3_chip_rev(tp) == CHIPREV_5703_AX || 16802 tg3_chip_rev(tp) == CHIPREV_5704_AX) 16803 tp->phy_flags |= TG3_PHYFLG_ADC_BUG; 16804 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) 16805 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG; 16806 16807 if (tg3_flag(tp, 5705_PLUS) && 16808 !(tp->phy_flags & TG3_PHYFLG_IS_FET) && 16809 tg3_asic_rev(tp) != ASIC_REV_5785 && 16810 tg3_asic_rev(tp) != ASIC_REV_57780 && 16811 !tg3_flag(tp, 57765_PLUS)) { 16812 if (tg3_asic_rev(tp) == ASIC_REV_5755 || 16813 tg3_asic_rev(tp) == ASIC_REV_5787 || 16814 tg3_asic_rev(tp) == ASIC_REV_5784 || 16815 tg3_asic_rev(tp) == ASIC_REV_5761) { 16816 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 && 16817 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722) 16818 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG; 16819 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) 16820 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM; 16821 } else 16822 tp->phy_flags |= TG3_PHYFLG_BER_BUG; 16823 } 16824 16825 if (tg3_asic_rev(tp) == ASIC_REV_5784 && 16826 tg3_chip_rev(tp) != CHIPREV_5784_AX) { 16827 tp->phy_otp = tg3_read_otp_phycfg(tp); 16828 if (tp->phy_otp == 0) 16829 tp->phy_otp = TG3_OTP_DEFAULT; 16830 } 16831 16832 if (tg3_flag(tp, CPMU_PRESENT)) 16833 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; 16834 else 16835 tp->mi_mode = MAC_MI_MODE_BASE; 16836 16837 tp->coalesce_mode = 0; 16838 if (tg3_chip_rev(tp) != CHIPREV_5700_AX && 16839 tg3_chip_rev(tp) != CHIPREV_5700_BX) 16840 tp->coalesce_mode |= HOSTCC_MODE_32BYTE; 16841 16842 /* Set these bits to enable statistics workaround. */ 16843 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 16844 tg3_asic_rev(tp) == ASIC_REV_5762 || 16845 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || 16846 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) { 16847 tp->coalesce_mode |= HOSTCC_MODE_ATTN; 16848 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN; 16849 } 16850 16851 if (tg3_asic_rev(tp) == ASIC_REV_5785 || 16852 tg3_asic_rev(tp) == ASIC_REV_57780) 16853 tg3_flag_set(tp, USE_PHYLIB); 16854 16855 err = tg3_mdio_init(tp); 16856 if (err) 16857 return err; 16858 16859 /* Initialize data/descriptor byte/word swapping. 
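 * On the 5720 and 5762 a wider set of swap/enable bits is preserved
 * from the current GRC_MODE value; on all other chips only
 * GRC_MODE_HOST_STACKUP survives, and the rest comes from
 * tp->grc_mode as staged in tg3_init_one().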
*/ 16860 val = tr32(GRC_MODE); 16861 if (tg3_asic_rev(tp) == ASIC_REV_5720 || 16862 tg3_asic_rev(tp) == ASIC_REV_5762) 16863 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA | 16864 GRC_MODE_WORD_SWAP_B2HRX_DATA | 16865 GRC_MODE_B2HRX_ENABLE | 16866 GRC_MODE_HTX2B_ENABLE | 16867 GRC_MODE_HOST_STACKUP); 16868 else 16869 val &= GRC_MODE_HOST_STACKUP; 16870 16871 tw32(GRC_MODE, val | tp->grc_mode); 16872 16873 tg3_switch_clocks(tp); 16874 16875 /* Clear this out for sanity. */ 16876 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 16877 16878 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */ 16879 tw32(TG3PCI_REG_BASE_ADDR, 0); 16880 16881 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 16882 &pci_state_reg); 16883 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && 16884 !tg3_flag(tp, PCIX_TARGET_HWBUG)) { 16885 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || 16886 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 || 16887 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 || 16888 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) { 16889 void __iomem *sram_base; 16890 16891 /* Write some dummy words into the SRAM status block 16892 * area, see if it reads back correctly. If the return 16893 * value is bad, force enable the PCIX workaround. 16894 */ 16895 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK; 16896 16897 writel(0x00000000, sram_base); 16898 writel(0x00000000, sram_base + 4); 16899 writel(0xffffffff, sram_base + 4); 16900 if (readl(sram_base) != 0x00000000) 16901 tg3_flag_set(tp, PCIX_TARGET_HWBUG); 16902 } 16903 } 16904 16905 udelay(50); 16906 tg3_nvram_init(tp); 16907 16908 /* If the device has an NVRAM, no need to load patch firmware */ 16909 if (tg3_asic_rev(tp) == ASIC_REV_57766 && 16910 !tg3_flag(tp, NO_NVRAM)) 16911 tp->fw_needed = NULL; 16912 16913 grc_misc_cfg = tr32(GRC_MISC_CFG); 16914 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK; 16915 16916 if (tg3_asic_rev(tp) == ASIC_REV_5705 && 16917 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 || 16918 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) 16919 tg3_flag_set(tp, IS_5788); 16920 16921 if (!tg3_flag(tp, IS_5788) && 16922 tg3_asic_rev(tp) != ASIC_REV_5700) 16923 tg3_flag_set(tp, TAGGED_STATUS); 16924 if (tg3_flag(tp, TAGGED_STATUS)) { 16925 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | 16926 HOSTCC_MODE_CLRTICK_TXBD); 16927 16928 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS; 16929 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 16930 tp->misc_host_ctrl); 16931 } 16932 16933 /* Preserve the APE MAC_MODE bits */ 16934 if (tg3_flag(tp, ENABLE_APE)) 16935 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 16936 else 16937 tp->mac_mode = 0; 16938 16939 if (tg3_10_100_only_device(tp, ent)) 16940 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY; 16941 16942 err = tg3_phy_probe(tp); 16943 if (err) { 16944 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err); 16945 /* ... but do not return immediately ... */ 16946 tg3_mdio_fini(tp); 16947 } 16948 16949 tg3_read_vpd(tp); 16950 tg3_read_fw_ver(tp); 16951 16952 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 16953 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; 16954 } else { 16955 if (tg3_asic_rev(tp) == ASIC_REV_5700) 16956 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; 16957 else 16958 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; 16959 } 16960 16961 /* 5700 {AX,BX} chips have a broken status block link 16962 * change bit implementation, so we must use the 16963 * status register in those cases. 
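 * With USE_LINKCHG_REG set, link transitions are detected by
 * reading the MAC status register directly rather than trusting
 * the status block's link-changed bit.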
16964 */ 16965 if (tg3_asic_rev(tp) == ASIC_REV_5700) 16966 tg3_flag_set(tp, USE_LINKCHG_REG); 16967 else 16968 tg3_flag_clear(tp, USE_LINKCHG_REG); 16969 16970 /* The led_ctrl is set during tg3_phy_probe; here we might 16971 * have to force the link status polling mechanism based 16972 * upon subsystem IDs. 16973 */ 16974 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && 16975 tg3_asic_rev(tp) == ASIC_REV_5701 && 16976 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 16977 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; 16978 tg3_flag_set(tp, USE_LINKCHG_REG); 16979 } 16980 16981 /* For all SERDES we poll the MAC status register. */ 16982 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 16983 tg3_flag_set(tp, POLL_SERDES); 16984 else 16985 tg3_flag_clear(tp, POLL_SERDES); 16986 16987 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF)) 16988 tg3_flag_set(tp, POLL_CPMU_LINK); 16989 16990 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN; 16991 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; 16992 if (tg3_asic_rev(tp) == ASIC_REV_5701 && 16993 tg3_flag(tp, PCIX_MODE)) { 16994 tp->rx_offset = NET_SKB_PAD; 16995 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 16996 tp->rx_copy_thresh = ~(u16)0; 16997 #endif 16998 } 16999 17000 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1; 17001 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1; 17002 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1; 17003 17004 tp->rx_std_max_post = tp->rx_std_ring_mask + 1; 17005 17006 /* Increment the rx prod index on the rx std ring by at most 17007 * 8 for these chips to work around hw errata. 17008 */ 17009 if (tg3_asic_rev(tp) == ASIC_REV_5750 || 17010 tg3_asic_rev(tp) == ASIC_REV_5752 || 17011 tg3_asic_rev(tp) == ASIC_REV_5755) 17012 tp->rx_std_max_post = 8; 17013 17014 if (tg3_flag(tp, ASPM_WORKAROUND)) 17015 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) & 17016 PCIE_PWR_MGMT_L1_THRESH_MSK; 17017 17018 return err; 17019 } 17020 17021 static int tg3_get_device_address(struct tg3 *tp, u8 *addr) 17022 { 17023 u32 hi, lo, mac_offset; 17024 int addr_ok = 0; 17025 int err; 17026 17027 if (!eth_platform_get_mac_address(&tp->pdev->dev, addr)) 17028 return 0; 17029 17030 if (tg3_flag(tp, IS_SSB_CORE)) { 17031 err = ssb_gige_get_macaddr(tp->pdev, addr); 17032 if (!err && is_valid_ether_addr(addr)) 17033 return 0; 17034 } 17035 17036 mac_offset = 0x7c; 17037 if (tg3_asic_rev(tp) == ASIC_REV_5704 || 17038 tg3_flag(tp, 5780_CLASS)) { 17039 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) 17040 mac_offset = 0xcc; 17041 if (tg3_nvram_lock(tp)) 17042 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); 17043 else 17044 tg3_nvram_unlock(tp); 17045 } else if (tg3_flag(tp, 5717_PLUS)) { 17046 if (tp->pci_fn & 1) 17047 mac_offset = 0xcc; 17048 if (tp->pci_fn > 1) 17049 mac_offset += 0x18c; 17050 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) 17051 mac_offset = 0x10; 17052 17053 /* First try to get it from MAC address mailbox. */ 17054 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi); 17055 if ((hi >> 16) == 0x484b) { 17056 addr[0] = (hi >> 8) & 0xff; 17057 addr[1] = (hi >> 0) & 0xff; 17058 17059 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo); 17060 addr[2] = (lo >> 24) & 0xff; 17061 addr[3] = (lo >> 16) & 0xff; 17062 addr[4] = (lo >> 8) & 0xff; 17063 addr[5] = (lo >> 0) & 0xff; 17064 17065 /* Some old bootcode may report a 0 MAC address in SRAM */ 17066 addr_ok = is_valid_ether_addr(addr); 17067 } 17068 if (!addr_ok) { 17069 /* Next, try NVRAM.
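 * (Bytes 0-1 of the address sit in the low half of the be32 word
 * at mac_offset, bytes 2-5 in the word at mac_offset + 4.)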
*/ 17070 if (!tg3_flag(tp, NO_NVRAM) && 17071 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && 17072 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { 17073 memcpy(&addr[0], ((char *)&hi) + 2, 2); 17074 memcpy(&addr[2], (char *)&lo, sizeof(lo)); 17075 } 17076 /* Finally just fetch it out of the MAC control regs. */ 17077 else { 17078 hi = tr32(MAC_ADDR_0_HIGH); 17079 lo = tr32(MAC_ADDR_0_LOW); 17080 17081 addr[5] = lo & 0xff; 17082 addr[4] = (lo >> 8) & 0xff; 17083 addr[3] = (lo >> 16) & 0xff; 17084 addr[2] = (lo >> 24) & 0xff; 17085 addr[1] = hi & 0xff; 17086 addr[0] = (hi >> 8) & 0xff; 17087 } 17088 } 17089 17090 if (!is_valid_ether_addr(addr)) 17091 return -EINVAL; 17092 return 0; 17093 } 17094 17095 #define BOUNDARY_SINGLE_CACHELINE 1 17096 #define BOUNDARY_MULTI_CACHELINE 2 17097 17098 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val) 17099 { 17100 int cacheline_size; 17101 u8 byte; 17102 int goal; 17103 17104 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte); 17105 if (byte == 0) 17106 cacheline_size = 1024; 17107 else 17108 cacheline_size = (int) byte * 4; 17109 17110 /* On 5703 and later chips, the boundary bits have no 17111 * effect. 17112 */ 17113 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 17114 tg3_asic_rev(tp) != ASIC_REV_5701 && 17115 !tg3_flag(tp, PCI_EXPRESS)) 17116 goto out; 17117 17118 #if defined(CONFIG_PPC64) || defined(CONFIG_PARISC) 17119 goal = BOUNDARY_MULTI_CACHELINE; 17120 #else 17121 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA) 17122 goal = BOUNDARY_SINGLE_CACHELINE; 17123 #else 17124 goal = 0; 17125 #endif 17126 #endif 17127 17128 if (tg3_flag(tp, 57765_PLUS)) { 17129 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; 17130 goto out; 17131 } 17132 17133 if (!goal) 17134 goto out; 17135 17136 /* PCI controllers on most RISC systems tend to disconnect 17137 * when a device tries to burst across a cache-line boundary. 17138 * Therefore, letting tg3 do so just wastes PCI bandwidth. 17139 * 17140 * Unfortunately, for PCI-E there are only limited 17141 * write-side controls for this, and thus for reads 17142 * we will still get the disconnects. We'll also waste 17143 * these PCI cycles for both read and write for chips 17144 * other than 5700 and 5701 which do not implement the 17145 * boundary bits. 
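 * Example: with a 64-byte cache line and a BOUNDARY_SINGLE_CACHELINE
 * goal, the plain-PCI case below selects DMA_RWCTRL_READ_BNDRY_64 |
 * DMA_RWCTRL_WRITE_BNDRY_64, so no single burst crosses a cache
 * line.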
17146 */ 17147 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) { 17148 switch (cacheline_size) { 17149 case 16: 17150 case 32: 17151 case 64: 17152 case 128: 17153 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17154 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX | 17155 DMA_RWCTRL_WRITE_BNDRY_128_PCIX); 17156 } else { 17157 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | 17158 DMA_RWCTRL_WRITE_BNDRY_384_PCIX); 17159 } 17160 break; 17161 17162 case 256: 17163 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX | 17164 DMA_RWCTRL_WRITE_BNDRY_256_PCIX); 17165 break; 17166 17167 default: 17168 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | 17169 DMA_RWCTRL_WRITE_BNDRY_384_PCIX); 17170 break; 17171 } 17172 } else if (tg3_flag(tp, PCI_EXPRESS)) { 17173 switch (cacheline_size) { 17174 case 16: 17175 case 32: 17176 case 64: 17177 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17178 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; 17179 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE; 17180 break; 17181 } 17182 fallthrough; 17183 case 128: 17184 default: 17185 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; 17186 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE; 17187 break; 17188 } 17189 } else { 17190 switch (cacheline_size) { 17191 case 16: 17192 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17193 val |= (DMA_RWCTRL_READ_BNDRY_16 | 17194 DMA_RWCTRL_WRITE_BNDRY_16); 17195 break; 17196 } 17197 fallthrough; 17198 case 32: 17199 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17200 val |= (DMA_RWCTRL_READ_BNDRY_32 | 17201 DMA_RWCTRL_WRITE_BNDRY_32); 17202 break; 17203 } 17204 fallthrough; 17205 case 64: 17206 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17207 val |= (DMA_RWCTRL_READ_BNDRY_64 | 17208 DMA_RWCTRL_WRITE_BNDRY_64); 17209 break; 17210 } 17211 fallthrough; 17212 case 128: 17213 if (goal == BOUNDARY_SINGLE_CACHELINE) { 17214 val |= (DMA_RWCTRL_READ_BNDRY_128 | 17215 DMA_RWCTRL_WRITE_BNDRY_128); 17216 break; 17217 } 17218 fallthrough; 17219 case 256: 17220 val |= (DMA_RWCTRL_READ_BNDRY_256 | 17221 DMA_RWCTRL_WRITE_BNDRY_256); 17222 break; 17223 case 512: 17224 val |= (DMA_RWCTRL_READ_BNDRY_512 | 17225 DMA_RWCTRL_WRITE_BNDRY_512); 17226 break; 17227 case 1024: 17228 default: 17229 val |= (DMA_RWCTRL_READ_BNDRY_1024 | 17230 DMA_RWCTRL_WRITE_BNDRY_1024); 17231 break; 17232 } 17233 } 17234 17235 out: 17236 return val; 17237 } 17238 17239 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, 17240 int size, bool to_device) 17241 { 17242 struct tg3_internal_buffer_desc test_desc; 17243 u32 sram_dma_descs; 17244 int i, ret; 17245 17246 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE; 17247 17248 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0); 17249 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0); 17250 tw32(RDMAC_STATUS, 0); 17251 tw32(WDMAC_STATUS, 0); 17252 17253 tw32(BUFMGR_MODE, 0); 17254 tw32(FTQ_RESET, 0); 17255 17256 test_desc.addr_hi = ((u64) buf_dma) >> 32; 17257 test_desc.addr_lo = buf_dma & 0xffffffff; 17258 test_desc.nic_mbuf = 0x00002100; 17259 test_desc.len = size; 17260 17261 /* 17262 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz 17263 * the *second* time the tg3 driver was getting loaded after an 17264 * initial scan. 17265 * 17266 * Broadcom tells me: 17267 * ...the DMA engine is connected to the GRC block and a DMA 17268 * reset may affect the GRC block in some unpredictable way... 17269 * The behavior of resets to individual blocks has not been tested. 17270 * 17271 * Broadcom noted the GRC reset will also reset all sub-components. 
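 * The sequence below stages the descriptor into NIC SRAM through
 * the config-space memory window, kicks it off with an FTQ enqueue
 * write, and then polls the matching completion FIFO until the
 * descriptor address comes back.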
17272 */ 17273 if (to_device) { 17274 test_desc.cqid_sqid = (13 << 8) | 2; 17275 17276 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE); 17277 udelay(40); 17278 } else { 17279 test_desc.cqid_sqid = (16 << 8) | 7; 17280 17281 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE); 17282 udelay(40); 17283 } 17284 test_desc.flags = 0x00000005; 17285 17286 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) { 17287 u32 val; 17288 17289 val = *(((u32 *)&test_desc) + i); 17290 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 17291 sram_dma_descs + (i * sizeof(u32))); 17292 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); 17293 } 17294 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); 17295 17296 if (to_device) 17297 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs); 17298 else 17299 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs); 17300 17301 ret = -ENODEV; 17302 for (i = 0; i < 40; i++) { 17303 u32 val; 17304 17305 if (to_device) 17306 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ); 17307 else 17308 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ); 17309 if ((val & 0xffff) == sram_dma_descs) { 17310 ret = 0; 17311 break; 17312 } 17313 17314 udelay(100); 17315 } 17316 17317 return ret; 17318 } 17319 17320 #define TEST_BUFFER_SIZE 0x2000 17321 17322 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = { 17323 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, 17324 { }, 17325 }; 17326 17327 static int tg3_test_dma(struct tg3 *tp) 17328 { 17329 dma_addr_t buf_dma; 17330 u32 *buf, saved_dma_rwctrl; 17331 int ret = 0; 17332 17333 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, 17334 &buf_dma, GFP_KERNEL); 17335 if (!buf) { 17336 ret = -ENOMEM; 17337 goto out_nofree; 17338 } 17339 17340 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) | 17341 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT)); 17342 17343 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); 17344 17345 if (tg3_flag(tp, 57765_PLUS)) 17346 goto out; 17347 17348 if (tg3_flag(tp, PCI_EXPRESS)) { 17349 /* DMA read watermark not used on PCIE */ 17350 tp->dma_rwctrl |= 0x00180000; 17351 } else if (!tg3_flag(tp, PCIX_MODE)) { 17352 if (tg3_asic_rev(tp) == ASIC_REV_5705 || 17353 tg3_asic_rev(tp) == ASIC_REV_5750) 17354 tp->dma_rwctrl |= 0x003f0000; 17355 else 17356 tp->dma_rwctrl |= 0x003f000f; 17357 } else { 17358 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 17359 tg3_asic_rev(tp) == ASIC_REV_5704) { 17360 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f); 17361 u32 read_water = 0x7; 17362 17363 /* If the 5704 is behind the EPB bridge, we can 17364 * do the less restrictive ONE_DMA workaround for 17365 * better performance. 
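 * (A 5704 with 40BIT_DMA_BUG set necessarily sits behind the
 * ServerWorks EPB bridge; the flag is only set for 5780-class
 * chips or for devices found behind that bridge.)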
17366 */ 17367 if (tg3_flag(tp, 40BIT_DMA_BUG) && 17368 tg3_asic_rev(tp) == ASIC_REV_5704) 17369 tp->dma_rwctrl |= 0x8000; 17370 else if (ccval == 0x6 || ccval == 0x7) 17371 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; 17372 17373 if (tg3_asic_rev(tp) == ASIC_REV_5703) 17374 read_water = 4; 17375 /* Set bit 23 to enable PCIX hw bug fix */ 17376 tp->dma_rwctrl |= 17377 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) | 17378 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) | 17379 (1 << 23); 17380 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) { 17381 /* 5780 always in PCIX mode */ 17382 tp->dma_rwctrl |= 0x00144000; 17383 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) { 17384 /* 5714 always in PCIX mode */ 17385 tp->dma_rwctrl |= 0x00148000; 17386 } else { 17387 tp->dma_rwctrl |= 0x001b000f; 17388 } 17389 } 17390 if (tg3_flag(tp, ONE_DMA_AT_ONCE)) 17391 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; 17392 17393 if (tg3_asic_rev(tp) == ASIC_REV_5703 || 17394 tg3_asic_rev(tp) == ASIC_REV_5704) 17395 tp->dma_rwctrl &= 0xfffffff0; 17396 17397 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 17398 tg3_asic_rev(tp) == ASIC_REV_5701) { 17399 /* Remove this if it causes problems for some boards. */ 17400 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT; 17401 17402 /* On 5700/5701 chips, we need to set this bit. 17403 * Otherwise the chip will issue cacheline transactions 17404 * to streamable DMA memory with not all the byte 17405 * enables turned on. This is an error on several 17406 * RISC PCI controllers, in particular sparc64. 17407 * 17408 * On 5703/5704 chips, this bit has been reassigned 17409 * a different meaning. In particular, it is used 17410 * on those chips to enable a PCI-X workaround. 17411 */ 17412 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE; 17413 } 17414 17415 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17416 17417 17418 if (tg3_asic_rev(tp) != ASIC_REV_5700 && 17419 tg3_asic_rev(tp) != ASIC_REV_5701) 17420 goto out; 17421 17422 /* It is best to perform DMA test with maximum write burst size 17423 * to expose the 5700/5701 write DMA bug. 17424 */ 17425 saved_dma_rwctrl = tp->dma_rwctrl; 17426 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 17427 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17428 17429 while (1) { 17430 u32 *p = buf, i; 17431 17432 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) 17433 p[i] = i; 17434 17435 /* Send the buffer to the chip. */ 17436 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true); 17437 if (ret) { 17438 dev_err(&tp->pdev->dev, 17439 "%s: Buffer write failed. err = %d\n", 17440 __func__, ret); 17441 break; 17442 } 17443 17444 /* Now read it back. */ 17445 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false); 17446 if (ret) { 17447 dev_err(&tp->pdev->dev, "%s: Buffer read failed. " 17448 "err = %d\n", __func__, ret); 17449 break; 17450 } 17451 17452 /* Verify it. */ 17453 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { 17454 if (p[i] == i) 17455 continue; 17456 17457 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != 17458 DMA_RWCTRL_WRITE_BNDRY_16) { 17459 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 17460 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 17461 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17462 break; 17463 } else { 17464 dev_err(&tp->pdev->dev, 17465 "%s: Buffer corrupted on read back! " 17466 "(%d != %d)\n", __func__, p[i], i); 17467 ret = -ENODEV; 17468 goto out; 17469 } 17470 } 17471 17472 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) { 17473 /* Success. 
*/ 17474 ret = 0; 17475 break; 17476 } 17477 } 17478 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != 17479 DMA_RWCTRL_WRITE_BNDRY_16) { 17480 /* DMA test passed without adjusting DMA boundary, 17481 * now look for chipsets that are known to expose the 17482 * DMA bug without failing the test. 17483 */ 17484 if (pci_dev_present(tg3_dma_wait_state_chipsets)) { 17485 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 17486 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 17487 } else { 17488 /* Safe to use the calculated DMA boundary. */ 17489 tp->dma_rwctrl = saved_dma_rwctrl; 17490 } 17491 17492 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 17493 } 17494 17495 out: 17496 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma); 17497 out_nofree: 17498 return ret; 17499 } 17500 17501 static void tg3_init_bufmgr_config(struct tg3 *tp) 17502 { 17503 if (tg3_flag(tp, 57765_PLUS)) { 17504 tp->bufmgr_config.mbuf_read_dma_low_water = 17505 DEFAULT_MB_RDMA_LOW_WATER_5705; 17506 tp->bufmgr_config.mbuf_mac_rx_low_water = 17507 DEFAULT_MB_MACRX_LOW_WATER_57765; 17508 tp->bufmgr_config.mbuf_high_water = 17509 DEFAULT_MB_HIGH_WATER_57765; 17510 17511 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 17512 DEFAULT_MB_RDMA_LOW_WATER_5705; 17513 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 17514 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765; 17515 tp->bufmgr_config.mbuf_high_water_jumbo = 17516 DEFAULT_MB_HIGH_WATER_JUMBO_57765; 17517 } else if (tg3_flag(tp, 5705_PLUS)) { 17518 tp->bufmgr_config.mbuf_read_dma_low_water = 17519 DEFAULT_MB_RDMA_LOW_WATER_5705; 17520 tp->bufmgr_config.mbuf_mac_rx_low_water = 17521 DEFAULT_MB_MACRX_LOW_WATER_5705; 17522 tp->bufmgr_config.mbuf_high_water = 17523 DEFAULT_MB_HIGH_WATER_5705; 17524 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 17525 tp->bufmgr_config.mbuf_mac_rx_low_water = 17526 DEFAULT_MB_MACRX_LOW_WATER_5906; 17527 tp->bufmgr_config.mbuf_high_water = 17528 DEFAULT_MB_HIGH_WATER_5906; 17529 } 17530 17531 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 17532 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780; 17533 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 17534 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780; 17535 tp->bufmgr_config.mbuf_high_water_jumbo = 17536 DEFAULT_MB_HIGH_WATER_JUMBO_5780; 17537 } else { 17538 tp->bufmgr_config.mbuf_read_dma_low_water = 17539 DEFAULT_MB_RDMA_LOW_WATER; 17540 tp->bufmgr_config.mbuf_mac_rx_low_water = 17541 DEFAULT_MB_MACRX_LOW_WATER; 17542 tp->bufmgr_config.mbuf_high_water = 17543 DEFAULT_MB_HIGH_WATER; 17544 17545 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 17546 DEFAULT_MB_RDMA_LOW_WATER_JUMBO; 17547 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 17548 DEFAULT_MB_MACRX_LOW_WATER_JUMBO; 17549 tp->bufmgr_config.mbuf_high_water_jumbo = 17550 DEFAULT_MB_HIGH_WATER_JUMBO; 17551 } 17552 17553 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER; 17554 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER; 17555 } 17556 17557 static char *tg3_phy_string(struct tg3 *tp) 17558 { 17559 switch (tp->phy_id & TG3_PHY_ID_MASK) { 17560 case TG3_PHY_ID_BCM5400: return "5400"; 17561 case TG3_PHY_ID_BCM5401: return "5401"; 17562 case TG3_PHY_ID_BCM5411: return "5411"; 17563 case TG3_PHY_ID_BCM5701: return "5701"; 17564 case TG3_PHY_ID_BCM5703: return "5703"; 17565 case TG3_PHY_ID_BCM5704: return "5704"; 17566 case TG3_PHY_ID_BCM5705: return "5705"; 17567 case TG3_PHY_ID_BCM5750: return "5750"; 17568 case TG3_PHY_ID_BCM5752: return "5752"; 17569 case TG3_PHY_ID_BCM5714: return "5714"; 17570 case TG3_PHY_ID_BCM5780: return "5780"; 17571 case 
TG3_PHY_ID_BCM5755: return "5755"; 17572 case TG3_PHY_ID_BCM5787: return "5787"; 17573 case TG3_PHY_ID_BCM5784: return "5784"; 17574 case TG3_PHY_ID_BCM5756: return "5722/5756"; 17575 case TG3_PHY_ID_BCM5906: return "5906"; 17576 case TG3_PHY_ID_BCM5761: return "5761"; 17577 case TG3_PHY_ID_BCM5718C: return "5718C"; 17578 case TG3_PHY_ID_BCM5718S: return "5718S"; 17579 case TG3_PHY_ID_BCM57765: return "57765"; 17580 case TG3_PHY_ID_BCM5719C: return "5719C"; 17581 case TG3_PHY_ID_BCM5720C: return "5720C"; 17582 case TG3_PHY_ID_BCM5762: return "5762C"; 17583 case TG3_PHY_ID_BCM8002: return "8002/serdes"; 17584 case 0: return "serdes"; 17585 default: return "unknown"; 17586 } 17587 } 17588 17589 static char *tg3_bus_string(struct tg3 *tp, char *str) 17590 { 17591 if (tg3_flag(tp, PCI_EXPRESS)) { 17592 strcpy(str, "PCI Express"); 17593 return str; 17594 } else if (tg3_flag(tp, PCIX_MODE)) { 17595 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f; 17596 17597 strcpy(str, "PCIX:"); 17598 17599 if ((clock_ctrl == 7) || 17600 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) == 17601 GRC_MISC_CFG_BOARD_ID_5704CIOBE)) 17602 strcat(str, "133MHz"); 17603 else if (clock_ctrl == 0) 17604 strcat(str, "33MHz"); 17605 else if (clock_ctrl == 2) 17606 strcat(str, "50MHz"); 17607 else if (clock_ctrl == 4) 17608 strcat(str, "66MHz"); 17609 else if (clock_ctrl == 6) 17610 strcat(str, "100MHz"); 17611 } else { 17612 strcpy(str, "PCI:"); 17613 if (tg3_flag(tp, PCI_HIGH_SPEED)) 17614 strcat(str, "66MHz"); 17615 else 17616 strcat(str, "33MHz"); 17617 } 17618 if (tg3_flag(tp, PCI_32BIT)) 17619 strcat(str, ":32-bit"); 17620 else 17621 strcat(str, ":64-bit"); 17622 return str; 17623 } 17624 17625 static void tg3_init_coal(struct tg3 *tp) 17626 { 17627 struct ethtool_coalesce *ec = &tp->coal; 17628 17629 memset(ec, 0, sizeof(*ec)); 17630 ec->cmd = ETHTOOL_GCOALESCE; 17631 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS; 17632 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS; 17633 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES; 17634 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES; 17635 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT; 17636 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT; 17637 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT; 17638 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT; 17639 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS; 17640 17641 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD | 17642 HOSTCC_MODE_CLRTICK_TXBD)) { 17643 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS; 17644 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS; 17645 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS; 17646 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; 17647 } 17648 17649 if (tg3_flag(tp, 5705_PLUS)) { 17650 ec->rx_coalesce_usecs_irq = 0; 17651 ec->tx_coalesce_usecs_irq = 0; 17652 ec->stats_block_coalesce_usecs = 0; 17653 } 17654 } 17655 17656 static int tg3_init_one(struct pci_dev *pdev, 17657 const struct pci_device_id *ent) 17658 { 17659 struct net_device *dev; 17660 struct tg3 *tp; 17661 int i, err; 17662 u32 sndmbx, rcvmbx, intmbx; 17663 char str[40]; 17664 u64 dma_mask, persist_dma_mask; 17665 netdev_features_t features = 0; 17666 u8 addr[ETH_ALEN] __aligned(2); 17667 17668 err = pci_enable_device(pdev); 17669 if (err) { 17670 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 17671 return err; 17672 } 17673 17674 err = pci_request_regions(pdev, DRV_MODULE_NAME); 17675 if (err) { 17676 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 

        pci_set_master(pdev);

        dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
        if (!dev) {
                err = -ENOMEM;
                goto err_out_free_res;
        }

        SET_NETDEV_DEV(dev, &pdev->dev);

        tp = netdev_priv(dev);
        tp->pdev = pdev;
        tp->dev = dev;
        tp->rx_mode = TG3_DEF_RX_MODE;
        tp->tx_mode = TG3_DEF_TX_MODE;
        tp->irq_sync = 1;
        tp->pcierr_recovery = false;

        if (tg3_debug > 0)
                tp->msg_enable = tg3_debug;
        else
                tp->msg_enable = TG3_DEF_MSG_ENABLE;

        if (pdev_is_ssb_gige_core(pdev)) {
                tg3_flag_set(tp, IS_SSB_CORE);
                if (ssb_gige_must_flush_posted_writes(pdev))
                        tg3_flag_set(tp, FLUSH_POSTED_WRITES);
                if (ssb_gige_one_dma_at_once(pdev))
                        tg3_flag_set(tp, ONE_DMA_AT_ONCE);
                if (ssb_gige_have_roboswitch(pdev)) {
                        tg3_flag_set(tp, USE_PHYLIB);
                        tg3_flag_set(tp, ROBOSWITCH);
                }
                if (ssb_gige_is_rgmii(pdev))
                        tg3_flag_set(tp, RGMII_MODE);
        }

        /* The word/byte swap controls here control register access byte
         * swapping. DMA data byte swapping is controlled in the GRC_MODE
         * setting below.
         */
        tp->misc_host_ctrl =
                MISC_HOST_CTRL_MASK_PCI_INT |
                MISC_HOST_CTRL_WORD_SWAP |
                MISC_HOST_CTRL_INDIR_ACCESS |
                MISC_HOST_CTRL_PCISTATE_RW;

        /* The NONFRM (non-frame) byte/word swap controls take effect
         * on descriptor entries, anything which isn't packet data.
         *
         * The StrongARM chips on the board (one for tx, one for rx)
         * are running in big-endian mode.
         */
        tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
                        GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
        tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
        spin_lock_init(&tp->lock);
        spin_lock_init(&tp->indirect_lock);
        INIT_WORK(&tp->reset_task, tg3_reset_task);

        tp->regs = pci_ioremap_bar(pdev, BAR_0);
        if (!tp->regs) {
                dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
                err = -ENOMEM;
                goto err_out_free_dev;
        }

        if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
            tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
                tg3_flag_set(tp, ENABLE_APE);
                tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
                if (!tp->aperegs) {
                        dev_err(&pdev->dev,
                                "Cannot map APE registers, aborting\n");
                        err = -ENOMEM;
                        goto err_out_iounmap;
                }
        }

        tp->rx_pending = TG3_DEF_RX_RING_PENDING;
        tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

        dev->ethtool_ops = &tg3_ethtool_ops;
        dev->watchdog_timeo = TG3_TX_TIMEOUT;
        dev->netdev_ops = &tg3_netdev_ops;
        dev->irq = pdev->irq;
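
        /*
         * Descriptive note: tg3_get_invariants() works out the chip family,
         * revision and quirk flags; the DMA-mask, checksum and TSO decisions
         * below all key off the tg3_flag()/tg3_asic_rev() state it
         * establishes.
         */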

        err = tg3_get_invariants(tp, ent);
        if (err) {
                dev_err(&pdev->dev,
                        "Problem fetching invariants of chip, aborting\n");
                goto err_out_apeunmap;
        }

        /* The EPB bridge inside 5714, 5715, and 5780 and any
         * device behind the EPB cannot support DMA addresses > 40-bit.
         * On 64-bit systems with IOMMU, use 40-bit dma_mask.
         * On 64-bit systems without IOMMU, use 64-bit dma_mask and
         * do DMA address check in __tg3_start_xmit().
         */
        if (tg3_flag(tp, IS_5788))
                persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
        else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
                persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
                dma_mask = DMA_BIT_MASK(64);
#endif
        } else
                persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

        /* Configure DMA attributes. */
        if (dma_mask > DMA_BIT_MASK(32)) {
                err = dma_set_mask(&pdev->dev, dma_mask);
                if (!err) {
                        features |= NETIF_F_HIGHDMA;
                        err = dma_set_coherent_mask(&pdev->dev,
                                                    persist_dma_mask);
                        if (err < 0) {
                                dev_err(&pdev->dev, "Unable to obtain 64 bit "
                                        "DMA for consistent allocations\n");
                                goto err_out_apeunmap;
                        }
                }
        }
        if (err || dma_mask == DMA_BIT_MASK(32)) {
                err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev,
                                "No usable DMA configuration, aborting\n");
                        goto err_out_apeunmap;
                }
        }

        tg3_init_bufmgr_config(tp);

        /* 5700 B0 chips do not support checksumming correctly due
         * to hardware bugs.
         */
        if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
                features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

                if (tg3_flag(tp, 5755_PLUS))
                        features |= NETIF_F_IPV6_CSUM;
        }

        /* TSO is on by default on chips that support hardware TSO.
         * Firmware TSO on older chips gives lower performance, so it
         * is off by default, but can be enabled using ethtool.
         */
        if ((tg3_flag(tp, HW_TSO_1) ||
             tg3_flag(tp, HW_TSO_2) ||
             tg3_flag(tp, HW_TSO_3)) &&
            (features & NETIF_F_IP_CSUM))
                features |= NETIF_F_TSO;
        if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
                if (features & NETIF_F_IPV6_CSUM)
                        features |= NETIF_F_TSO6;
                if (tg3_flag(tp, HW_TSO_3) ||
                    tg3_asic_rev(tp) == ASIC_REV_5761 ||
                    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
                     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
                    tg3_asic_rev(tp) == ASIC_REV_5785 ||
                    tg3_asic_rev(tp) == ASIC_REV_57780)
                        features |= NETIF_F_TSO_ECN;
        }

        dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
                         NETIF_F_HW_VLAN_CTAG_RX;
        dev->vlan_features |= features;
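
        /*
         * Descriptive note: vlan_features mirrors the offload set computed
         * above, so VLAN sub-devices keep checksumming and TSO; the CTAG
         * flags added to dev->features enable hardware VLAN tag
         * insertion/stripping themselves.
         */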

        /*
         * Add loopback capability only for a subset of devices that support
         * MAC-LOOPBACK. Eventually this needs to be enhanced to allow
         * INT-PHY loopback for the remaining devices.
         */
        if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
            !tg3_flag(tp, CPMU_PRESENT))
                /* Add the loopback capability */
                features |= NETIF_F_LOOPBACK;

        dev->hw_features |= features;
        dev->priv_flags |= IFF_UNICAST_FLT;

        /* MTU range: 60 - 9000 or 1500, depending on hardware */
        dev->min_mtu = TG3_MIN_MTU;
        dev->max_mtu = TG3_MAX_MTU(tp);

        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
            !tg3_flag(tp, TSO_CAPABLE) &&
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
                tg3_flag_set(tp, MAX_RXPEND_64);
                tp->rx_pending = 63;
        }

        err = tg3_get_device_address(tp, addr);
        if (err) {
                dev_err(&pdev->dev,
                        "Could not obtain valid ethernet address, aborting\n");
                goto err_out_apeunmap;
        }
        eth_hw_addr_set(dev, addr);

        intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
        rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
        sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
        for (i = 0; i < tp->irq_max; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tnapi->tp = tp;
                tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

                tnapi->int_mbox = intmbx;
                intmbx += 0x8;

                tnapi->consmbox = rcvmbx;
                tnapi->prodmbox = sndmbx;

                if (i)
                        tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
                else
                        tnapi->coal_now = HOSTCC_MODE_NOW;

                if (!tg3_flag(tp, SUPPORT_MSIX))
                        break;

                /*
                 * If we support MSIX, we'll be using RSS. If we're using
                 * RSS, the first vector only handles link interrupts and the
                 * remaining vectors handle rx and tx interrupts. Reuse the
                 * mailbox values for the next iteration. The values we setup
                 * above are still useful for the single vectored mode.
                 */
                if (!i)
                        continue;

                rcvmbx += 0x8;

                if (sndmbx & 0x4)
                        sndmbx -= 0x4;
                else
                        sndmbx += 0xc;
        }
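
        /*
         * Worked example of the send-mailbox arithmetic above, assuming
         * TG3_64BIT_REG_LOW is the +0x4 half of each 64-bit mailbox and
         * writing B for MAILBOX_SNDHOST_PROD_IDX_0:
         *
         *      vec 0: B+0x4    (link-only under RSS; value reused by vec 1)
         *      vec 1: B+0x4
         *      vec 2: B+0x0
         *      vec 3: B+0xc
         *      vec 4: B+0x8
         *
         * i.e. the +0xc/-0x4 dance packs two 32-bit tx producer indices
         * into each 64-bit mailbox register.
         */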

        /*
         * Reset chip in case the UNDI or EFI driver did not shut it down.
         * The DMA self test will enable WDMAC and we'll see (spurious)
         * pending DMA on the PCI bus at that point.
         */
        if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
            (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                tg3_full_lock(tp, 0);
                tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_full_unlock(tp);
        }

        err = tg3_test_dma(tp);
        if (err) {
                dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
                goto err_out_apeunmap;
        }

        tg3_init_coal(tp);

        pci_set_drvdata(pdev, dev);

        if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
            tg3_asic_rev(tp) == ASIC_REV_5720 ||
            tg3_asic_rev(tp) == ASIC_REV_5762)
                tg3_flag_set(tp, PTP_CAPABLE);

        tg3_timer_init(tp);

        tg3_carrier_off(tp);

        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot register net device, aborting\n");
                goto err_out_apeunmap;
        }

        if (tg3_flag(tp, PTP_CAPABLE)) {
                tg3_ptp_init(tp);
                tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
                                                   &tp->pdev->dev);
                if (IS_ERR(tp->ptp_clock))
                        tp->ptp_clock = NULL;
        }

        netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
                    tp->board_part_number,
                    tg3_chip_rev_id(tp),
                    tg3_bus_string(tp, str),
                    dev->dev_addr);

        if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
                char *ethtype;

                if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                        ethtype = "10/100Base-TX";
                else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                        ethtype = "1000Base-SX";
                else
                        ethtype = "10/100/1000Base-T";

                netdev_info(dev, "attached PHY is %s (%s Ethernet) "
                            "(WireSpeed[%d], EEE[%d])\n",
                            tg3_phy_string(tp), ethtype,
                            (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
                            (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
        }

        netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
                    (dev->features & NETIF_F_RXCSUM) != 0,
                    tg3_flag(tp, USE_LINKCHG_REG) != 0,
                    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
                    tg3_flag(tp, ENABLE_ASF) != 0,
                    tg3_flag(tp, TSO_CAPABLE) != 0);
        netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
                    tp->dma_rwctrl,
                    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
                    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
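
        /*
         * Descriptive note: snapshot config space now that probe is
         * complete; tg3_io_slot_reset() below restores this saved state
         * after a PCI bus reset and then re-saves it for the next error
         * cycle.
         */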

        pci_save_state(pdev);

        return 0;

err_out_apeunmap:
        if (tp->aperegs) {
                iounmap(tp->aperegs);
                tp->aperegs = NULL;
        }

err_out_iounmap:
        if (tp->regs) {
                iounmap(tp->regs);
                tp->regs = NULL;
        }

err_out_free_dev:
        free_netdev(dev);

err_out_free_res:
        pci_release_regions(pdev);

err_out_disable_pdev:
        if (pci_is_enabled(pdev))
                pci_disable_device(pdev);
        return err;
}

static void tg3_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        if (dev) {
                struct tg3 *tp = netdev_priv(dev);

                tg3_ptp_fini(tp);

                release_firmware(tp->fw);

                tg3_reset_task_cancel(tp);

                if (tg3_flag(tp, USE_PHYLIB)) {
                        tg3_phy_fini(tp);
                        tg3_mdio_fini(tp);
                }

                unregister_netdev(dev);
                if (tp->aperegs) {
                        iounmap(tp->aperegs);
                        tp->aperegs = NULL;
                }
                if (tp->regs) {
                        iounmap(tp->regs);
                        tp->regs = NULL;
                }
                free_netdev(dev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
        }
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
        struct net_device *dev = dev_get_drvdata(device);
        struct tg3 *tp = netdev_priv(dev);
        int err = 0;

        rtnl_lock();

        if (!netif_running(dev))
                goto unlock;

        tg3_reset_task_cancel(tp);
        tg3_phy_stop(tp);
        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        tg3_full_lock(tp, 1);
        tg3_disable_ints(tp);
        tg3_full_unlock(tp);

        netif_device_detach(dev);

        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_flag_clear(tp, INIT_COMPLETE);
        tg3_full_unlock(tp);

        err = tg3_power_down_prepare(tp);
        if (err) {
                int err2;

                tg3_full_lock(tp, 0);

                tg3_flag_set(tp, INIT_COMPLETE);
                err2 = tg3_restart_hw(tp, true);
                if (err2)
                        goto out;

                tg3_timer_start(tp);

                netif_device_attach(dev);
                tg3_netif_start(tp);

out:
                tg3_full_unlock(tp);

                if (!err2)
                        tg3_phy_start(tp);
        }

unlock:
        rtnl_unlock();
        return err;
}

static int tg3_resume(struct device *device)
{
        struct net_device *dev = dev_get_drvdata(device);
        struct tg3 *tp = netdev_priv(dev);
        int err = 0;

        rtnl_lock();

        if (!netif_running(dev))
                goto unlock;

        netif_device_attach(dev);

        tg3_full_lock(tp, 0);

        tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp,
                             !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
        if (err)
                goto out;

        tg3_timer_start(tp);

        tg3_netif_start(tp);

out:
        tg3_full_unlock(tp);

        if (!err)
                tg3_phy_start(tp);

unlock:
        rtnl_unlock();
        return err;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);

static void tg3_shutdown(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);

        tg3_reset_task_cancel(tp);

        rtnl_lock();
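
        /*
         * Descriptive note: under RTNL, detach first so no new I/O can be
         * started, close the device if it was running, and drop to low
         * power only when the system is actually powering off (see the
         * SYSTEM_POWER_OFF check below).
         */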

        netif_device_detach(dev);

        if (netif_running(dev))
                dev_close(dev);

        if (system_state == SYSTEM_POWER_OFF)
                tg3_power_down(tp);

        rtnl_unlock();

        pci_disable_device(pdev);
}

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

        netdev_info(netdev, "PCI I/O error detected\n");

        /* Want to make sure that the reset task doesn't run */
        tg3_reset_task_cancel(tp);

        rtnl_lock();

        /* Could be second call or maybe we don't have netdev yet */
        if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
                goto done;

        /* We needn't recover from permanent error */
        if (state == pci_channel_io_frozen)
                tp->pcierr_recovery = true;

        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        netif_device_detach(netdev);

        /* Clean up software state, even if MMIO is blocked */
        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        tg3_full_unlock(tp);

done:
        if (state == pci_channel_io_perm_failure) {
                if (netdev) {
                        tg3_napi_enable(tp);
                        dev_close(netdev);
                }
                err = PCI_ERS_RESULT_DISCONNECT;
        } else {
                pci_disable_device(pdev);
        }

        rtnl_unlock();

        return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
        int err;

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset.\n");
                goto done;
        }

        pci_set_master(pdev);
        pci_restore_state(pdev);
        pci_save_state(pdev);

        if (!netdev || !netif_running(netdev)) {
                rc = PCI_ERS_RESULT_RECOVERED;
                goto done;
        }

        err = tg3_power_up(tp);
        if (err)
                goto done;

        rc = PCI_ERS_RESULT_RECOVERED;

done:
        if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
                tg3_napi_enable(tp);
                dev_close(netdev);
        }
        rtnl_unlock();

        return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        int err;

        rtnl_lock();

        if (!netdev || !netif_running(netdev))
                goto done;

        tg3_full_lock(tp, 0);
        tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp, true);
        if (err) {
                tg3_full_unlock(tp);
                netdev_err(netdev, "Cannot restart hardware after reset.\n");
                goto done;
        }

        netif_device_attach(netdev);

        tg3_timer_start(tp);

        tg3_netif_start(tp);

        tg3_full_unlock(tp);

        tg3_phy_start(tp);

done:
        tp->pcierr_recovery = false;
        rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
        .error_detected = tg3_io_error_detected,
        .slot_reset     = tg3_io_slot_reset,
        .resume         = tg3_io_resume
};

static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = tg3_remove_one,
        .err_handler    = &tg3_err_handler,
        .driver.pm      = &tg3_pm_ops,
        .shutdown       = tg3_shutdown,
};

module_pci_driver(tg3_driver);
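
/*
 * Usage sketch (standard module and ethtool tooling; the interface name
 * "ethX" is whatever the distribution assigns):
 *
 *      modprobe tg3                    # bind to any Tigon3 NICs present
 *      ethtool -i ethX                 # driver/firmware identification
 *      ethtool -c ethX                 # coalescing defaults from tg3_init_coal()
 *      ethtool -C ethX rx-usecs 50     # tune interrupt coalescing
 */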