/* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
/*
	Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>

	Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
	Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
	Copyright 2001 Manfred Spraul [natsemi.c]
	Copyright 1999-2001 by Donald Becker. [natsemi.c]
	Written 1997-2001 by Donald Becker. [8139too.c]
	Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	See the file COPYING in this distribution for more information.

	Contributors:

		Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
		PCI suspend/resume  - Felipe Damasio <felipewd@terra.com.br>
		LinkChg interrupt   - Felipe Damasio <felipewd@terra.com.br>

	TODO:
	* Test Tx checksumming thoroughly

	Low priority TODO:
	* Complete reset on PciErr
	* Consider Rx interrupt mitigation using TimerIntr
	* Investigate using skb->priority with h/w VLAN priority
	* Investigate using High Priority Tx Queue with skb->priority
	* Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
	* Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
	* Implement Tx software interrupt mitigation via
	  Tx descriptor bit
	* The real minimum of CP_MIN_MTU is 4 bytes.  However,
	  for this to be supported, one must(?) turn on packet padding.
	* Support external MII transceivers (patch available)

	NOTES:
	* TX checksumming is considered experimental.  It is off by
	  default, use ethtool to turn it on.

 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME		"8139cp"
#define DRV_VERSION		"1.3"
#define DRV_RELDATE		"Mar 22, 2004"


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/gfp.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/cache.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/* These identify the driver base version and may not be removed. */
static char version[] =
DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";

MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The RTL chips use a 64 element hash table based on the Ethernet CRC. */
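/*
 * How the 64-entry hash is used (see __cp_set_rx_mode() below): the top six
 * bits of the Ethernet CRC of each address select one of the 64 filter bits
 * held in MAR0..MAR7, roughly:
 *
 *	bit_nr = ether_crc(ETH_ALEN, addr) >> 26;	(0..63)
 *	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 */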
static int multicast_filter_limit = 32;
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");

#define CP_DEF_MSG_ENABLE	(NETIF_MSG_DRV		| \
				 NETIF_MSG_PROBE 	| \
				 NETIF_MSG_LINK)
#define CP_NUM_STATS		14	/* struct cp_dma_stats, plus one */
#define CP_STATS_SIZE		64	/* size in bytes of DMA stats block */
#define CP_REGS_SIZE		(0xff + 1)
#define CP_REGS_VER		1	/* version 1 */
#define CP_RX_RING_SIZE		64
#define CP_TX_RING_SIZE		64
#define CP_RING_BYTES		\
		((sizeof(struct cp_desc) * CP_RX_RING_SIZE) +	\
		 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) +	\
		 CP_STATS_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (CP_TX_RING_SIZE - 1))
#define NEXT_RX(N)		(((N) + 1) & (CP_RX_RING_SIZE - 1))
#define TX_BUFFS_AVAIL(CP)					\
	(((CP)->tx_tail <= (CP)->tx_head) ?			\
	  (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \
	  (CP)->tx_tail - (CP)->tx_head - 1)

#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
#define CP_INTERNAL_PHY		32

/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
#define RX_FIFO_THRESH		5	/* Rx buffer level before first PCI xfer.  */
#define RX_DMA_BURST		4	/* Maximum PCI burst, '4' is 256 */
#define TX_DMA_BURST		6	/* Maximum PCI burst, '6' is 1024 */
#define TX_EARLY_THRESH		256	/* Early Tx threshold, in bytes */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT		(6*HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define CP_MIN_MTU		60	/* TODO: allow lower, but pad */
#define CP_MAX_MTU		4096

enum {
	/* NIC register offsets */
	MAC0		= 0x00,	/* Ethernet hardware address. */
	MAR0		= 0x08,	/* Multicast filter. */
	StatsAddr	= 0x10,	/* 64-bit start addr of 64-byte DMA stats blk */
	TxRingAddr	= 0x20, /* 64-bit start addr of Tx ring */
	HiTxRingAddr	= 0x28, /* 64-bit start addr of high priority Tx ring */
	Cmd		= 0x37, /* Command register */
	IntrMask	= 0x3C, /* Interrupt mask */
	IntrStatus	= 0x3E, /* Interrupt status */
	TxConfig	= 0x40, /* Tx configuration */
	ChipVersion	= 0x43, /* 8-bit chip version, inside TxConfig */
	RxConfig	= 0x44, /* Rx configuration */
	RxMissed	= 0x4C,	/* 24 bits valid, write clears */
	Cfg9346		= 0x50, /* EEPROM select/control; Cfg reg [un]lock */
	Config1		= 0x52, /* Config1 */
	Config3		= 0x59, /* Config3 */
	Config4		= 0x5A, /* Config4 */
	MultiIntr	= 0x5C, /* Multiple interrupt select */
	BasicModeCtrl	= 0x62,	/* MII BMCR */
	BasicModeStatus	= 0x64, /* MII BMSR */
	NWayAdvert	= 0x66, /* MII ADVERTISE */
	NWayLPAR	= 0x68, /* MII LPA */
	NWayExpansion	= 0x6A, /* MII Expansion */
	TxDmaOkLowDesc	= 0x82, /* Low 16 bit address of a Tx descriptor. */
	Config5		= 0xD8,	/* Config5 */
	TxPoll		= 0xD9,	/* Tell chip to check Tx descriptors for work */
	RxMaxSize	= 0xDA, /* Max size of an Rx packet (8169 only) */
	CpCmd		= 0xE0, /* C+ Command register (C+ mode only) */
	IntrMitigate	= 0xE2,	/* rx/tx interrupt mitigation control */
	RxRingAddr	= 0xE4, /* 64-bit start addr of Rx ring */
	TxThresh	= 0xEC, /* Early Tx threshold */
	OldRxBufAddr	= 0x30, /* DMA address of Rx ring buffer (C mode) */
	OldTSD0		= 0x10, /* DMA address of first Tx desc (C mode) */

	/* Tx and Rx status descriptors */
	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
	RingEnd		= (1 << 30), /* End of descriptor ring */
	FirstFrag	= (1 << 29), /* First segment of a packet */
	LastFrag	= (1 << 28), /* Final segment of a packet */
	LargeSend	= (1 << 27), /* TCP Large Send Offload (TSO) */
	MSSShift	= 16,	     /* MSS value position */
	MSSMask		= 0xfff,     /* MSS value: 11 bits */
	TxError		= (1 << 23), /* Tx error summary */
	RxError		= (1 << 20), /* Rx error summary */
	IPCS		= (1 << 18), /* Calculate IP checksum */
	UDPCS		= (1 << 17), /* Calculate UDP/IP checksum */
	TCPCS		= (1 << 16), /* Calculate TCP/IP checksum */
	TxVlanTag	= (1 << 17), /* Add VLAN tag */
	RxVlanTagged	= (1 << 16), /* Rx VLAN tag available */
	IPFail		= (1 << 15), /* IP checksum failed */
	UDPFail		= (1 << 14), /* UDP/IP checksum failed */
	TCPFail		= (1 << 13), /* TCP/IP checksum failed */
	NormalTxPoll	= (1 << 6),  /* One or more normal Tx packets to send */
	PID1		= (1 << 17), /* 2 protocol id bits:  0==non-IP, */
	PID0		= (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
	RxProtoTCP	= 1,
	RxProtoUDP	= 2,
	RxProtoIP	= 3,
	TxFIFOUnder	= (1 << 25), /* Tx FIFO underrun */
	TxOWC		= (1 << 22), /* Tx Out-of-window collision */
	TxLinkFail	= (1 << 21), /* Link failed during Tx of packet */
	TxMaxCol	= (1 << 20), /* Tx aborted due to excessive collisions */
	TxColCntShift	= 16,	     /* Shift, to get 4-bit Tx collision cnt */
	TxColCntMask	= 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
	RxErrFrame	= (1 << 27), /* Rx frame alignment error */
	RxMcast		= (1 << 26), /* Rx multicast packet rcv'd */
	RxErrCRC	= (1 << 18), /* Rx CRC error */
	RxErrRunt	= (1 << 19), /* Rx error, packet < 64 bytes */
	RxErrLong	= (1 << 21), /* Rx error, packet > 4096 bytes */
	RxErrFIFO	= (1 << 22), /* Rx error, FIFO overflowed, pkt bad */

	/* StatsAddr register */
	DumpStats	= (1 << 3),  /* Begin stats dump */

	/* RxConfig register */
	RxCfgFIFOShift	= 13,	     /* Shift, to get Rx FIFO thresh value */
	RxCfgDMAShift	= 8,	     /* Shift, to get Rx Max DMA value */
	AcceptErr	= 0x20,	     /* Accept packets with CRC errors */
	AcceptRunt	= 0x10,	     /* Accept runt (<64 bytes) packets */
	AcceptBroadcast	= 0x08,	     /* Accept broadcast packets */
	AcceptMulticast	= 0x04,	     /* Accept multicast packets */
	AcceptMyPhys	= 0x02,	     /* Accept pkts with our MAC as dest */
	AcceptAllPhys	= 0x01,	     /* Accept all pkts w/ physical dest */

	/* IntrMask / IntrStatus registers */
	PciErr		= (1 << 15), /* System error on the PCI bus */
	TimerIntr	= (1 << 14), /* Asserted when TCTR reaches TimerInt value */
	LenChg		= (1 << 13), /* Cable length change */
	SWInt		= (1 << 8),  /* Software-requested interrupt */
	TxEmpty		= (1 << 7),  /* No Tx descriptors available */
	RxFIFOOvr	= (1 << 6),  /* Rx FIFO Overflow */
	LinkChg		= (1 << 5),  /* Packet underrun, or link change */
	RxEmpty		= (1 << 4),  /* No Rx descriptors available */
	TxErr		= (1 << 3),  /* Tx error */
	TxOK		= (1 << 2),  /* Tx packet sent */
	RxErr		= (1 << 1),  /* Rx error */
	RxOK		= (1 << 0),  /* Rx packet received */
	IntrResvd	= (1 << 10), /* reserved, according to RealTek engineers,
					but hardware likes to raise it */

	IntrAll		= PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
			  RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
			  RxErr | RxOK | IntrResvd,

	/* C mode command register */
	CmdReset	= (1 << 4),  /* Enable to reset; self-clearing */
	RxOn		= (1 << 3),  /* Rx mode enable */
	TxOn		= (1 << 2),  /* Tx mode enable */

	/* C+ mode command register */
	RxVlanOn	= (1 << 6),  /* Rx VLAN de-tagging enable */
	RxChkSum	= (1 << 5),  /* Rx checksum offload enable */
	PCIDAC		= (1 << 4),  /* PCI Dual Address Cycle (64-bit PCI) */
	PCIMulRW	= (1 << 3),  /* Enable PCI read/write multiple */
	CpRxOn		= (1 << 1),  /* Rx mode enable */
	CpTxOn		= (1 << 0),  /* Tx mode enable */

	/* Cfg9346 EEPROM control register */
	Cfg9346_Lock	= 0x00,	     /* Lock ConfigX/MII register access */
	Cfg9346_Unlock	= 0xC0,	     /* Unlock ConfigX/MII register access */

	/* TxConfig register */
	IFG		= (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
	TxDMAShift	= 8,	     /* DMA burst value (0-7) is shifted this many bits */

	/* Early Tx Threshold register */
	TxThreshMask	= 0x3f,	     /* Mask bits 5-0 */
	TxThreshMax	= 2048,	     /* Max early Tx threshold */

	/* Config1 register */
	DriverLoaded	= (1 << 5),  /* Software marker, driver is loaded */
	LWACT		= (1 << 4),  /* LWAKE active mode */
	PMEnable	= (1 << 0),  /* Enable various PM features of chip */

	/* Config3 register */
	PARMEnable	= (1 << 6),  /* Enable auto-loading of PHY parms */
	MagicPacket	= (1 << 5),  /* Wake up when receives a Magic Packet */
	LinkUp		= (1 << 4),  /* Wake up when the cable connection is re-established */

	/* Config4 register */
	LWPTN		= (1 << 1),  /* LWAKE Pattern */
	LWPME		= (1 << 4),  /* LANWAKE vs PMEB */

	/* Config5 register */
	BWF		= (1 << 6),  /* Accept Broadcast wakeup frame */
	MWF		= (1 << 5),  /* Accept Multicast wakeup frame */
	UWF		= (1 << 4),  /* Accept Unicast wakeup frame */
	LANWake		= (1 << 1),  /* Enable LANWake signal */
	PMEStatus	= (1 << 0),  /* PME status can be reset by PCI RST# */

	cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
	cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
	cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
};

static const unsigned int cp_rx_config =
	  (RX_FIFO_THRESH << RxCfgFIFOShift) |
	  (RX_DMA_BURST << RxCfgDMAShift);

struct cp_desc {
	__le32		opts1;
	__le32		opts2;
	__le64		addr;
};

struct cp_dma_stats {
	__le64			tx_ok;
	__le64			rx_ok;
	__le64			tx_err;
	__le32			rx_err;
	__le16			rx_fifo;
	__le16			frame_align;
	__le32			tx_ok_1col;
	__le32			tx_ok_mcol;
	__le64			rx_ok_phys;
	__le64			rx_ok_bcast;
	__le32			rx_ok_mcast;
	__le16			tx_abort;
	__le16			tx_underrun;
} __packed;

struct cp_extra_stats {
	unsigned long		rx_frags;
};

struct cp_private {
	void			__iomem *regs;
	struct net_device	*dev;
	spinlock_t		lock;
	u32			msg_enable;

	struct napi_struct	napi;

	struct pci_dev		*pdev;
	u32			rx_config;
	u16			cpcmd;

	struct cp_extra_stats	cp_stats;

	unsigned		rx_head		____cacheline_aligned;
	unsigned		rx_tail;
	struct cp_desc		*rx_ring;
	struct sk_buff		*rx_skb[CP_RX_RING_SIZE];

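	/*
	 * Ring bookkeeping: rx_tail (above) is the next Rx descriptor to be
	 * examined by cp_rx_poll(); tx_head is the next free Tx slot filled
	 * by cp_start_xmit() and tx_tail the next slot reclaimed by cp_tx().
	 * NEXT_TX()/NEXT_RX() wrap the indices using the power-of-two ring
	 * sizes.
	 */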
	unsigned		tx_head		____cacheline_aligned;
	unsigned		tx_tail;
	struct cp_desc		*tx_ring;
	struct sk_buff		*tx_skb[CP_TX_RING_SIZE];
	u32			tx_opts[CP_TX_RING_SIZE];

	unsigned		rx_buf_sz;
	unsigned		wol_enabled : 1; /* Is Wake-on-LAN enabled? */

	dma_addr_t		ring_dma;

	struct mii_if_info	mii_if;
};

#define cpr8(reg)	readb(cp->regs + (reg))
#define cpr16(reg)	readw(cp->regs + (reg))
#define cpr32(reg)	readl(cp->regs + (reg))
#define cpw8(reg,val)	writeb((val), cp->regs + (reg))
#define cpw16(reg,val)	writew((val), cp->regs + (reg))
#define cpw32(reg,val)	writel((val), cp->regs + (reg))
#define cpw8_f(reg,val) do {			\
	writeb((val), cp->regs + (reg));	\
	readb(cp->regs + (reg));		\
	} while (0)
#define cpw16_f(reg,val) do {			\
	writew((val), cp->regs + (reg));	\
	readw(cp->regs + (reg));		\
	} while (0)
#define cpw32_f(reg,val) do {			\
	writel((val), cp->regs + (reg));	\
	readl(cp->regs + (reg));		\
	} while (0)


static void __cp_set_rx_mode (struct net_device *dev);
static void cp_tx (struct cp_private *cp);
static void cp_clean_rings (struct cp_private *cp);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cp_poll_controller(struct net_device *dev);
#endif
static int cp_get_eeprom_len(struct net_device *dev);
static int cp_get_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data);
static int cp_set_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data);

static struct {
	const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "tx_ok" },
	{ "rx_ok" },
	{ "tx_err" },
	{ "rx_err" },
	{ "rx_fifo" },
	{ "frame_align" },
	{ "tx_ok_1col" },
	{ "tx_ok_mcol" },
	{ "rx_ok_phys" },
	{ "rx_ok_bcast" },
	{ "rx_ok_mcast" },
	{ "tx_abort" },
	{ "tx_underrun" },
	{ "rx_frags" },
};


static inline void cp_set_rxbufsize (struct cp_private *cp)
{
	unsigned int mtu = cp->dev->mtu;

	if (mtu > ETH_DATA_LEN)
		/* MTU + ethernet header + FCS + optional VLAN tag */
		cp->rx_buf_sz = mtu + ETH_HLEN + 8;
	else
		cp->rx_buf_sz = PKT_BUF_SZ;
}

static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
			      struct cp_desc *desc)
{
	u32 opts2 = le32_to_cpu(desc->opts2);

	skb->protocol = eth_type_trans (skb, cp->dev);

	cp->dev->stats.rx_packets++;
	cp->dev->stats.rx_bytes += skb->len;

	if (opts2 & RxVlanTagged)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));

	napi_gro_receive(&cp->napi, skb);
}

static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
			    u32 status, u32 len)
{
	netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
		  rx_tail, status, len);
	cp->dev->stats.rx_errors++;
	if (status & RxErrFrame)
		cp->dev->stats.rx_frame_errors++;
	if (status & RxErrCRC)
		cp->dev->stats.rx_crc_errors++;
	if ((status & RxErrRunt) || (status & RxErrLong))
		cp->dev->stats.rx_length_errors++;
	if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
		cp->dev->stats.rx_length_errors++;
	if (status & RxErrFIFO)
		cp->dev->stats.rx_fifo_errors++;
}

static inline unsigned int cp_rx_csum_ok (u32 status)
{
	unsigned int protocol = (status >> 16) & 0x3;

	if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
	    ((protocol == RxProtoUDP) && !(status & UDPFail)))
		return 1;
	else
		return 0;
}

static int cp_rx_poll(struct napi_struct *napi, int budget)
{
	struct cp_private *cp = container_of(napi, struct cp_private, napi);
	struct net_device *dev = cp->dev;
	unsigned int rx_tail = cp->rx_tail;
	int rx;

rx_status_loop:
	rx = 0;
	cpw16(IntrStatus, cp_rx_intr_mask);

	while (rx < budget) {
		u32 status, len;
		dma_addr_t mapping, new_mapping;
		struct sk_buff *skb, *new_skb;
		struct cp_desc *desc;
		const unsigned buflen = cp->rx_buf_sz;

		skb = cp->rx_skb[rx_tail];
		BUG_ON(!skb);

		desc = &cp->rx_ring[rx_tail];
		status = le32_to_cpu(desc->opts1);
		if (status & DescOwn)
			break;

		len = (status & 0x1fff) - 4;
		mapping = le64_to_cpu(desc->addr);

		if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
			/* we don't support incoming fragmented frames.
			 * instead, we attempt to ensure that the
			 * pre-allocated RX skbs are properly sized such
			 * that RX fragments are never encountered
			 */
			cp_rx_err_acct(cp, rx_tail, status, len);
			dev->stats.rx_dropped++;
			cp->cp_stats.rx_frags++;
			goto rx_next;
		}

		if (status & (RxError | RxErrFIFO)) {
			cp_rx_err_acct(cp, rx_tail, status, len);
			goto rx_next;
		}

		netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
			  rx_tail, status, len);

		new_skb = napi_alloc_skb(napi, buflen);
		if (!new_skb) {
			dev->stats.rx_dropped++;
			goto rx_next;
		}

		new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
					     PCI_DMA_FROMDEVICE);
		if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
			dev->stats.rx_dropped++;
			kfree_skb(new_skb);
			goto rx_next;
		}

		dma_unmap_single(&cp->pdev->dev, mapping,
				 buflen, PCI_DMA_FROMDEVICE);

		/* Handle checksum offloading for incoming packets. */
		if (cp_rx_csum_ok(status))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb_put(skb, len);

		cp->rx_skb[rx_tail] = new_skb;

		cp_rx_skb(cp, skb, desc);
		rx++;
		mapping = new_mapping;

rx_next:
		cp->rx_ring[rx_tail].opts2 = 0;
		cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
		if (rx_tail == (CP_RX_RING_SIZE - 1))
			desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
						  cp->rx_buf_sz);
		else
			desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
		rx_tail = NEXT_RX(rx_tail);
	}

	cp->rx_tail = rx_tail;

	/* if we did not reach work limit, then we're done with
	 * this round of polling
	 */
	if (rx < budget) {
		unsigned long flags;

		if (cpr16(IntrStatus) & cp_rx_intr_mask)
			goto rx_status_loop;

		napi_gro_flush(napi, false);
		spin_lock_irqsave(&cp->lock, flags);
		__napi_complete(napi);
		cpw16_f(IntrMask, cp_intr_mask);
		spin_unlock_irqrestore(&cp->lock, flags);
	}

	return rx;
}

static irqreturn_t cp_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct cp_private *cp;
	int handled = 0;
	u16 status;

	if (unlikely(dev == NULL))
		return IRQ_NONE;
	cp = netdev_priv(dev);

	spin_lock(&cp->lock);

	status = cpr16(IntrStatus);
	if (!status || (status == 0xFFFF))
		goto out_unlock;

	handled = 1;

	netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
		  status, cpr8(Cmd), cpr16(CpCmd));

	cpw16(IntrStatus, status & ~cp_rx_intr_mask);

	/* close possible races with dev_close */
	if (unlikely(!netif_running(dev))) {
		cpw16(IntrMask, 0);
		goto out_unlock;
	}

	if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
		if (napi_schedule_prep(&cp->napi)) {
			cpw16_f(IntrMask, cp_norx_intr_mask);
			__napi_schedule(&cp->napi);
		}

	if (status & (TxOK | TxErr | TxEmpty | SWInt))
		cp_tx(cp);
	if (status & LinkChg)
		mii_check_media(&cp->mii_if, netif_msg_link(cp), false);


	if (status & PciErr) {
		u16 pci_status;

		pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
		pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
		netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
			   status, pci_status);

		/* TODO: reset hardware */
	}

out_unlock:
	spin_unlock(&cp->lock);

	return IRQ_RETVAL(handled);
}

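/*
 * Note on the interrupt/NAPI hand-off above: cp_interrupt() acks everything
 * except the Rx sources, then narrows IntrMask to cp_norx_intr_mask while
 * NAPI is scheduled; cp_rx_poll() acks the Rx bits itself and restores the
 * full cp_intr_mask once it completes under budget.
 */
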
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void cp_poll_controller(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	const int irq = cp->pdev->irq;

	disable_irq(irq);
	cp_interrupt(irq, dev);
	enable_irq(irq);
}
#endif

static void cp_tx (struct cp_private *cp)
{
	unsigned tx_head = cp->tx_head;
	unsigned tx_tail = cp->tx_tail;
	unsigned bytes_compl = 0, pkts_compl = 0;

	while (tx_tail != tx_head) {
		struct cp_desc *txd = cp->tx_ring + tx_tail;
		struct sk_buff *skb;
		u32 status;

		rmb();
		status = le32_to_cpu(txd->opts1);
		if (status & DescOwn)
			break;

		skb = cp->tx_skb[tx_tail];
		BUG_ON(!skb);

		dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
				 cp->tx_opts[tx_tail] & 0xffff,
				 PCI_DMA_TODEVICE);

		if (status & LastFrag) {
			if (status & (TxError | TxFIFOUnder)) {
				netif_dbg(cp, tx_err, cp->dev,
					  "tx err, status 0x%x\n", status);
				cp->dev->stats.tx_errors++;
				if (status & TxOWC)
					cp->dev->stats.tx_window_errors++;
				if (status & TxMaxCol)
					cp->dev->stats.tx_aborted_errors++;
				if (status & TxLinkFail)
					cp->dev->stats.tx_carrier_errors++;
				if (status & TxFIFOUnder)
					cp->dev->stats.tx_fifo_errors++;
			} else {
				cp->dev->stats.collisions +=
					((status >> TxColCntShift) & TxColCntMask);
				cp->dev->stats.tx_packets++;
				cp->dev->stats.tx_bytes += skb->len;
				netif_dbg(cp, tx_done, cp->dev,
					  "tx done, slot %d\n", tx_tail);
			}
			bytes_compl += skb->len;
			pkts_compl++;
			dev_kfree_skb_irq(skb);
		}

		cp->tx_skb[tx_tail] = NULL;

		tx_tail = NEXT_TX(tx_tail);
	}

	cp->tx_tail = tx_tail;

	netdev_completed_queue(cp->dev, pkts_compl, bytes_compl);
	if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
		netif_wake_queue(cp->dev);
}

static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) ?
		TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
}

static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
				   int first, int entry_last)
{
	int frag, index;
	struct cp_desc *txd;
	skb_frag_t *this_frag;

	for (frag = 0; frag+first < entry_last; frag++) {
		index = first+frag;
		cp->tx_skb[index] = NULL;
		txd = &cp->tx_ring[index];
		this_frag = &skb_shinfo(skb)->frags[frag];
		dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
				 skb_frag_size(this_frag), PCI_DMA_TODEVICE);
	}
}

static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
				  struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned entry;
	u32 eor, opts1;
	unsigned long intr_flags;
	__le32 opts2;
	int mss = 0;

	spin_lock_irqsave(&cp->lock, intr_flags);

	/* This is a hard error, log it. */
	if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&cp->lock, intr_flags);
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	entry = cp->tx_head;
	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
	mss = skb_shinfo(skb)->gso_size;

	opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
	opts1 = DescOwn;
	if (mss)
		opts1 |= LargeSend | ((mss & MSSMask) << MSSShift);
	else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ip = ip_hdr(skb);
		if (ip->protocol == IPPROTO_TCP)
			opts1 |= IPCS | TCPCS;
		else if (ip->protocol == IPPROTO_UDP)
			opts1 |= IPCS | UDPCS;
		else {
			WARN_ONCE(1,
				  "Net bug: asked to checksum invalid Legacy IP packet\n");
			goto out_dma_error;
		}
	}

	if (skb_shinfo(skb)->nr_frags == 0) {
		struct cp_desc *txd = &cp->tx_ring[entry];
		u32 len;
		dma_addr_t mapping;

		len = skb->len;
		mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
		if (dma_mapping_error(&cp->pdev->dev, mapping))
			goto out_dma_error;

		txd->opts2 = opts2;
		txd->addr = cpu_to_le64(mapping);
		wmb();

		opts1 |= eor | len | FirstFrag | LastFrag;

		txd->opts1 = cpu_to_le32(opts1);
		wmb();

		cp->tx_skb[entry] = skb;
		cp->tx_opts[entry] = opts1;
		netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
			  entry, skb->len);
	} else {
		struct cp_desc *txd;
		u32 first_len, first_eor, ctrl;
		dma_addr_t first_mapping;
		int frag, first_entry = entry;

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_eor = eor;
		first_len = skb_headlen(skb);
		first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
					       first_len, PCI_DMA_TODEVICE);
		if (dma_mapping_error(&cp->pdev->dev, first_mapping))
			goto out_dma_error;

		cp->tx_skb[entry] = skb;

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len;
			dma_addr_t mapping;

			entry = NEXT_TX(entry);

			len = skb_frag_size(this_frag);
			mapping = dma_map_single(&cp->pdev->dev,
						 skb_frag_address(this_frag),
						 len, PCI_DMA_TODEVICE);
			if (dma_mapping_error(&cp->pdev->dev, mapping)) {
				unwind_tx_frag_mapping(cp, skb, first_entry, entry);
				goto out_dma_error;
			}

			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;

			ctrl = opts1 | eor | len;

			if (frag == skb_shinfo(skb)->nr_frags - 1)
				ctrl |= LastFrag;

			txd = &cp->tx_ring[entry];
			txd->opts2 = opts2;
			txd->addr = cpu_to_le64(mapping);
			wmb();

			txd->opts1 = cpu_to_le32(ctrl);
			wmb();

			cp->tx_opts[entry] = ctrl;
			cp->tx_skb[entry] = skb;
		}

		txd = &cp->tx_ring[first_entry];
		txd->opts2 = opts2;
		txd->addr = cpu_to_le64(first_mapping);
		wmb();

		ctrl = opts1 | first_eor | first_len | FirstFrag;
		txd->opts1 = cpu_to_le32(ctrl);
		wmb();

		cp->tx_opts[first_entry] = ctrl;
		netif_dbg(cp, tx_queued, cp->dev, "tx queued, slots %d-%d, skblen %d\n",
			  first_entry, entry, skb->len);
	}
	cp->tx_head = NEXT_TX(entry);

	netdev_sent_queue(dev, skb->len);
	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

out_unlock:
	spin_unlock_irqrestore(&cp->lock, intr_flags);

	cpw8(TxPoll, NormalTxPoll);

	return NETDEV_TX_OK;
out_dma_error:
	dev_kfree_skb_any(skb);
	cp->dev->stats.tx_dropped++;
	goto out_unlock;
}

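/*
 * For reference, a single-fragment descriptor built by cp_start_xmit() ends
 * up as:
 *
 *	opts1 = DescOwn | FirstFrag | LastFrag | (RingEnd on the last slot)
 *		| frame length, plus LargeSend/MSS or IPCS/TCPCS/UDPCS bits;
 *	opts2 = TxVlanTag | byte-swapped VLAN tag (if any);
 *	addr  = DMA address of the buffer.
 *
 * addr and opts2 are written first and DescOwn is set last (with wmb()
 * barriers in between) so the NIC never sees a half-built descriptor.
 */
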
/* Set or clear the multicast filter for this adaptor.
   This routine is not state sensitive and need not be SMP locked. */

static void __cp_set_rx_mode (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	u32 mc_filter[2];	/* Multicast hash filter */
	int rx_mode;

	/* Note: do not reorder, GCC is clever about common statements. */
	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct netdev_hw_addr *ha;
		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	/* We can safely update without stopping the chip. */
	cp->rx_config = cp_rx_config | rx_mode;
	cpw32_f(RxConfig, cp->rx_config);

	cpw32_f (MAR0 + 0, mc_filter[0]);
	cpw32_f (MAR0 + 4, mc_filter[1]);
}

static void cp_set_rx_mode (struct net_device *dev)
{
	unsigned long flags;
	struct cp_private *cp = netdev_priv(dev);

	spin_lock_irqsave (&cp->lock, flags);
	__cp_set_rx_mode(dev);
	spin_unlock_irqrestore (&cp->lock, flags);
}

static void __cp_get_stats(struct cp_private *cp)
{
	/* only lower 24 bits valid; write any value to clear */
	cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
	cpw32 (RxMissed, 0);
}

static struct net_device_stats *cp_get_stats(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	/* The chip only needs to report frames it silently dropped. */
	spin_lock_irqsave(&cp->lock, flags);
	if (netif_running(dev) && netif_device_present(dev))
		__cp_get_stats(cp);
	spin_unlock_irqrestore(&cp->lock, flags);

	return &dev->stats;
}

static void cp_stop_hw (struct cp_private *cp)
{
	cpw16(IntrStatus, ~(cpr16(IntrStatus)));
	cpw16_f(IntrMask, 0);
	cpw8(Cmd, 0);
	cpw16_f(CpCmd, 0);
	cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));

	cp->rx_tail = 0;
	cp->tx_head = cp->tx_tail = 0;

	netdev_reset_queue(cp->dev);
}

static void cp_reset_hw (struct cp_private *cp)
{
	unsigned work = 1000;

	cpw8(Cmd, CmdReset);

	while (work--) {
		if (!(cpr8(Cmd) & CmdReset))
			return;

		schedule_timeout_uninterruptible(10);
	}

	netdev_err(cp->dev, "hardware reset timeout\n");
}

static inline void cp_start_hw (struct cp_private *cp)
{
	dma_addr_t ring_dma;

	cpw16(CpCmd, cp->cpcmd);

	/*
	 * These (at least TxRingAddr) need to be configured after the
	 * corresponding bits in CpCmd are enabled. Datasheet v1.6 §6.33
	 * (C+ Command Register) recommends that these and more be configured
	 * *after* the [RT]xEnable bits in CpCmd are set. And on some hardware
	 * it's been observed that the TxRingAddr is actually reset to garbage
	 * when C+ mode Tx is enabled in CpCmd.
	 */
996 */ 997 cpw32_f(HiTxRingAddr, 0); 998 cpw32_f(HiTxRingAddr + 4, 0); 999 1000 ring_dma = cp->ring_dma; 1001 cpw32_f(RxRingAddr, ring_dma & 0xffffffff); 1002 cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16); 1003 1004 ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE; 1005 cpw32_f(TxRingAddr, ring_dma & 0xffffffff); 1006 cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16); 1007 1008 /* 1009 * Strictly speaking, the datasheet says this should be enabled 1010 * *before* setting the descriptor addresses. But what, then, would 1011 * prevent it from doing DMA to random unconfigured addresses? 1012 * This variant appears to work fine. 1013 */ 1014 cpw8(Cmd, RxOn | TxOn); 1015 1016 netdev_reset_queue(cp->dev); 1017 } 1018 1019 static void cp_enable_irq(struct cp_private *cp) 1020 { 1021 cpw16_f(IntrMask, cp_intr_mask); 1022 } 1023 1024 static void cp_init_hw (struct cp_private *cp) 1025 { 1026 struct net_device *dev = cp->dev; 1027 1028 cp_reset_hw(cp); 1029 1030 cpw8_f (Cfg9346, Cfg9346_Unlock); 1031 1032 /* Restore our idea of the MAC address. */ 1033 cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0))); 1034 cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4))); 1035 1036 cp_start_hw(cp); 1037 cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */ 1038 1039 __cp_set_rx_mode(dev); 1040 cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift)); 1041 1042 cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable); 1043 /* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */ 1044 cpw8(Config3, PARMEnable); 1045 cp->wol_enabled = 0; 1046 1047 cpw8(Config5, cpr8(Config5) & PMEStatus); 1048 1049 cpw16(MultiIntr, 0); 1050 1051 cpw8_f(Cfg9346, Cfg9346_Lock); 1052 } 1053 1054 static int cp_refill_rx(struct cp_private *cp) 1055 { 1056 struct net_device *dev = cp->dev; 1057 unsigned i; 1058 1059 for (i = 0; i < CP_RX_RING_SIZE; i++) { 1060 struct sk_buff *skb; 1061 dma_addr_t mapping; 1062 1063 skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz); 1064 if (!skb) 1065 goto err_out; 1066 1067 mapping = dma_map_single(&cp->pdev->dev, skb->data, 1068 cp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1069 if (dma_mapping_error(&cp->pdev->dev, mapping)) { 1070 kfree_skb(skb); 1071 goto err_out; 1072 } 1073 cp->rx_skb[i] = skb; 1074 1075 cp->rx_ring[i].opts2 = 0; 1076 cp->rx_ring[i].addr = cpu_to_le64(mapping); 1077 if (i == (CP_RX_RING_SIZE - 1)) 1078 cp->rx_ring[i].opts1 = 1079 cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz); 1080 else 1081 cp->rx_ring[i].opts1 = 1082 cpu_to_le32(DescOwn | cp->rx_buf_sz); 1083 } 1084 1085 return 0; 1086 1087 err_out: 1088 cp_clean_rings(cp); 1089 return -ENOMEM; 1090 } 1091 1092 static void cp_init_rings_index (struct cp_private *cp) 1093 { 1094 cp->rx_tail = 0; 1095 cp->tx_head = cp->tx_tail = 0; 1096 } 1097 1098 static int cp_init_rings (struct cp_private *cp) 1099 { 1100 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); 1101 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd); 1102 memset(cp->tx_opts, 0, sizeof(cp->tx_opts)); 1103 1104 cp_init_rings_index(cp); 1105 1106 return cp_refill_rx (cp); 1107 } 1108 1109 static int cp_alloc_rings (struct cp_private *cp) 1110 { 1111 struct device *d = &cp->pdev->dev; 1112 void *mem; 1113 int rc; 1114 1115 mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL); 1116 if (!mem) 1117 return -ENOMEM; 1118 1119 cp->rx_ring = mem; 1120 cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE]; 1121 1122 rc = cp_init_rings(cp); 1123 if (rc < 0) 1124 dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, 
static int cp_alloc_rings (struct cp_private *cp)
{
	struct device *d = &cp->pdev->dev;
	void *mem;
	int rc;

	mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	cp->rx_ring = mem;
	cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];

	rc = cp_init_rings(cp);
	if (rc < 0)
		dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring,
				  cp->ring_dma);

	return rc;
}

static void cp_clean_rings (struct cp_private *cp)
{
	struct cp_desc *desc;
	unsigned i;

	for (i = 0; i < CP_RX_RING_SIZE; i++) {
		if (cp->rx_skb[i]) {
			desc = cp->rx_ring + i;
			dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(cp->rx_skb[i]);
		}
	}

	for (i = 0; i < CP_TX_RING_SIZE; i++) {
		if (cp->tx_skb[i]) {
			struct sk_buff *skb = cp->tx_skb[i];

			desc = cp->tx_ring + i;
			dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
					 le32_to_cpu(desc->opts1) & 0xffff,
					 PCI_DMA_TODEVICE);
			if (le32_to_cpu(desc->opts1) & LastFrag)
				dev_kfree_skb_any(skb);
			cp->dev->stats.tx_dropped++;
		}
	}
	netdev_reset_queue(cp->dev);

	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
	memset(cp->tx_opts, 0, sizeof(cp->tx_opts));

	memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
	memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
}

static void cp_free_rings (struct cp_private *cp)
{
	cp_clean_rings(cp);
	dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
			  cp->ring_dma);
	cp->rx_ring = NULL;
	cp->tx_ring = NULL;
}

static int cp_open (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	const int irq = cp->pdev->irq;
	int rc;

	netif_dbg(cp, ifup, dev, "enabling interface\n");

	rc = cp_alloc_rings(cp);
	if (rc)
		return rc;

	napi_enable(&cp->napi);

	cp_init_hw(cp);

	rc = request_irq(irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc)
		goto err_out_hw;

	cp_enable_irq(cp);

	netif_carrier_off(dev);
	mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
	netif_start_queue(dev);

	return 0;

err_out_hw:
	napi_disable(&cp->napi);
	cp_stop_hw(cp);
	cp_free_rings(cp);
	return rc;
}

static int cp_close (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	napi_disable(&cp->napi);

	netif_dbg(cp, ifdown, dev, "disabling interface\n");

	spin_lock_irqsave(&cp->lock, flags);

	netif_stop_queue(dev);
	netif_carrier_off(dev);

	cp_stop_hw(cp);

	spin_unlock_irqrestore(&cp->lock, flags);

	free_irq(cp->pdev->irq, dev);

	cp_free_rings(cp);
	return 0;
}

static void cp_tx_timeout(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;
	int rc, i;

	netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
		    cpr8(Cmd), cpr16(CpCmd),
		    cpr16(IntrStatus), cpr16(IntrMask));

	spin_lock_irqsave(&cp->lock, flags);

	netif_dbg(cp, tx_err, cp->dev, "TX ring head %d tail %d desc %x\n",
		  cp->tx_head, cp->tx_tail, cpr16(TxDmaOkLowDesc));
	for (i = 0; i < CP_TX_RING_SIZE; i++) {
		netif_dbg(cp, tx_err, cp->dev,
			  "TX slot %d @%p: %08x (%08x) %08x %llx %p\n",
			  i, &cp->tx_ring[i], le32_to_cpu(cp->tx_ring[i].opts1),
			  cp->tx_opts[i], le32_to_cpu(cp->tx_ring[i].opts2),
			  le64_to_cpu(cp->tx_ring[i].addr),
			  cp->tx_skb[i]);
	}

	cp_stop_hw(cp);
	cp_clean_rings(cp);
	rc = cp_init_rings(cp);
	cp_start_hw(cp);
	__cp_set_rx_mode(dev);
	cpw16_f(IntrMask, cp_norx_intr_mask);

	netif_wake_queue(dev);
	napi_schedule_irqoff(&cp->napi);

	spin_unlock_irqrestore(&cp->lock, flags);
}

static int cp_change_mtu(struct net_device *dev, int new_mtu)
{
	struct cp_private *cp = netdev_priv(dev);

	/* check for invalid MTU, according to hardware limits */
	if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
		return -EINVAL;

	/* if network interface not up, no need for complexity */
	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		cp_set_rxbufsize(cp);	/* set new rx buf size */
		return 0;
	}

	/* network IS up, close it, reset MTU, and come up again. */
	cp_close(dev);
	dev->mtu = new_mtu;
	cp_set_rxbufsize(cp);
	return cp_open(dev);
}

static const char mii_2_8139_map[8] = {
	BasicModeCtrl,
	BasicModeStatus,
	0,
	0,
	NWayAdvert,
	NWayLPAR,
	NWayExpansion,
	0
};

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct cp_private *cp = netdev_priv(dev);

	return location < 8 && mii_2_8139_map[location] ?
		readw(cp->regs + mii_2_8139_map[location]) : 0;
}


static void mdio_write(struct net_device *dev, int phy_id, int location,
		       int value)
{
	struct cp_private *cp = netdev_priv(dev);

	if (location == 0) {
		cpw8(Cfg9346, Cfg9346_Unlock);
		cpw16(BasicModeCtrl, value);
		cpw8(Cfg9346, Cfg9346_Lock);
	} else if (location < 8 && mii_2_8139_map[location])
		cpw16(mii_2_8139_map[location], value);
}

/* Set the ethtool Wake-on-LAN settings */
static int netdev_set_wol (struct cp_private *cp,
			   const struct ethtool_wolinfo *wol)
{
	u8 options;

	options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
	/* If WOL is being disabled, no need for complexity */
	if (wol->wolopts) {
		if (wol->wolopts & WAKE_PHY)	options |= LinkUp;
		if (wol->wolopts & WAKE_MAGIC)	options |= MagicPacket;
	}

	cpw8 (Cfg9346, Cfg9346_Unlock);
	cpw8 (Config3, options);
	cpw8 (Cfg9346, Cfg9346_Lock);

	options = 0; /* Paranoia setting */
	options = cpr8 (Config5) & ~(UWF | MWF | BWF);
	/* If WOL is being disabled, no need for complexity */
	if (wol->wolopts) {
		if (wol->wolopts & WAKE_UCAST)	options |= UWF;
		if (wol->wolopts & WAKE_BCAST)	options |= BWF;
		if (wol->wolopts & WAKE_MCAST)	options |= MWF;
	}

	cpw8 (Config5, options);

	cp->wol_enabled = (wol->wolopts) ? 1 : 0;

	return 0;
}
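
/*
 * The WOL options map onto two config registers: WAKE_PHY and WAKE_MAGIC
 * live in Config3 (LinkUp, MagicPacket), while WAKE_UCAST/WAKE_BCAST/
 * WAKE_MCAST live in Config5 (UWF, BWF, MWF). netdev_get_wol() below reads
 * the same bits back.
 */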

/* Get the ethtool Wake-on-LAN settings */
static void netdev_get_wol (struct cp_private *cp,
			    struct ethtool_wolinfo *wol)
{
	u8 options;

	wol->wolopts   = 0; /* Start from scratch */
	wol->supported = WAKE_PHY   | WAKE_BCAST | WAKE_MAGIC |
			 WAKE_MCAST | WAKE_UCAST;
	/* We don't need to go on if WOL is disabled */
	if (!cp->wol_enabled) return;

	options        = cpr8 (Config3);
	if (options & LinkUp)        wol->wolopts |= WAKE_PHY;
	if (options & MagicPacket)   wol->wolopts |= WAKE_MAGIC;

	options        = 0; /* Paranoia setting */
	options        = cpr8 (Config5);
	if (options & UWF)           wol->wolopts |= WAKE_UCAST;
	if (options & BWF)           wol->wolopts |= WAKE_BCAST;
	if (options & MWF)           wol->wolopts |= WAKE_MCAST;
}

static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct cp_private *cp = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
}

static void cp_get_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ring)
{
	ring->rx_max_pending = CP_RX_RING_SIZE;
	ring->tx_max_pending = CP_TX_RING_SIZE;
	ring->rx_pending = CP_RX_RING_SIZE;
	ring->tx_pending = CP_TX_RING_SIZE;
}

static int cp_get_regs_len(struct net_device *dev)
{
	return CP_REGS_SIZE;
}

static int cp_get_sset_count (struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return CP_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}

static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	rc = mii_ethtool_gset(&cp->mii_if, cmd);
	spin_unlock_irqrestore(&cp->lock, flags);

	return rc;
}

static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	rc = mii_ethtool_sset(&cp->mii_if, cmd);
	spin_unlock_irqrestore(&cp->lock, flags);

	return rc;
}

static int cp_nway_reset(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	return mii_nway_restart(&cp->mii_if);
}

static u32 cp_get_msglevel(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	return cp->msg_enable;
}

static void cp_set_msglevel(struct net_device *dev, u32 value)
{
	struct cp_private *cp = netdev_priv(dev);
	cp->msg_enable = value;
}

static int cp_set_features(struct net_device *dev, netdev_features_t features)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	if (!((dev->features ^ features) & NETIF_F_RXCSUM))
		return 0;

	spin_lock_irqsave(&cp->lock, flags);

	if (features & NETIF_F_RXCSUM)
		cp->cpcmd |= RxChkSum;
	else
		cp->cpcmd &= ~RxChkSum;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		cp->cpcmd |= RxVlanOn;
	else
		cp->cpcmd &= ~RxVlanOn;

	cpw16_f(CpCmd, cp->cpcmd);
	spin_unlock_irqrestore(&cp->lock, flags);

	return 0;
}

static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			void *p)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	if (regs->len < CP_REGS_SIZE)
		return /* -EINVAL */;

	regs->version = CP_REGS_VER;

	spin_lock_irqsave(&cp->lock, flags);
	memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
	spin_unlock_irqrestore(&cp->lock, flags);
}

static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave (&cp->lock, flags);
	netdev_get_wol (cp, wol);
	spin_unlock_irqrestore (&cp->lock, flags);
}

static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;
	int rc;

	spin_lock_irqsave (&cp->lock, flags);
	rc = netdev_set_wol (cp, wol);
	spin_unlock_irqrestore (&cp->lock, flags);

	return rc;
}

static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	default:
		BUG();
		break;
	}
}

static void cp_get_ethtool_stats (struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct cp_private *cp = netdev_priv(dev);
	struct cp_dma_stats *nic_stats;
	dma_addr_t dma;
	int i;

	nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
				       &dma, GFP_KERNEL);
	if (!nic_stats)
		return;

	/* begin NIC statistics dump */
	cpw32(StatsAddr + 4, (u64)dma >> 32);
	cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats);
	cpr32(StatsAddr);

	for (i = 0; i < 1000; i++) {
		if ((cpr32(StatsAddr) & DumpStats) == 0)
			break;
		udelay(10);
	}
	cpw32(StatsAddr, 0);
	cpw32(StatsAddr + 4, 0);
	cpr32(StatsAddr);

	i = 0;
	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
	tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
	tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
	tmp_stats[i++] = cp->cp_stats.rx_frags;
	BUG_ON(i != CP_NUM_STATS);

	dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
}

static const struct ethtool_ops cp_ethtool_ops = {
	.get_drvinfo		= cp_get_drvinfo,
	.get_regs_len		= cp_get_regs_len,
	.get_sset_count		= cp_get_sset_count,
	.get_settings		= cp_get_settings,
	.set_settings		= cp_set_settings,
	.nway_reset		= cp_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= cp_get_msglevel,
	.set_msglevel		= cp_set_msglevel,
	.get_regs		= cp_get_regs,
	.get_wol		= cp_get_wol,
	.set_wol		= cp_set_wol,
	.get_strings		= cp_get_strings,
	.get_ethtool_stats	= cp_get_ethtool_stats,
	.get_eeprom_len		= cp_get_eeprom_len,
	.get_eeprom		= cp_get_eeprom,
	.set_eeprom		= cp_set_eeprom,
	.get_ringparam		= cp_get_ringparam,
};

static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;
	unsigned long flags;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irqsave(&cp->lock, flags);
	rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irqrestore(&cp->lock, flags);
	return rc;
}

static int cp_set_mac_address(struct net_device *dev, void *p)
{
	struct cp_private *cp = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	spin_lock_irq(&cp->lock);

	cpw8_f(Cfg9346, Cfg9346_Unlock);
	cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
	cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
	cpw8_f(Cfg9346, Cfg9346_Lock);

	spin_unlock_irq(&cp->lock);

	return 0;
}

/* Serial EEPROM section. */

/*  EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
#define EE_CS		0x08	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x02	/* EEPROM chip data in. */
#define EE_WRITE_0	0x00
#define EE_WRITE_1	0x02
#define EE_DATA_READ	0x01	/* EEPROM chip data out. */
#define EE_ENB		(0x80 | EE_CS)

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33MHz PCI, but 66MHz may change this.
 */

#define eeprom_delay()	readb(ee_addr)

/* The EEPROM commands include the always-set leading bit. */
#define EE_EXTEND_CMD	(4)
#define EE_WRITE_CMD	(5)
#define EE_READ_CMD	(6)
#define EE_ERASE_CMD	(7)

#define EE_EWDS_ADDR	(0)
#define EE_WRAL_ADDR	(1)
#define EE_ERAL_ADDR	(2)
#define EE_EWEN_ADDR	(3)

#define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139

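/*
 * Command framing for the bit-banged 93C46/93C66 EEPROM: each command is
 * one of the 3-bit opcodes above (the always-set start bit is already part
 * of the opcode value) followed by an addr_len-bit word address, clocked
 * out MSB first over 3 + addr_len cycles. For example, reading word 7 of a
 * 93C46 (addr_len == 6):
 *
 *	read_cmd = 7 | (EE_READ_CMD << 6);	(binary 1 10 000111)
 *
 * after which 16 data bits are clocked back in, MSB first.
 */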
static void eeprom_cmd_start(void __iomem *ee_addr)
{
	writeb (EE_ENB & ~EE_CS, ee_addr);
	writeb (EE_ENB, ee_addr);
	eeprom_delay ();
}

static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
{
	int i;

	/* Shift the command bits out. */
	for (i = cmd_len - 1; i >= 0; i--) {
		int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
		writeb (EE_ENB | dataval, ee_addr);
		eeprom_delay ();
		writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
		eeprom_delay ();
	}
	writeb (EE_ENB, ee_addr);
	eeprom_delay ();
}

static void eeprom_cmd_end(void __iomem *ee_addr)
{
	writeb(0, ee_addr);
	eeprom_delay ();
}

static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
			      int addr_len)
{
	int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));

	eeprom_cmd_start(ee_addr);
	eeprom_cmd(ee_addr, cmd, 3 + addr_len);
	eeprom_cmd_end(ee_addr);
}

static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
{
	int i;
	u16 retval = 0;
	void __iomem *ee_addr = ioaddr + Cfg9346;
	int read_cmd = location | (EE_READ_CMD << addr_len);

	eeprom_cmd_start(ee_addr);
	eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);

	for (i = 16; i > 0; i--) {
		writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
		eeprom_delay ();
		retval =
		    (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
				     0);
		writeb (EE_ENB, ee_addr);
		eeprom_delay ();
	}

	eeprom_cmd_end(ee_addr);

	return retval;
}

static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
			 int addr_len)
{
	int i;
	void __iomem *ee_addr = ioaddr + Cfg9346;
	int write_cmd = location | (EE_WRITE_CMD << addr_len);

	eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);

	eeprom_cmd_start(ee_addr);
	eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
	eeprom_cmd(ee_addr, val, 16);
	eeprom_cmd_end(ee_addr);

	eeprom_cmd_start(ee_addr);
	for (i = 0; i < 20000; i++)
		if (readb(ee_addr) & EE_DATA_READ)
			break;
	eeprom_cmd_end(ee_addr);

	eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
}

static int cp_get_eeprom_len(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	int size;

	spin_lock_irq(&cp->lock);
	size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
	spin_unlock_irq(&cp->lock);

	return size;
}

static int cp_get_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned int addr_len;
	u16 val;
	u32 offset = eeprom->offset >> 1;
	u32 len = eeprom->len;
	u32 i = 0;

	eeprom->magic = CP_EEPROM_MAGIC;

	spin_lock_irq(&cp->lock);

	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;

	if (eeprom->offset & 1) {
		val = read_eeprom(cp->regs, offset, addr_len);
		data[i++] = (u8)(val >> 8);
		offset++;
	}

	while (i < len - 1) {
		val = read_eeprom(cp->regs, offset, addr_len);
		data[i++] = (u8)val;
		data[i++] = (u8)(val >> 8);
		offset++;
	}

	if (i < len) {
		val = read_eeprom(cp->regs, offset, addr_len);
		data[i] = (u8)val;
	}

	spin_unlock_irq(&cp->lock);
	return 0;
}

static int cp_set_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned int addr_len;
	u16 val;
	u32 offset = eeprom->offset >> 1;
	u32 len = eeprom->len;
	u32 i = 0;

	if (eeprom->magic != CP_EEPROM_MAGIC)
		return -EINVAL;

	spin_lock_irq(&cp->lock);

	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;

	if (eeprom->offset & 1) {
		val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
		val |= (u16)data[i++] << 8;
		write_eeprom(cp->regs, offset, val, addr_len);
		offset++;
	}

	while (i < len - 1) {
		val = (u16)data[i++];
		val |= (u16)data[i++] << 8;
		write_eeprom(cp->regs, offset, val, addr_len);
		offset++;
	}

	if (i < len) {
		val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
		val |= (u16)data[i];
		write_eeprom(cp->regs, offset, val, addr_len);
	}

	spin_unlock_irq(&cp->lock);
	return 0;
}

/* Put the board into D3cold state and wait for WakeUp signal */
static void cp_set_d3_state (struct cp_private *cp)
{
	pci_enable_wake(cp->pdev, PCI_D0, 1); /* Enable PME# generation */
	pci_set_power_state (cp->pdev, PCI_D3hot);
}

static const struct net_device_ops cp_netdev_ops = {
	.ndo_open		= cp_open,
	.ndo_stop		= cp_close,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= cp_set_mac_address,
	.ndo_set_rx_mode	= cp_set_rx_mode,
	.ndo_get_stats		= cp_get_stats,
	.ndo_do_ioctl		= cp_ioctl,
	.ndo_start_xmit		= cp_start_xmit,
	.ndo_tx_timeout		= cp_tx_timeout,
	.ndo_set_features	= cp_set_features,
	.ndo_change_mtu		= cp_change_mtu,

#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cp_poll_controller,
#endif
};

static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct cp_private *cp;
	int rc;
	void __iomem *regs;
	resource_size_t pciaddr;
	unsigned int addr_len, i, pci_using_dac;

	pr_info_once("%s", version);

	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
		dev_info(&pdev->dev,
			 "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
			 pdev->vendor, pdev->device, pdev->revision);
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(struct cp_private));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	cp = netdev_priv(dev);
	cp->pdev = pdev;
	cp->dev = dev;
	cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
	spin_lock_init (&cp->lock);
	cp->mii_if.dev = dev;
	cp->mii_if.mdio_read = mdio_read;
	cp->mii_if.mdio_write = mdio_write;
	cp->mii_if.phy_id = CP_INTERNAL_PHY;
	cp->mii_if.phy_id_mask = 0x1f;
	cp->mii_if.reg_num_mask = 0x1f;
	cp_set_rxbufsize(cp);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out_free;

	rc = pci_set_mwi(pdev);
	if (rc)
		goto err_out_disable;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_mwi;

	pciaddr = pci_resource_start(pdev, 1);
	if (!pciaddr) {
		rc = -EIO;
		dev_err(&pdev->dev, "no MMIO resource\n");
		goto err_out_res;
	}
	if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
		rc = -EIO;
		dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
			(unsigned long long)pci_resource_len(pdev, 1));
		goto err_out_res;
	}

	/* Configure DMA attributes. */
	if ((sizeof(dma_addr_t) > 4) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		pci_using_dac = 0;

		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_res;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev,
				"No usable consistent DMA configuration, aborting\n");
			goto err_out_res;
		}
	}

	cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
		    PCIMulRW | RxChkSum | CpRxOn | CpTxOn;

	dev->features |= NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_RXCSUM;

	regs = ioremap(pciaddr, CP_REGS_SIZE);
	if (!regs) {
		rc = -EIO;
		dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
			(unsigned long long)pci_resource_len(pdev, 1),
			(unsigned long long)pciaddr);
		goto err_out_res;
	}
	cp->regs = regs;

	cp_stop_hw(cp);

	/* read MAC address from EEPROM */
	addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
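	/*
	 * Word 0 reading back as 0x8129 indicates the larger serial EEPROM
	 * (8-bit addressing, 256 bytes, cf. cp_get_eeprom_len()); otherwise
	 * 6-bit addressing is used. The station address itself occupies
	 * words 7..9.
	 */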
	for (i = 0; i < 3; i++)
		((__le16 *) (dev->dev_addr))[i] =
		    cpu_to_le16(read_eeprom (regs, i + 7, addr_len));

	dev->netdev_ops = &cp_netdev_ops;
	netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
	dev->ethtool_ops = &cp_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;

	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

	/* disabled by default until verified */
	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
		NETIF_F_HIGHDMA;

	rc = register_netdev(dev);
	if (rc)
		goto err_out_iomap;

	netdev_info(dev, "RTL-8139C+ at 0x%p, %pM, IRQ %d\n",
		    regs, dev->dev_addr, pdev->irq);

	pci_set_drvdata(pdev, dev);

	/* enable busmastering and memory-write-invalidate */
	pci_set_master(pdev);

	if (cp->wol_enabled)
		cp_set_d3_state (cp);

	return 0;

err_out_iomap:
	iounmap(regs);
err_out_res:
	pci_release_regions(pdev);
err_out_mwi:
	pci_clear_mwi(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_free:
	free_netdev(dev);
	return rc;
}

static void cp_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cp_private *cp = netdev_priv(dev);

	unregister_netdev(dev);
	iounmap(cp->regs);
	if (cp->wol_enabled)
		pci_set_power_state (pdev, PCI_D0);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}

#ifdef CONFIG_PM
static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	netif_device_detach (dev);
	netif_stop_queue (dev);

	spin_lock_irqsave (&cp->lock, flags);

	/* Disable Rx and Tx */
	cpw16 (IntrMask, 0);
	cpw8  (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));

	spin_unlock_irqrestore (&cp->lock, flags);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int cp_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	netif_device_attach (dev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	/* FIXME: sh*t may happen if the Rx ring buffer is depleted */
	cp_init_rings_index (cp);
	cp_init_hw (cp);
	cp_enable_irq(cp);
	netif_start_queue (dev);

	spin_lock_irqsave (&cp->lock, flags);

	mii_check_media(&cp->mii_if, netif_msg_link(cp), false);

	spin_unlock_irqrestore (&cp->lock, flags);

	return 0;
}
#endif /* CONFIG_PM */

static const struct pci_device_id cp_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	PCI_DEVICE_ID_REALTEK_8139), },
	{ PCI_DEVICE(PCI_VENDOR_ID_TTTECH,	PCI_DEVICE_ID_TTTECH_MC322), },
	{ },
};
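
/*
 * MODULE_DEVICE_TABLE() below exports this ID table so userspace can
 * autoload the module for matching devices; note that cp_init_one() still
 * rejects RTL-8139 parts with a revision below 0x20, which are handled by
 * the 8139too driver instead.
 */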
MODULE_DEVICE_TABLE(pci, cp_pci_tbl);

static struct pci_driver cp_driver = {
	.name         = DRV_NAME,
	.id_table     = cp_pci_tbl,
	.probe        = cp_init_one,
	.remove       = cp_remove_one,
#ifdef CONFIG_PM
	.resume       = cp_resume,
	.suspend      = cp_suspend,
#endif
};

module_pci_driver(cp_driver);