1 /******************************************************************************* 2 3 Intel PRO/1000 Linux driver 4 Copyright(c) 1999 - 2012 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 Linux NICS <linux.nics@intel.com> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 27 *******************************************************************************/ 28 29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30 31 #include <linux/module.h> 32 #include <linux/types.h> 33 #include <linux/init.h> 34 #include <linux/pci.h> 35 #include <linux/vmalloc.h> 36 #include <linux/pagemap.h> 37 #include <linux/delay.h> 38 #include <linux/netdevice.h> 39 #include <linux/interrupt.h> 40 #include <linux/tcp.h> 41 #include <linux/ipv6.h> 42 #include <linux/slab.h> 43 #include <net/checksum.h> 44 #include <net/ip6_checksum.h> 45 #include <linux/mii.h> 46 #include <linux/ethtool.h> 47 #include <linux/if_vlan.h> 48 #include <linux/cpu.h> 49 #include <linux/smp.h> 50 #include <linux/pm_qos.h> 51 #include <linux/pm_runtime.h> 52 #include <linux/aer.h> 53 #include <linux/prefetch.h> 54 55 #include "e1000.h" 56 57 #define DRV_EXTRAVERSION "-k" 58 59 #define DRV_VERSION "1.9.5" DRV_EXTRAVERSION 60 char e1000e_driver_name[] = "e1000e"; 61 const char e1000e_driver_version[] = DRV_VERSION; 62 63 static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state); 64 65 static const struct e1000_info *e1000_info_tbl[] = { 66 [board_82571] = &e1000_82571_info, 67 [board_82572] = &e1000_82572_info, 68 [board_82573] = &e1000_82573_info, 69 [board_82574] = &e1000_82574_info, 70 [board_82583] = &e1000_82583_info, 71 [board_80003es2lan] = &e1000_es2_info, 72 [board_ich8lan] = &e1000_ich8_info, 73 [board_ich9lan] = &e1000_ich9_info, 74 [board_ich10lan] = &e1000_ich10_info, 75 [board_pchlan] = &e1000_pch_info, 76 [board_pch2lan] = &e1000_pch2_info, 77 }; 78 79 struct e1000_reg_info { 80 u32 ofs; 81 char *name; 82 }; 83 84 #define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ 85 #define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ 86 #define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ 87 #define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ 88 #define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ 89 90 #define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ 91 #define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ 92 #define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ 93 #define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ 94 #define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ 95 96 static const struct e1000_reg_info e1000_reg_info_tbl[] = { 97 98 /* General Registers */ 99 {E1000_CTRL, "CTRL"}, 100 {E1000_STATUS, "STATUS"}, 101 {E1000_CTRL_EXT, "CTRL_EXT"}, 102 103 /* Interrupt Registers */ 104 {E1000_ICR, "ICR"}, 105 106 /* Rx Registers */ 107 {E1000_RCTL, "RCTL"}, 108 {E1000_RDLEN, "RDLEN"}, 109 {E1000_RDH, "RDH"}, 110 {E1000_RDT, "RDT"}, 111 {E1000_RDTR, "RDTR"}, 112 {E1000_RXDCTL(0), "RXDCTL"}, 113 {E1000_ERT, "ERT"}, 114 {E1000_RDBAL, "RDBAL"}, 115 {E1000_RDBAH, "RDBAH"}, 116 {E1000_RDFH, "RDFH"}, 117 {E1000_RDFT, "RDFT"}, 118 {E1000_RDFHS, "RDFHS"}, 119 {E1000_RDFTS, "RDFTS"}, 120 {E1000_RDFPC, "RDFPC"}, 121 122 /* Tx Registers */ 123 {E1000_TCTL, "TCTL"}, 124 {E1000_TDBAL, "TDBAL"}, 125 {E1000_TDBAH, "TDBAH"}, 126 {E1000_TDLEN, "TDLEN"}, 127 {E1000_TDH, "TDH"}, 128 {E1000_TDT, "TDT"}, 129 {E1000_TIDV, "TIDV"}, 130 {E1000_TXDCTL(0), "TXDCTL"}, 131 {E1000_TADV, "TADV"}, 132 {E1000_TARC(0), "TARC"}, 133 {E1000_TDFH, "TDFH"}, 134 {E1000_TDFT, "TDFT"}, 135 {E1000_TDFHS, "TDFHS"}, 136 {E1000_TDFTS, "TDFTS"}, 137 {E1000_TDFPC, "TDFPC"}, 138 139 /* List Terminator */ 140 {0, NULL} 141 }; 142 143 /* 144 * e1000_regdump - register printout routine 145 */ 146 static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) 147 { 148 int n = 0; 149 char rname[16]; 150 u32 regs[8]; 151 152 switch (reginfo->ofs) { 153 case E1000_RXDCTL(0): 154 for (n = 0; n < 2; n++) 155 regs[n] = __er32(hw, E1000_RXDCTL(n)); 156 break; 157 case E1000_TXDCTL(0): 158 for (n = 0; n < 2; n++) 159 regs[n] = __er32(hw, E1000_TXDCTL(n)); 160 break; 161 case E1000_TARC(0): 162 for (n = 0; n < 2; n++) 163 regs[n] = __er32(hw, E1000_TARC(n)); 164 break; 165 default: 166 pr_info("%-15s %08x\n", 167 reginfo->name, __er32(hw, reginfo->ofs)); 168 return; 169 } 170 171 snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]"); 172 pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]); 173 } 174 175 /* 176 * e1000e_dump - Print registers, Tx-ring and Rx-ring 177 */ 178 static void e1000e_dump(struct e1000_adapter *adapter) 179 { 180 struct net_device *netdev = adapter->netdev; 181 struct e1000_hw *hw = &adapter->hw; 182 struct e1000_reg_info *reginfo; 183 struct e1000_ring *tx_ring = adapter->tx_ring; 184 struct e1000_tx_desc *tx_desc; 185 struct my_u0 { 186 __le64 a; 187 __le64 b; 188 } *u0; 189 struct e1000_buffer *buffer_info; 190 struct e1000_ring *rx_ring = adapter->rx_ring; 191 union e1000_rx_desc_packet_split *rx_desc_ps; 192 union e1000_rx_desc_extended *rx_desc; 193 struct my_u1 { 194 __le64 a; 195 __le64 b; 196 __le64 c; 197 __le64 d; 198 } *u1; 199 u32 staterr; 200 int i = 0; 201 202 if (!netif_msg_hw(adapter)) 203 return; 204 205 /* Print netdevice Info */ 206 if (netdev) { 207 dev_info(&adapter->pdev->dev, "Net device Info\n"); 208 pr_info("Device Name state trans_start last_rx\n"); 209 pr_info("%-15s %016lX %016lX %016lX\n", 210 netdev->name, netdev->state, netdev->trans_start, 211 netdev->last_rx); 212 } 213 214 /* Print Registers */ 215 dev_info(&adapter->pdev->dev, "Register Dump\n"); 216 pr_info(" Register Name Value\n"); 217 for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl; 218 reginfo->name; reginfo++) { 219 e1000_regdump(hw, reginfo); 220 } 221 222 /* Print Tx Ring Summary */ 223 if (!netdev || !netif_running(netdev)) 224 return; 225 226 dev_info(&adapter->pdev->dev, "Tx Ring Summary\n"); 227 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); 228 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; 229 pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n", 230 0, tx_ring->next_to_use, tx_ring->next_to_clean, 231 (unsigned long long)buffer_info->dma, 232 buffer_info->length, 233 buffer_info->next_to_watch, 234 (unsigned long long)buffer_info->time_stamp); 235 236 /* Print Tx Ring */ 237 if (!netif_msg_tx_done(adapter)) 238 goto rx_ring_summary; 239 240 dev_info(&adapter->pdev->dev, "Tx Ring Dump\n"); 241 242 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) 243 * 244 * Legacy Transmit Descriptor 245 * +--------------------------------------------------------------+ 246 * 0 | Buffer Address [63:0] (Reserved on Write Back) | 247 * +--------------------------------------------------------------+ 248 * 8 | Special | CSS | Status | CMD | CSO | Length | 249 * +--------------------------------------------------------------+ 250 * 63 48 47 36 35 32 31 24 23 16 15 0 251 * 252 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload 253 * 63 48 47 40 39 32 31 16 15 8 7 0 254 * +----------------------------------------------------------------+ 255 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | 256 * +----------------------------------------------------------------+ 257 * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | 258 * +----------------------------------------------------------------+ 259 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 260 * 261 * Extended Data Descriptor (DTYP=0x1) 262 * +----------------------------------------------------------------+ 263 * 0 | Buffer Address [63:0] | 264 * +----------------------------------------------------------------+ 265 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | 266 * +----------------------------------------------------------------+ 267 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 268 */ 269 pr_info("Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb <-- Legacy format\n"); 270 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Context format\n"); 271 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Data format\n"); 272 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { 273 const char *next_desc; 274 tx_desc = E1000_TX_DESC(*tx_ring, i); 275 buffer_info = &tx_ring->buffer_info[i]; 276 u0 = (struct my_u0 *)tx_desc; 277 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) 278 next_desc = " NTC/U"; 279 else if (i == tx_ring->next_to_use) 280 next_desc = " NTU"; 281 else if (i == tx_ring->next_to_clean) 282 next_desc = " NTC"; 283 else 284 next_desc = ""; 285 pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n", 286 (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' : 287 ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), 288 i, 289 (unsigned long long)le64_to_cpu(u0->a), 290 (unsigned long long)le64_to_cpu(u0->b), 291 (unsigned long long)buffer_info->dma, 292 buffer_info->length, buffer_info->next_to_watch, 293 (unsigned long long)buffer_info->time_stamp, 294 buffer_info->skb, next_desc); 295 296 if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) 297 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 298 16, 1, phys_to_virt(buffer_info->dma), 299 buffer_info->length, true); 300 } 301 302 /* Print Rx Ring Summary */ 303 rx_ring_summary: 304 dev_info(&adapter->pdev->dev, "Rx Ring Summary\n"); 305 pr_info("Queue [NTU] [NTC]\n"); 306 pr_info(" %5d %5X %5X\n", 307 0, rx_ring->next_to_use, rx_ring->next_to_clean); 308 309 /* Print Rx Ring */ 310 if (!netif_msg_rx_status(adapter)) 311 return; 312 313 dev_info(&adapter->pdev->dev, "Rx Ring Dump\n"); 314 switch (adapter->rx_ps_pages) { 315 case 1: 316 case 2: 317 case 3: 318 /* [Extended] Packet Split Receive Descriptor Format 319 * 320 * +-----------------------------------------------------+ 321 * 0 | Buffer Address 0 [63:0] | 322 * +-----------------------------------------------------+ 323 * 8 | Buffer Address 1 [63:0] | 324 * +-----------------------------------------------------+ 325 * 16 | Buffer Address 2 [63:0] | 326 * +-----------------------------------------------------+ 327 * 24 | Buffer Address 3 [63:0] | 328 * +-----------------------------------------------------+ 329 */ 330 pr_info("R [desc] [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] [bi->skb] <-- Ext Pkt Split format\n"); 331 /* [Extended] Receive Descriptor (Write-Back) Format 332 * 333 * 63 48 47 32 31 13 12 8 7 4 3 0 334 * +------------------------------------------------------+ 335 * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS | 336 * | Checksum | Ident | | Queue | | Type | 337 * +------------------------------------------------------+ 338 * 8 | VLAN Tag | Length | Extended Error | Extended Status | 339 * +------------------------------------------------------+ 340 * 63 48 47 32 31 20 19 0 341 */ 342 pr_info("RWB[desc] [ck ipid mrqhsh] [vl l0 ee es] [ l3 l2 l1 hs] [reserved ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n"); 343 for (i = 0; i < rx_ring->count; i++) { 344 const char *next_desc; 345 buffer_info = &rx_ring->buffer_info[i]; 346 rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i); 347 u1 = (struct my_u1 *)rx_desc_ps; 348 staterr = 349 le32_to_cpu(rx_desc_ps->wb.middle.status_error); 350 351 if (i == rx_ring->next_to_use) 352 next_desc = " NTU"; 353 else if (i == rx_ring->next_to_clean) 354 next_desc = " NTC"; 355 else 356 next_desc = ""; 357 358 if (staterr & E1000_RXD_STAT_DD) { 359 /* Descriptor Done */ 360 pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n", 361 "RWB", i, 362 (unsigned long long)le64_to_cpu(u1->a), 363 (unsigned long long)le64_to_cpu(u1->b), 364 (unsigned long long)le64_to_cpu(u1->c), 365 (unsigned long long)le64_to_cpu(u1->d), 366 buffer_info->skb, next_desc); 367 } else { 368 pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX %016llX %p%s\n", 369 "R ", i, 370 (unsigned long long)le64_to_cpu(u1->a), 371 (unsigned long long)le64_to_cpu(u1->b), 372 (unsigned long long)le64_to_cpu(u1->c), 373 (unsigned long long)le64_to_cpu(u1->d), 374 (unsigned long long)buffer_info->dma, 375 buffer_info->skb, next_desc); 376 377 if (netif_msg_pktdata(adapter)) 378 print_hex_dump(KERN_INFO, "", 379 DUMP_PREFIX_ADDRESS, 16, 1, 380 phys_to_virt(buffer_info->dma), 381 adapter->rx_ps_bsize0, true); 382 } 383 } 384 break; 385 default: 386 case 0: 387 /* Extended Receive Descriptor (Read) Format 388 * 389 * +-----------------------------------------------------+ 390 * 0 | Buffer Address [63:0] | 391 * +-----------------------------------------------------+ 392 * 8 | Reserved | 393 * +-----------------------------------------------------+ 394 */ 395 pr_info("R [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read) format\n"); 396 /* Extended Receive Descriptor (Write-Back) Format 397 * 398 * 63 48 47 32 31 24 23 4 3 0 399 * +------------------------------------------------------+ 400 * | RSS Hash | | | | 401 * 0 +-------------------+ Rsvd | Reserved | MRQ RSS | 402 * | Packet | IP | | | Type | 403 * | Checksum | Ident | | | | 404 * +------------------------------------------------------+ 405 * 8 | VLAN Tag | Length | Extended Error | Extended Status | 406 * +------------------------------------------------------+ 407 * 63 48 47 32 31 20 19 0 408 */ 409 pr_info("RWB[desc] [cs ipid mrq] [vt ln xe xs] [bi->skb] <-- Ext (Write-Back) format\n"); 410 411 for (i = 0; i < rx_ring->count; i++) { 412 const char *next_desc; 413 414 buffer_info = &rx_ring->buffer_info[i]; 415 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); 416 u1 = (struct my_u1 *)rx_desc; 417 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 418 419 if (i == rx_ring->next_to_use) 420 next_desc = " NTU"; 421 else if (i == rx_ring->next_to_clean) 422 next_desc = " NTC"; 423 else 424 next_desc = ""; 425 426 if (staterr & E1000_RXD_STAT_DD) { 427 /* Descriptor Done */ 428 pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n", 429 "RWB", i, 430 (unsigned long long)le64_to_cpu(u1->a), 431 (unsigned long long)le64_to_cpu(u1->b), 432 buffer_info->skb, next_desc); 433 } else { 434 pr_info("%s[0x%03X] %016llX %016llX %016llX %p%s\n", 435 "R ", i, 436 (unsigned long long)le64_to_cpu(u1->a), 437 (unsigned long long)le64_to_cpu(u1->b), 438 (unsigned long long)buffer_info->dma, 439 buffer_info->skb, next_desc); 440 441 if (netif_msg_pktdata(adapter)) 442 print_hex_dump(KERN_INFO, "", 443 DUMP_PREFIX_ADDRESS, 16, 444 1, 445 phys_to_virt 446 (buffer_info->dma), 447 adapter->rx_buffer_len, 448 true); 449 } 450 } 451 } 452 } 453 454 /** 455 * e1000_desc_unused - calculate if we have unused descriptors 456 **/ 457 static int e1000_desc_unused(struct e1000_ring *ring) 458 { 459 if (ring->next_to_clean > ring->next_to_use) 460 return ring->next_to_clean - ring->next_to_use - 1; 461 462 return ring->count + ring->next_to_clean - ring->next_to_use - 1; 463 } 464 465 /** 466 * e1000_receive_skb - helper function to handle Rx indications 467 * @adapter: board private structure 468 * @status: descriptor status field as written by hardware 469 * @vlan: descriptor vlan field as written by hardware (no le/be conversion) 470 * @skb: pointer to sk_buff to be indicated to stack 471 **/ 472 static void e1000_receive_skb(struct e1000_adapter *adapter, 473 struct net_device *netdev, struct sk_buff *skb, 474 u8 status, __le16 vlan) 475 { 476 u16 tag = le16_to_cpu(vlan); 477 skb->protocol = eth_type_trans(skb, netdev); 478 479 if (status & E1000_RXD_STAT_VP) 480 __vlan_hwaccel_put_tag(skb, tag); 481 482 napi_gro_receive(&adapter->napi, skb); 483 } 484 485 /** 486 * e1000_rx_checksum - Receive Checksum Offload 487 * @adapter: board private structure 488 * @status_err: receive descriptor status and error fields 489 * @csum: receive descriptor csum field 490 * @sk_buff: socket buffer with received data 491 **/ 492 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, 493 __le16 csum, struct sk_buff *skb) 494 { 495 u16 status = (u16)status_err; 496 u8 errors = (u8)(status_err >> 24); 497 498 skb_checksum_none_assert(skb); 499 500 /* Rx checksum disabled */ 501 if (!(adapter->netdev->features & NETIF_F_RXCSUM)) 502 return; 503 504 /* Ignore Checksum bit is set */ 505 if (status & E1000_RXD_STAT_IXSM) 506 return; 507 508 /* TCP/UDP checksum error bit is set */ 509 if (errors & E1000_RXD_ERR_TCPE) { 510 /* let the stack verify checksum errors */ 511 adapter->hw_csum_err++; 512 return; 513 } 514 515 /* TCP/UDP Checksum has not been calculated */ 516 if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) 517 return; 518 519 /* It must be a TCP or UDP packet with a valid checksum */ 520 if (status & E1000_RXD_STAT_TCPCS) { 521 /* TCP checksum is good */ 522 skb->ip_summed = CHECKSUM_UNNECESSARY; 523 } else { 524 /* 525 * IP fragment with UDP payload 526 * Hardware complements the payload checksum, so we undo it 527 * and then put the value in host order for further stack use. 528 */ 529 __sum16 sum = (__force __sum16)swab16((__force u16)csum); 530 skb->csum = csum_unfold(~sum); 531 skb->ip_summed = CHECKSUM_COMPLETE; 532 } 533 adapter->hw_csum_good++; 534 } 535 536 /** 537 * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa() 538 * @hw: pointer to the HW structure 539 * @tail: address of tail descriptor register 540 * @i: value to write to tail descriptor register 541 * 542 * When updating the tail register, the ME could be accessing Host CSR 543 * registers at the same time. Normally, this is handled in h/w by an 544 * arbiter but on some parts there is a bug that acknowledges Host accesses 545 * later than it should which could result in the descriptor register to 546 * have an incorrect value. Workaround this by checking the FWSM register 547 * which has bit 24 set while ME is accessing Host CSR registers, wait 548 * if it is set and try again a number of times. 549 **/ 550 static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, void __iomem *tail, 551 unsigned int i) 552 { 553 unsigned int j = 0; 554 555 while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) && 556 (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI)) 557 udelay(50); 558 559 writel(i, tail); 560 561 if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail))) 562 return E1000_ERR_SWFW_SYNC; 563 564 return 0; 565 } 566 567 static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i) 568 { 569 struct e1000_adapter *adapter = rx_ring->adapter; 570 struct e1000_hw *hw = &adapter->hw; 571 572 if (e1000e_update_tail_wa(hw, rx_ring->tail, i)) { 573 u32 rctl = er32(RCTL); 574 ew32(RCTL, rctl & ~E1000_RCTL_EN); 575 e_err("ME firmware caused invalid RDT - resetting\n"); 576 schedule_work(&adapter->reset_task); 577 } 578 } 579 580 static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i) 581 { 582 struct e1000_adapter *adapter = tx_ring->adapter; 583 struct e1000_hw *hw = &adapter->hw; 584 585 if (e1000e_update_tail_wa(hw, tx_ring->tail, i)) { 586 u32 tctl = er32(TCTL); 587 ew32(TCTL, tctl & ~E1000_TCTL_EN); 588 e_err("ME firmware caused invalid TDT - resetting\n"); 589 schedule_work(&adapter->reset_task); 590 } 591 } 592 593 /** 594 * e1000_alloc_rx_buffers - Replace used receive buffers 595 * @rx_ring: Rx descriptor ring 596 **/ 597 static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring, 598 int cleaned_count, gfp_t gfp) 599 { 600 struct e1000_adapter *adapter = rx_ring->adapter; 601 struct net_device *netdev = adapter->netdev; 602 struct pci_dev *pdev = adapter->pdev; 603 union e1000_rx_desc_extended *rx_desc; 604 struct e1000_buffer *buffer_info; 605 struct sk_buff *skb; 606 unsigned int i; 607 unsigned int bufsz = adapter->rx_buffer_len; 608 609 i = rx_ring->next_to_use; 610 buffer_info = &rx_ring->buffer_info[i]; 611 612 while (cleaned_count--) { 613 skb = buffer_info->skb; 614 if (skb) { 615 skb_trim(skb, 0); 616 goto map_skb; 617 } 618 619 skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp); 620 if (!skb) { 621 /* Better luck next round */ 622 adapter->alloc_rx_buff_failed++; 623 break; 624 } 625 626 buffer_info->skb = skb; 627 map_skb: 628 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, 629 adapter->rx_buffer_len, 630 DMA_FROM_DEVICE); 631 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 632 dev_err(&pdev->dev, "Rx DMA map failed\n"); 633 adapter->rx_dma_failed++; 634 break; 635 } 636 637 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); 638 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); 639 640 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { 641 /* 642 * Force memory writes to complete before letting h/w 643 * know there are new descriptors to fetch. (Only 644 * applicable for weak-ordered memory model archs, 645 * such as IA-64). 646 */ 647 wmb(); 648 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) 649 e1000e_update_rdt_wa(rx_ring, i); 650 else 651 writel(i, rx_ring->tail); 652 } 653 i++; 654 if (i == rx_ring->count) 655 i = 0; 656 buffer_info = &rx_ring->buffer_info[i]; 657 } 658 659 rx_ring->next_to_use = i; 660 } 661 662 /** 663 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split 664 * @rx_ring: Rx descriptor ring 665 **/ 666 static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring, 667 int cleaned_count, gfp_t gfp) 668 { 669 struct e1000_adapter *adapter = rx_ring->adapter; 670 struct net_device *netdev = adapter->netdev; 671 struct pci_dev *pdev = adapter->pdev; 672 union e1000_rx_desc_packet_split *rx_desc; 673 struct e1000_buffer *buffer_info; 674 struct e1000_ps_page *ps_page; 675 struct sk_buff *skb; 676 unsigned int i, j; 677 678 i = rx_ring->next_to_use; 679 buffer_info = &rx_ring->buffer_info[i]; 680 681 while (cleaned_count--) { 682 rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 683 684 for (j = 0; j < PS_PAGE_BUFFERS; j++) { 685 ps_page = &buffer_info->ps_pages[j]; 686 if (j >= adapter->rx_ps_pages) { 687 /* all unused desc entries get hw null ptr */ 688 rx_desc->read.buffer_addr[j + 1] = 689 ~cpu_to_le64(0); 690 continue; 691 } 692 if (!ps_page->page) { 693 ps_page->page = alloc_page(gfp); 694 if (!ps_page->page) { 695 adapter->alloc_rx_buff_failed++; 696 goto no_buffers; 697 } 698 ps_page->dma = dma_map_page(&pdev->dev, 699 ps_page->page, 700 0, PAGE_SIZE, 701 DMA_FROM_DEVICE); 702 if (dma_mapping_error(&pdev->dev, 703 ps_page->dma)) { 704 dev_err(&adapter->pdev->dev, 705 "Rx DMA page map failed\n"); 706 adapter->rx_dma_failed++; 707 goto no_buffers; 708 } 709 } 710 /* 711 * Refresh the desc even if buffer_addrs 712 * didn't change because each write-back 713 * erases this info. 714 */ 715 rx_desc->read.buffer_addr[j + 1] = 716 cpu_to_le64(ps_page->dma); 717 } 718 719 skb = __netdev_alloc_skb_ip_align(netdev, 720 adapter->rx_ps_bsize0, 721 gfp); 722 723 if (!skb) { 724 adapter->alloc_rx_buff_failed++; 725 break; 726 } 727 728 buffer_info->skb = skb; 729 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, 730 adapter->rx_ps_bsize0, 731 DMA_FROM_DEVICE); 732 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 733 dev_err(&pdev->dev, "Rx DMA map failed\n"); 734 adapter->rx_dma_failed++; 735 /* cleanup skb */ 736 dev_kfree_skb_any(skb); 737 buffer_info->skb = NULL; 738 break; 739 } 740 741 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); 742 743 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { 744 /* 745 * Force memory writes to complete before letting h/w 746 * know there are new descriptors to fetch. (Only 747 * applicable for weak-ordered memory model archs, 748 * such as IA-64). 749 */ 750 wmb(); 751 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) 752 e1000e_update_rdt_wa(rx_ring, i << 1); 753 else 754 writel(i << 1, rx_ring->tail); 755 } 756 757 i++; 758 if (i == rx_ring->count) 759 i = 0; 760 buffer_info = &rx_ring->buffer_info[i]; 761 } 762 763 no_buffers: 764 rx_ring->next_to_use = i; 765 } 766 767 /** 768 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers 769 * @rx_ring: Rx descriptor ring 770 * @cleaned_count: number of buffers to allocate this pass 771 **/ 772 773 static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring, 774 int cleaned_count, gfp_t gfp) 775 { 776 struct e1000_adapter *adapter = rx_ring->adapter; 777 struct net_device *netdev = adapter->netdev; 778 struct pci_dev *pdev = adapter->pdev; 779 union e1000_rx_desc_extended *rx_desc; 780 struct e1000_buffer *buffer_info; 781 struct sk_buff *skb; 782 unsigned int i; 783 unsigned int bufsz = 256 - 16 /* for skb_reserve */; 784 785 i = rx_ring->next_to_use; 786 buffer_info = &rx_ring->buffer_info[i]; 787 788 while (cleaned_count--) { 789 skb = buffer_info->skb; 790 if (skb) { 791 skb_trim(skb, 0); 792 goto check_page; 793 } 794 795 skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp); 796 if (unlikely(!skb)) { 797 /* Better luck next round */ 798 adapter->alloc_rx_buff_failed++; 799 break; 800 } 801 802 buffer_info->skb = skb; 803 check_page: 804 /* allocate a new page if necessary */ 805 if (!buffer_info->page) { 806 buffer_info->page = alloc_page(gfp); 807 if (unlikely(!buffer_info->page)) { 808 adapter->alloc_rx_buff_failed++; 809 break; 810 } 811 } 812 813 if (!buffer_info->dma) 814 buffer_info->dma = dma_map_page(&pdev->dev, 815 buffer_info->page, 0, 816 PAGE_SIZE, 817 DMA_FROM_DEVICE); 818 819 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); 820 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); 821 822 if (unlikely(++i == rx_ring->count)) 823 i = 0; 824 buffer_info = &rx_ring->buffer_info[i]; 825 } 826 827 if (likely(rx_ring->next_to_use != i)) { 828 rx_ring->next_to_use = i; 829 if (unlikely(i-- == 0)) 830 i = (rx_ring->count - 1); 831 832 /* Force memory writes to complete before letting h/w 833 * know there are new descriptors to fetch. (Only 834 * applicable for weak-ordered memory model archs, 835 * such as IA-64). */ 836 wmb(); 837 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) 838 e1000e_update_rdt_wa(rx_ring, i); 839 else 840 writel(i, rx_ring->tail); 841 } 842 } 843 844 static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss, 845 struct sk_buff *skb) 846 { 847 if (netdev->features & NETIF_F_RXHASH) 848 skb->rxhash = le32_to_cpu(rss); 849 } 850 851 /** 852 * e1000_clean_rx_irq - Send received data up the network stack 853 * @rx_ring: Rx descriptor ring 854 * 855 * the return value indicates whether actual cleaning was done, there 856 * is no guarantee that everything was cleaned 857 **/ 858 static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, 859 int work_to_do) 860 { 861 struct e1000_adapter *adapter = rx_ring->adapter; 862 struct net_device *netdev = adapter->netdev; 863 struct pci_dev *pdev = adapter->pdev; 864 struct e1000_hw *hw = &adapter->hw; 865 union e1000_rx_desc_extended *rx_desc, *next_rxd; 866 struct e1000_buffer *buffer_info, *next_buffer; 867 u32 length, staterr; 868 unsigned int i; 869 int cleaned_count = 0; 870 bool cleaned = false; 871 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 872 873 i = rx_ring->next_to_clean; 874 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); 875 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 876 buffer_info = &rx_ring->buffer_info[i]; 877 878 while (staterr & E1000_RXD_STAT_DD) { 879 struct sk_buff *skb; 880 881 if (*work_done >= work_to_do) 882 break; 883 (*work_done)++; 884 rmb(); /* read descriptor and rx_buffer_info after status DD */ 885 886 skb = buffer_info->skb; 887 buffer_info->skb = NULL; 888 889 prefetch(skb->data - NET_IP_ALIGN); 890 891 i++; 892 if (i == rx_ring->count) 893 i = 0; 894 next_rxd = E1000_RX_DESC_EXT(*rx_ring, i); 895 prefetch(next_rxd); 896 897 next_buffer = &rx_ring->buffer_info[i]; 898 899 cleaned = true; 900 cleaned_count++; 901 dma_unmap_single(&pdev->dev, 902 buffer_info->dma, 903 adapter->rx_buffer_len, 904 DMA_FROM_DEVICE); 905 buffer_info->dma = 0; 906 907 length = le16_to_cpu(rx_desc->wb.upper.length); 908 909 /* 910 * !EOP means multiple descriptors were used to store a single 911 * packet, if that's the case we need to toss it. In fact, we 912 * need to toss every packet with the EOP bit clear and the 913 * next frame that _does_ have the EOP bit set, as it is by 914 * definition only a frame fragment 915 */ 916 if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) 917 adapter->flags2 |= FLAG2_IS_DISCARDING; 918 919 if (adapter->flags2 & FLAG2_IS_DISCARDING) { 920 /* All receives must fit into a single buffer */ 921 e_dbg("Receive packet consumed multiple buffers\n"); 922 /* recycle */ 923 buffer_info->skb = skb; 924 if (staterr & E1000_RXD_STAT_EOP) 925 adapter->flags2 &= ~FLAG2_IS_DISCARDING; 926 goto next_desc; 927 } 928 929 if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && 930 !(netdev->features & NETIF_F_RXALL))) { 931 /* recycle */ 932 buffer_info->skb = skb; 933 goto next_desc; 934 } 935 936 /* adjust length to remove Ethernet CRC */ 937 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { 938 /* If configured to store CRC, don't subtract FCS, 939 * but keep the FCS bytes out of the total_rx_bytes 940 * counter 941 */ 942 if (netdev->features & NETIF_F_RXFCS) 943 total_rx_bytes -= 4; 944 else 945 length -= 4; 946 } 947 948 total_rx_bytes += length; 949 total_rx_packets++; 950 951 /* 952 * code added for copybreak, this should improve 953 * performance for small packets with large amounts 954 * of reassembly being done in the stack 955 */ 956 if (length < copybreak) { 957 struct sk_buff *new_skb = 958 netdev_alloc_skb_ip_align(netdev, length); 959 if (new_skb) { 960 skb_copy_to_linear_data_offset(new_skb, 961 -NET_IP_ALIGN, 962 (skb->data - 963 NET_IP_ALIGN), 964 (length + 965 NET_IP_ALIGN)); 966 /* save the skb in buffer_info as good */ 967 buffer_info->skb = skb; 968 skb = new_skb; 969 } 970 /* else just continue with the old one */ 971 } 972 /* end copybreak code */ 973 skb_put(skb, length); 974 975 /* Receive Checksum Offload */ 976 e1000_rx_checksum(adapter, staterr, 977 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb); 978 979 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); 980 981 e1000_receive_skb(adapter, netdev, skb, staterr, 982 rx_desc->wb.upper.vlan); 983 984 next_desc: 985 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF); 986 987 /* return some buffers to hardware, one at a time is too slow */ 988 if (cleaned_count >= E1000_RX_BUFFER_WRITE) { 989 adapter->alloc_rx_buf(rx_ring, cleaned_count, 990 GFP_ATOMIC); 991 cleaned_count = 0; 992 } 993 994 /* use prefetched values */ 995 rx_desc = next_rxd; 996 buffer_info = next_buffer; 997 998 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 999 } 1000 rx_ring->next_to_clean = i; 1001 1002 cleaned_count = e1000_desc_unused(rx_ring); 1003 if (cleaned_count) 1004 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); 1005 1006 adapter->total_rx_bytes += total_rx_bytes; 1007 adapter->total_rx_packets += total_rx_packets; 1008 return cleaned; 1009 } 1010 1011 static void e1000_put_txbuf(struct e1000_ring *tx_ring, 1012 struct e1000_buffer *buffer_info) 1013 { 1014 struct e1000_adapter *adapter = tx_ring->adapter; 1015 1016 if (buffer_info->dma) { 1017 if (buffer_info->mapped_as_page) 1018 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, 1019 buffer_info->length, DMA_TO_DEVICE); 1020 else 1021 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, 1022 buffer_info->length, DMA_TO_DEVICE); 1023 buffer_info->dma = 0; 1024 } 1025 if (buffer_info->skb) { 1026 dev_kfree_skb_any(buffer_info->skb); 1027 buffer_info->skb = NULL; 1028 } 1029 buffer_info->time_stamp = 0; 1030 } 1031 1032 static void e1000_print_hw_hang(struct work_struct *work) 1033 { 1034 struct e1000_adapter *adapter = container_of(work, 1035 struct e1000_adapter, 1036 print_hang_task); 1037 struct net_device *netdev = adapter->netdev; 1038 struct e1000_ring *tx_ring = adapter->tx_ring; 1039 unsigned int i = tx_ring->next_to_clean; 1040 unsigned int eop = tx_ring->buffer_info[i].next_to_watch; 1041 struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop); 1042 struct e1000_hw *hw = &adapter->hw; 1043 u16 phy_status, phy_1000t_status, phy_ext_status; 1044 u16 pci_status; 1045 1046 if (test_bit(__E1000_DOWN, &adapter->state)) 1047 return; 1048 1049 if (!adapter->tx_hang_recheck && 1050 (adapter->flags2 & FLAG2_DMA_BURST)) { 1051 /* May be block on write-back, flush and detect again 1052 * flush pending descriptor writebacks to memory 1053 */ 1054 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); 1055 /* execute the writes immediately */ 1056 e1e_flush(); 1057 adapter->tx_hang_recheck = true; 1058 return; 1059 } 1060 /* Real hang detected */ 1061 adapter->tx_hang_recheck = false; 1062 netif_stop_queue(netdev); 1063 1064 e1e_rphy(hw, PHY_STATUS, &phy_status); 1065 e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); 1066 e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); 1067 1068 pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status); 1069 1070 /* detected Hardware unit hang */ 1071 e_err("Detected Hardware Unit Hang:\n" 1072 " TDH <%x>\n" 1073 " TDT <%x>\n" 1074 " next_to_use <%x>\n" 1075 " next_to_clean <%x>\n" 1076 "buffer_info[next_to_clean]:\n" 1077 " time_stamp <%lx>\n" 1078 " next_to_watch <%x>\n" 1079 " jiffies <%lx>\n" 1080 " next_to_watch.status <%x>\n" 1081 "MAC Status <%x>\n" 1082 "PHY Status <%x>\n" 1083 "PHY 1000BASE-T Status <%x>\n" 1084 "PHY Extended Status <%x>\n" 1085 "PCI Status <%x>\n", 1086 readl(tx_ring->head), 1087 readl(tx_ring->tail), 1088 tx_ring->next_to_use, 1089 tx_ring->next_to_clean, 1090 tx_ring->buffer_info[eop].time_stamp, 1091 eop, 1092 jiffies, 1093 eop_desc->upper.fields.status, 1094 er32(STATUS), 1095 phy_status, 1096 phy_1000t_status, 1097 phy_ext_status, 1098 pci_status); 1099 } 1100 1101 /** 1102 * e1000_clean_tx_irq - Reclaim resources after transmit completes 1103 * @tx_ring: Tx descriptor ring 1104 * 1105 * the return value indicates whether actual cleaning was done, there 1106 * is no guarantee that everything was cleaned 1107 **/ 1108 static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring) 1109 { 1110 struct e1000_adapter *adapter = tx_ring->adapter; 1111 struct net_device *netdev = adapter->netdev; 1112 struct e1000_hw *hw = &adapter->hw; 1113 struct e1000_tx_desc *tx_desc, *eop_desc; 1114 struct e1000_buffer *buffer_info; 1115 unsigned int i, eop; 1116 unsigned int count = 0; 1117 unsigned int total_tx_bytes = 0, total_tx_packets = 0; 1118 unsigned int bytes_compl = 0, pkts_compl = 0; 1119 1120 i = tx_ring->next_to_clean; 1121 eop = tx_ring->buffer_info[i].next_to_watch; 1122 eop_desc = E1000_TX_DESC(*tx_ring, eop); 1123 1124 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && 1125 (count < tx_ring->count)) { 1126 bool cleaned = false; 1127 rmb(); /* read buffer_info after eop_desc */ 1128 for (; !cleaned; count++) { 1129 tx_desc = E1000_TX_DESC(*tx_ring, i); 1130 buffer_info = &tx_ring->buffer_info[i]; 1131 cleaned = (i == eop); 1132 1133 if (cleaned) { 1134 total_tx_packets += buffer_info->segs; 1135 total_tx_bytes += buffer_info->bytecount; 1136 if (buffer_info->skb) { 1137 bytes_compl += buffer_info->skb->len; 1138 pkts_compl++; 1139 } 1140 } 1141 1142 e1000_put_txbuf(tx_ring, buffer_info); 1143 tx_desc->upper.data = 0; 1144 1145 i++; 1146 if (i == tx_ring->count) 1147 i = 0; 1148 } 1149 1150 if (i == tx_ring->next_to_use) 1151 break; 1152 eop = tx_ring->buffer_info[i].next_to_watch; 1153 eop_desc = E1000_TX_DESC(*tx_ring, eop); 1154 } 1155 1156 tx_ring->next_to_clean = i; 1157 1158 netdev_completed_queue(netdev, pkts_compl, bytes_compl); 1159 1160 #define TX_WAKE_THRESHOLD 32 1161 if (count && netif_carrier_ok(netdev) && 1162 e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) { 1163 /* Make sure that anybody stopping the queue after this 1164 * sees the new next_to_clean. 1165 */ 1166 smp_mb(); 1167 1168 if (netif_queue_stopped(netdev) && 1169 !(test_bit(__E1000_DOWN, &adapter->state))) { 1170 netif_wake_queue(netdev); 1171 ++adapter->restart_queue; 1172 } 1173 } 1174 1175 if (adapter->detect_tx_hung) { 1176 /* 1177 * Detect a transmit hang in hardware, this serializes the 1178 * check with the clearing of time_stamp and movement of i 1179 */ 1180 adapter->detect_tx_hung = false; 1181 if (tx_ring->buffer_info[i].time_stamp && 1182 time_after(jiffies, tx_ring->buffer_info[i].time_stamp 1183 + (adapter->tx_timeout_factor * HZ)) && 1184 !(er32(STATUS) & E1000_STATUS_TXOFF)) 1185 schedule_work(&adapter->print_hang_task); 1186 else 1187 adapter->tx_hang_recheck = false; 1188 } 1189 adapter->total_tx_bytes += total_tx_bytes; 1190 adapter->total_tx_packets += total_tx_packets; 1191 return count < tx_ring->count; 1192 } 1193 1194 /** 1195 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split 1196 * @rx_ring: Rx descriptor ring 1197 * 1198 * the return value indicates whether actual cleaning was done, there 1199 * is no guarantee that everything was cleaned 1200 **/ 1201 static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done, 1202 int work_to_do) 1203 { 1204 struct e1000_adapter *adapter = rx_ring->adapter; 1205 struct e1000_hw *hw = &adapter->hw; 1206 union e1000_rx_desc_packet_split *rx_desc, *next_rxd; 1207 struct net_device *netdev = adapter->netdev; 1208 struct pci_dev *pdev = adapter->pdev; 1209 struct e1000_buffer *buffer_info, *next_buffer; 1210 struct e1000_ps_page *ps_page; 1211 struct sk_buff *skb; 1212 unsigned int i, j; 1213 u32 length, staterr; 1214 int cleaned_count = 0; 1215 bool cleaned = false; 1216 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 1217 1218 i = rx_ring->next_to_clean; 1219 rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 1220 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); 1221 buffer_info = &rx_ring->buffer_info[i]; 1222 1223 while (staterr & E1000_RXD_STAT_DD) { 1224 if (*work_done >= work_to_do) 1225 break; 1226 (*work_done)++; 1227 skb = buffer_info->skb; 1228 rmb(); /* read descriptor and rx_buffer_info after status DD */ 1229 1230 /* in the packet split case this is header only */ 1231 prefetch(skb->data - NET_IP_ALIGN); 1232 1233 i++; 1234 if (i == rx_ring->count) 1235 i = 0; 1236 next_rxd = E1000_RX_DESC_PS(*rx_ring, i); 1237 prefetch(next_rxd); 1238 1239 next_buffer = &rx_ring->buffer_info[i]; 1240 1241 cleaned = true; 1242 cleaned_count++; 1243 dma_unmap_single(&pdev->dev, buffer_info->dma, 1244 adapter->rx_ps_bsize0, DMA_FROM_DEVICE); 1245 buffer_info->dma = 0; 1246 1247 /* see !EOP comment in other Rx routine */ 1248 if (!(staterr & E1000_RXD_STAT_EOP)) 1249 adapter->flags2 |= FLAG2_IS_DISCARDING; 1250 1251 if (adapter->flags2 & FLAG2_IS_DISCARDING) { 1252 e_dbg("Packet Split buffers didn't pick up the full packet\n"); 1253 dev_kfree_skb_irq(skb); 1254 if (staterr & E1000_RXD_STAT_EOP) 1255 adapter->flags2 &= ~FLAG2_IS_DISCARDING; 1256 goto next_desc; 1257 } 1258 1259 if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && 1260 !(netdev->features & NETIF_F_RXALL))) { 1261 dev_kfree_skb_irq(skb); 1262 goto next_desc; 1263 } 1264 1265 length = le16_to_cpu(rx_desc->wb.middle.length0); 1266 1267 if (!length) { 1268 e_dbg("Last part of the packet spanning multiple descriptors\n"); 1269 dev_kfree_skb_irq(skb); 1270 goto next_desc; 1271 } 1272 1273 /* Good Receive */ 1274 skb_put(skb, length); 1275 1276 { 1277 /* 1278 * this looks ugly, but it seems compiler issues make 1279 * it more efficient than reusing j 1280 */ 1281 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); 1282 1283 /* 1284 * page alloc/put takes too long and effects small 1285 * packet throughput, so unsplit small packets and 1286 * save the alloc/put only valid in softirq (napi) 1287 * context to call kmap_* 1288 */ 1289 if (l1 && (l1 <= copybreak) && 1290 ((length + l1) <= adapter->rx_ps_bsize0)) { 1291 u8 *vaddr; 1292 1293 ps_page = &buffer_info->ps_pages[0]; 1294 1295 /* 1296 * there is no documentation about how to call 1297 * kmap_atomic, so we can't hold the mapping 1298 * very long 1299 */ 1300 dma_sync_single_for_cpu(&pdev->dev, 1301 ps_page->dma, 1302 PAGE_SIZE, 1303 DMA_FROM_DEVICE); 1304 vaddr = kmap_atomic(ps_page->page, 1305 KM_SKB_DATA_SOFTIRQ); 1306 memcpy(skb_tail_pointer(skb), vaddr, l1); 1307 kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); 1308 dma_sync_single_for_device(&pdev->dev, 1309 ps_page->dma, 1310 PAGE_SIZE, 1311 DMA_FROM_DEVICE); 1312 1313 /* remove the CRC */ 1314 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { 1315 if (!(netdev->features & NETIF_F_RXFCS)) 1316 l1 -= 4; 1317 } 1318 1319 skb_put(skb, l1); 1320 goto copydone; 1321 } /* if */ 1322 } 1323 1324 for (j = 0; j < PS_PAGE_BUFFERS; j++) { 1325 length = le16_to_cpu(rx_desc->wb.upper.length[j]); 1326 if (!length) 1327 break; 1328 1329 ps_page = &buffer_info->ps_pages[j]; 1330 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, 1331 DMA_FROM_DEVICE); 1332 ps_page->dma = 0; 1333 skb_fill_page_desc(skb, j, ps_page->page, 0, length); 1334 ps_page->page = NULL; 1335 skb->len += length; 1336 skb->data_len += length; 1337 skb->truesize += PAGE_SIZE; 1338 } 1339 1340 /* strip the ethernet crc, problem is we're using pages now so 1341 * this whole operation can get a little cpu intensive 1342 */ 1343 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { 1344 if (!(netdev->features & NETIF_F_RXFCS)) 1345 pskb_trim(skb, skb->len - 4); 1346 } 1347 1348 copydone: 1349 total_rx_bytes += skb->len; 1350 total_rx_packets++; 1351 1352 e1000_rx_checksum(adapter, staterr, 1353 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb); 1354 1355 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); 1356 1357 if (rx_desc->wb.upper.header_status & 1358 cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)) 1359 adapter->rx_hdr_split++; 1360 1361 e1000_receive_skb(adapter, netdev, skb, 1362 staterr, rx_desc->wb.middle.vlan); 1363 1364 next_desc: 1365 rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); 1366 buffer_info->skb = NULL; 1367 1368 /* return some buffers to hardware, one at a time is too slow */ 1369 if (cleaned_count >= E1000_RX_BUFFER_WRITE) { 1370 adapter->alloc_rx_buf(rx_ring, cleaned_count, 1371 GFP_ATOMIC); 1372 cleaned_count = 0; 1373 } 1374 1375 /* use prefetched values */ 1376 rx_desc = next_rxd; 1377 buffer_info = next_buffer; 1378 1379 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); 1380 } 1381 rx_ring->next_to_clean = i; 1382 1383 cleaned_count = e1000_desc_unused(rx_ring); 1384 if (cleaned_count) 1385 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); 1386 1387 adapter->total_rx_bytes += total_rx_bytes; 1388 adapter->total_rx_packets += total_rx_packets; 1389 return cleaned; 1390 } 1391 1392 /** 1393 * e1000_consume_page - helper function 1394 **/ 1395 static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, 1396 u16 length) 1397 { 1398 bi->page = NULL; 1399 skb->len += length; 1400 skb->data_len += length; 1401 skb->truesize += PAGE_SIZE; 1402 } 1403 1404 /** 1405 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy 1406 * @adapter: board private structure 1407 * 1408 * the return value indicates whether actual cleaning was done, there 1409 * is no guarantee that everything was cleaned 1410 **/ 1411 static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, 1412 int work_to_do) 1413 { 1414 struct e1000_adapter *adapter = rx_ring->adapter; 1415 struct net_device *netdev = adapter->netdev; 1416 struct pci_dev *pdev = adapter->pdev; 1417 union e1000_rx_desc_extended *rx_desc, *next_rxd; 1418 struct e1000_buffer *buffer_info, *next_buffer; 1419 u32 length, staterr; 1420 unsigned int i; 1421 int cleaned_count = 0; 1422 bool cleaned = false; 1423 unsigned int total_rx_bytes=0, total_rx_packets=0; 1424 1425 i = rx_ring->next_to_clean; 1426 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); 1427 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 1428 buffer_info = &rx_ring->buffer_info[i]; 1429 1430 while (staterr & E1000_RXD_STAT_DD) { 1431 struct sk_buff *skb; 1432 1433 if (*work_done >= work_to_do) 1434 break; 1435 (*work_done)++; 1436 rmb(); /* read descriptor and rx_buffer_info after status DD */ 1437 1438 skb = buffer_info->skb; 1439 buffer_info->skb = NULL; 1440 1441 ++i; 1442 if (i == rx_ring->count) 1443 i = 0; 1444 next_rxd = E1000_RX_DESC_EXT(*rx_ring, i); 1445 prefetch(next_rxd); 1446 1447 next_buffer = &rx_ring->buffer_info[i]; 1448 1449 cleaned = true; 1450 cleaned_count++; 1451 dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE, 1452 DMA_FROM_DEVICE); 1453 buffer_info->dma = 0; 1454 1455 length = le16_to_cpu(rx_desc->wb.upper.length); 1456 1457 /* errors is only valid for DD + EOP descriptors */ 1458 if (unlikely((staterr & E1000_RXD_STAT_EOP) && 1459 ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && 1460 !(netdev->features & NETIF_F_RXALL)))) { 1461 /* recycle both page and skb */ 1462 buffer_info->skb = skb; 1463 /* an error means any chain goes out the window too */ 1464 if (rx_ring->rx_skb_top) 1465 dev_kfree_skb_irq(rx_ring->rx_skb_top); 1466 rx_ring->rx_skb_top = NULL; 1467 goto next_desc; 1468 } 1469 1470 #define rxtop (rx_ring->rx_skb_top) 1471 if (!(staterr & E1000_RXD_STAT_EOP)) { 1472 /* this descriptor is only the beginning (or middle) */ 1473 if (!rxtop) { 1474 /* this is the beginning of a chain */ 1475 rxtop = skb; 1476 skb_fill_page_desc(rxtop, 0, buffer_info->page, 1477 0, length); 1478 } else { 1479 /* this is the middle of a chain */ 1480 skb_fill_page_desc(rxtop, 1481 skb_shinfo(rxtop)->nr_frags, 1482 buffer_info->page, 0, length); 1483 /* re-use the skb, only consumed the page */ 1484 buffer_info->skb = skb; 1485 } 1486 e1000_consume_page(buffer_info, rxtop, length); 1487 goto next_desc; 1488 } else { 1489 if (rxtop) { 1490 /* end of the chain */ 1491 skb_fill_page_desc(rxtop, 1492 skb_shinfo(rxtop)->nr_frags, 1493 buffer_info->page, 0, length); 1494 /* re-use the current skb, we only consumed the 1495 * page */ 1496 buffer_info->skb = skb; 1497 skb = rxtop; 1498 rxtop = NULL; 1499 e1000_consume_page(buffer_info, skb, length); 1500 } else { 1501 /* no chain, got EOP, this buf is the packet 1502 * copybreak to save the put_page/alloc_page */ 1503 if (length <= copybreak && 1504 skb_tailroom(skb) >= length) { 1505 u8 *vaddr; 1506 vaddr = kmap_atomic(buffer_info->page, 1507 KM_SKB_DATA_SOFTIRQ); 1508 memcpy(skb_tail_pointer(skb), vaddr, 1509 length); 1510 kunmap_atomic(vaddr, 1511 KM_SKB_DATA_SOFTIRQ); 1512 /* re-use the page, so don't erase 1513 * buffer_info->page */ 1514 skb_put(skb, length); 1515 } else { 1516 skb_fill_page_desc(skb, 0, 1517 buffer_info->page, 0, 1518 length); 1519 e1000_consume_page(buffer_info, skb, 1520 length); 1521 } 1522 } 1523 } 1524 1525 /* Receive Checksum Offload XXX recompute due to CRC strip? */ 1526 e1000_rx_checksum(adapter, staterr, 1527 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb); 1528 1529 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); 1530 1531 /* probably a little skewed due to removing CRC */ 1532 total_rx_bytes += skb->len; 1533 total_rx_packets++; 1534 1535 /* eth type trans needs skb->data to point to something */ 1536 if (!pskb_may_pull(skb, ETH_HLEN)) { 1537 e_err("pskb_may_pull failed.\n"); 1538 dev_kfree_skb_irq(skb); 1539 goto next_desc; 1540 } 1541 1542 e1000_receive_skb(adapter, netdev, skb, staterr, 1543 rx_desc->wb.upper.vlan); 1544 1545 next_desc: 1546 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF); 1547 1548 /* return some buffers to hardware, one at a time is too slow */ 1549 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { 1550 adapter->alloc_rx_buf(rx_ring, cleaned_count, 1551 GFP_ATOMIC); 1552 cleaned_count = 0; 1553 } 1554 1555 /* use prefetched values */ 1556 rx_desc = next_rxd; 1557 buffer_info = next_buffer; 1558 1559 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 1560 } 1561 rx_ring->next_to_clean = i; 1562 1563 cleaned_count = e1000_desc_unused(rx_ring); 1564 if (cleaned_count) 1565 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); 1566 1567 adapter->total_rx_bytes += total_rx_bytes; 1568 adapter->total_rx_packets += total_rx_packets; 1569 return cleaned; 1570 } 1571 1572 /** 1573 * e1000_clean_rx_ring - Free Rx Buffers per Queue 1574 * @rx_ring: Rx descriptor ring 1575 **/ 1576 static void e1000_clean_rx_ring(struct e1000_ring *rx_ring) 1577 { 1578 struct e1000_adapter *adapter = rx_ring->adapter; 1579 struct e1000_buffer *buffer_info; 1580 struct e1000_ps_page *ps_page; 1581 struct pci_dev *pdev = adapter->pdev; 1582 unsigned int i, j; 1583 1584 /* Free all the Rx ring sk_buffs */ 1585 for (i = 0; i < rx_ring->count; i++) { 1586 buffer_info = &rx_ring->buffer_info[i]; 1587 if (buffer_info->dma) { 1588 if (adapter->clean_rx == e1000_clean_rx_irq) 1589 dma_unmap_single(&pdev->dev, buffer_info->dma, 1590 adapter->rx_buffer_len, 1591 DMA_FROM_DEVICE); 1592 else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) 1593 dma_unmap_page(&pdev->dev, buffer_info->dma, 1594 PAGE_SIZE, 1595 DMA_FROM_DEVICE); 1596 else if (adapter->clean_rx == e1000_clean_rx_irq_ps) 1597 dma_unmap_single(&pdev->dev, buffer_info->dma, 1598 adapter->rx_ps_bsize0, 1599 DMA_FROM_DEVICE); 1600 buffer_info->dma = 0; 1601 } 1602 1603 if (buffer_info->page) { 1604 put_page(buffer_info->page); 1605 buffer_info->page = NULL; 1606 } 1607 1608 if (buffer_info->skb) { 1609 dev_kfree_skb(buffer_info->skb); 1610 buffer_info->skb = NULL; 1611 } 1612 1613 for (j = 0; j < PS_PAGE_BUFFERS; j++) { 1614 ps_page = &buffer_info->ps_pages[j]; 1615 if (!ps_page->page) 1616 break; 1617 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, 1618 DMA_FROM_DEVICE); 1619 ps_page->dma = 0; 1620 put_page(ps_page->page); 1621 ps_page->page = NULL; 1622 } 1623 } 1624 1625 /* there also may be some cached data from a chained receive */ 1626 if (rx_ring->rx_skb_top) { 1627 dev_kfree_skb(rx_ring->rx_skb_top); 1628 rx_ring->rx_skb_top = NULL; 1629 } 1630 1631 /* Zero out the descriptor ring */ 1632 memset(rx_ring->desc, 0, rx_ring->size); 1633 1634 rx_ring->next_to_clean = 0; 1635 rx_ring->next_to_use = 0; 1636 adapter->flags2 &= ~FLAG2_IS_DISCARDING; 1637 1638 writel(0, rx_ring->head); 1639 writel(0, rx_ring->tail); 1640 } 1641 1642 static void e1000e_downshift_workaround(struct work_struct *work) 1643 { 1644 struct e1000_adapter *adapter = container_of(work, 1645 struct e1000_adapter, downshift_task); 1646 1647 if (test_bit(__E1000_DOWN, &adapter->state)) 1648 return; 1649 1650 e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); 1651 } 1652 1653 /** 1654 * e1000_intr_msi - Interrupt Handler 1655 * @irq: interrupt number 1656 * @data: pointer to a network interface device structure 1657 **/ 1658 static irqreturn_t e1000_intr_msi(int irq, void *data) 1659 { 1660 struct net_device *netdev = data; 1661 struct e1000_adapter *adapter = netdev_priv(netdev); 1662 struct e1000_hw *hw = &adapter->hw; 1663 u32 icr = er32(ICR); 1664 1665 /* 1666 * read ICR disables interrupts using IAM 1667 */ 1668 1669 if (icr & E1000_ICR_LSC) { 1670 hw->mac.get_link_status = true; 1671 /* 1672 * ICH8 workaround-- Call gig speed drop workaround on cable 1673 * disconnect (LSC) before accessing any PHY registers 1674 */ 1675 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && 1676 (!(er32(STATUS) & E1000_STATUS_LU))) 1677 schedule_work(&adapter->downshift_task); 1678 1679 /* 1680 * 80003ES2LAN workaround-- For packet buffer work-around on 1681 * link down event; disable receives here in the ISR and reset 1682 * adapter in watchdog 1683 */ 1684 if (netif_carrier_ok(netdev) && 1685 adapter->flags & FLAG_RX_NEEDS_RESTART) { 1686 /* disable receives */ 1687 u32 rctl = er32(RCTL); 1688 ew32(RCTL, rctl & ~E1000_RCTL_EN); 1689 adapter->flags |= FLAG_RX_RESTART_NOW; 1690 } 1691 /* guard against interrupt when we're going down */ 1692 if (!test_bit(__E1000_DOWN, &adapter->state)) 1693 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1694 } 1695 1696 if (napi_schedule_prep(&adapter->napi)) { 1697 adapter->total_tx_bytes = 0; 1698 adapter->total_tx_packets = 0; 1699 adapter->total_rx_bytes = 0; 1700 adapter->total_rx_packets = 0; 1701 __napi_schedule(&adapter->napi); 1702 } 1703 1704 return IRQ_HANDLED; 1705 } 1706 1707 /** 1708 * e1000_intr - Interrupt Handler 1709 * @irq: interrupt number 1710 * @data: pointer to a network interface device structure 1711 **/ 1712 static irqreturn_t e1000_intr(int irq, void *data) 1713 { 1714 struct net_device *netdev = data; 1715 struct e1000_adapter *adapter = netdev_priv(netdev); 1716 struct e1000_hw *hw = &adapter->hw; 1717 u32 rctl, icr = er32(ICR); 1718 1719 if (!icr || test_bit(__E1000_DOWN, &adapter->state)) 1720 return IRQ_NONE; /* Not our interrupt */ 1721 1722 /* 1723 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is 1724 * not set, then the adapter didn't send an interrupt 1725 */ 1726 if (!(icr & E1000_ICR_INT_ASSERTED)) 1727 return IRQ_NONE; 1728 1729 /* 1730 * Interrupt Auto-Mask...upon reading ICR, 1731 * interrupts are masked. No need for the 1732 * IMC write 1733 */ 1734 1735 if (icr & E1000_ICR_LSC) { 1736 hw->mac.get_link_status = true; 1737 /* 1738 * ICH8 workaround-- Call gig speed drop workaround on cable 1739 * disconnect (LSC) before accessing any PHY registers 1740 */ 1741 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && 1742 (!(er32(STATUS) & E1000_STATUS_LU))) 1743 schedule_work(&adapter->downshift_task); 1744 1745 /* 1746 * 80003ES2LAN workaround-- 1747 * For packet buffer work-around on link down event; 1748 * disable receives here in the ISR and 1749 * reset adapter in watchdog 1750 */ 1751 if (netif_carrier_ok(netdev) && 1752 (adapter->flags & FLAG_RX_NEEDS_RESTART)) { 1753 /* disable receives */ 1754 rctl = er32(RCTL); 1755 ew32(RCTL, rctl & ~E1000_RCTL_EN); 1756 adapter->flags |= FLAG_RX_RESTART_NOW; 1757 } 1758 /* guard against interrupt when we're going down */ 1759 if (!test_bit(__E1000_DOWN, &adapter->state)) 1760 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1761 } 1762 1763 if (napi_schedule_prep(&adapter->napi)) { 1764 adapter->total_tx_bytes = 0; 1765 adapter->total_tx_packets = 0; 1766 adapter->total_rx_bytes = 0; 1767 adapter->total_rx_packets = 0; 1768 __napi_schedule(&adapter->napi); 1769 } 1770 1771 return IRQ_HANDLED; 1772 } 1773 1774 static irqreturn_t e1000_msix_other(int irq, void *data) 1775 { 1776 struct net_device *netdev = data; 1777 struct e1000_adapter *adapter = netdev_priv(netdev); 1778 struct e1000_hw *hw = &adapter->hw; 1779 u32 icr = er32(ICR); 1780 1781 if (!(icr & E1000_ICR_INT_ASSERTED)) { 1782 if (!test_bit(__E1000_DOWN, &adapter->state)) 1783 ew32(IMS, E1000_IMS_OTHER); 1784 return IRQ_NONE; 1785 } 1786 1787 if (icr & adapter->eiac_mask) 1788 ew32(ICS, (icr & adapter->eiac_mask)); 1789 1790 if (icr & E1000_ICR_OTHER) { 1791 if (!(icr & E1000_ICR_LSC)) 1792 goto no_link_interrupt; 1793 hw->mac.get_link_status = true; 1794 /* guard against interrupt when we're going down */ 1795 if (!test_bit(__E1000_DOWN, &adapter->state)) 1796 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1797 } 1798 1799 no_link_interrupt: 1800 if (!test_bit(__E1000_DOWN, &adapter->state)) 1801 ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER); 1802 1803 return IRQ_HANDLED; 1804 } 1805 1806 1807 static irqreturn_t e1000_intr_msix_tx(int irq, void *data) 1808 { 1809 struct net_device *netdev = data; 1810 struct e1000_adapter *adapter = netdev_priv(netdev); 1811 struct e1000_hw *hw = &adapter->hw; 1812 struct e1000_ring *tx_ring = adapter->tx_ring; 1813 1814 1815 adapter->total_tx_bytes = 0; 1816 adapter->total_tx_packets = 0; 1817 1818 if (!e1000_clean_tx_irq(tx_ring)) 1819 /* Ring was not completely cleaned, so fire another interrupt */ 1820 ew32(ICS, tx_ring->ims_val); 1821 1822 return IRQ_HANDLED; 1823 } 1824 1825 static irqreturn_t e1000_intr_msix_rx(int irq, void *data) 1826 { 1827 struct net_device *netdev = data; 1828 struct e1000_adapter *adapter = netdev_priv(netdev); 1829 struct e1000_ring *rx_ring = adapter->rx_ring; 1830 1831 /* Write the ITR value calculated at the end of the 1832 * previous interrupt. 1833 */ 1834 if (rx_ring->set_itr) { 1835 writel(1000000000 / (rx_ring->itr_val * 256), 1836 rx_ring->itr_register); 1837 rx_ring->set_itr = 0; 1838 } 1839 1840 if (napi_schedule_prep(&adapter->napi)) { 1841 adapter->total_rx_bytes = 0; 1842 adapter->total_rx_packets = 0; 1843 __napi_schedule(&adapter->napi); 1844 } 1845 return IRQ_HANDLED; 1846 } 1847 1848 /** 1849 * e1000_configure_msix - Configure MSI-X hardware 1850 * 1851 * e1000_configure_msix sets up the hardware to properly 1852 * generate MSI-X interrupts. 1853 **/ 1854 static void e1000_configure_msix(struct e1000_adapter *adapter) 1855 { 1856 struct e1000_hw *hw = &adapter->hw; 1857 struct e1000_ring *rx_ring = adapter->rx_ring; 1858 struct e1000_ring *tx_ring = adapter->tx_ring; 1859 int vector = 0; 1860 u32 ctrl_ext, ivar = 0; 1861 1862 adapter->eiac_mask = 0; 1863 1864 /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */ 1865 if (hw->mac.type == e1000_82574) { 1866 u32 rfctl = er32(RFCTL); 1867 rfctl |= E1000_RFCTL_ACK_DIS; 1868 ew32(RFCTL, rfctl); 1869 } 1870 1871 #define E1000_IVAR_INT_ALLOC_VALID 0x8 1872 /* Configure Rx vector */ 1873 rx_ring->ims_val = E1000_IMS_RXQ0; 1874 adapter->eiac_mask |= rx_ring->ims_val; 1875 if (rx_ring->itr_val) 1876 writel(1000000000 / (rx_ring->itr_val * 256), 1877 rx_ring->itr_register); 1878 else 1879 writel(1, rx_ring->itr_register); 1880 ivar = E1000_IVAR_INT_ALLOC_VALID | vector; 1881 1882 /* Configure Tx vector */ 1883 tx_ring->ims_val = E1000_IMS_TXQ0; 1884 vector++; 1885 if (tx_ring->itr_val) 1886 writel(1000000000 / (tx_ring->itr_val * 256), 1887 tx_ring->itr_register); 1888 else 1889 writel(1, tx_ring->itr_register); 1890 adapter->eiac_mask |= tx_ring->ims_val; 1891 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8); 1892 1893 /* set vector for Other Causes, e.g. link changes */ 1894 vector++; 1895 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16); 1896 if (rx_ring->itr_val) 1897 writel(1000000000 / (rx_ring->itr_val * 256), 1898 hw->hw_addr + E1000_EITR_82574(vector)); 1899 else 1900 writel(1, hw->hw_addr + E1000_EITR_82574(vector)); 1901 1902 /* Cause Tx interrupts on every write back */ 1903 ivar |= (1 << 31); 1904 1905 ew32(IVAR, ivar); 1906 1907 /* enable MSI-X PBA support */ 1908 ctrl_ext = er32(CTRL_EXT); 1909 ctrl_ext |= E1000_CTRL_EXT_PBA_CLR; 1910 1911 /* Auto-Mask Other interrupts upon ICR read */ 1912 #define E1000_EIAC_MASK_82574 0x01F00000 1913 ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER); 1914 ctrl_ext |= E1000_CTRL_EXT_EIAME; 1915 ew32(CTRL_EXT, ctrl_ext); 1916 e1e_flush(); 1917 } 1918 1919 void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter) 1920 { 1921 if (adapter->msix_entries) { 1922 pci_disable_msix(adapter->pdev); 1923 kfree(adapter->msix_entries); 1924 adapter->msix_entries = NULL; 1925 } else if (adapter->flags & FLAG_MSI_ENABLED) { 1926 pci_disable_msi(adapter->pdev); 1927 adapter->flags &= ~FLAG_MSI_ENABLED; 1928 } 1929 } 1930 1931 /** 1932 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported 1933 * 1934 * Attempt to configure interrupts using the best available 1935 * capabilities of the hardware and kernel. 1936 **/ 1937 void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) 1938 { 1939 int err; 1940 int i; 1941 1942 switch (adapter->int_mode) { 1943 case E1000E_INT_MODE_MSIX: 1944 if (adapter->flags & FLAG_HAS_MSIX) { 1945 adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */ 1946 adapter->msix_entries = kcalloc(adapter->num_vectors, 1947 sizeof(struct msix_entry), 1948 GFP_KERNEL); 1949 if (adapter->msix_entries) { 1950 for (i = 0; i < adapter->num_vectors; i++) 1951 adapter->msix_entries[i].entry = i; 1952 1953 err = pci_enable_msix(adapter->pdev, 1954 adapter->msix_entries, 1955 adapter->num_vectors); 1956 if (err == 0) 1957 return; 1958 } 1959 /* MSI-X failed, so fall through and try MSI */ 1960 e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n"); 1961 e1000e_reset_interrupt_capability(adapter); 1962 } 1963 adapter->int_mode = E1000E_INT_MODE_MSI; 1964 /* Fall through */ 1965 case E1000E_INT_MODE_MSI: 1966 if (!pci_enable_msi(adapter->pdev)) { 1967 adapter->flags |= FLAG_MSI_ENABLED; 1968 } else { 1969 adapter->int_mode = E1000E_INT_MODE_LEGACY; 1970 e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n"); 1971 } 1972 /* Fall through */ 1973 case E1000E_INT_MODE_LEGACY: 1974 /* Don't do anything; this is the system default */ 1975 break; 1976 } 1977 1978 /* store the number of vectors being used */ 1979 adapter->num_vectors = 1; 1980 } 1981 1982 /** 1983 * e1000_request_msix - Initialize MSI-X interrupts 1984 * 1985 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the 1986 * kernel. 1987 **/ 1988 static int e1000_request_msix(struct e1000_adapter *adapter) 1989 { 1990 struct net_device *netdev = adapter->netdev; 1991 int err = 0, vector = 0; 1992 1993 if (strlen(netdev->name) < (IFNAMSIZ - 5)) 1994 snprintf(adapter->rx_ring->name, 1995 sizeof(adapter->rx_ring->name) - 1, 1996 "%s-rx-0", netdev->name); 1997 else 1998 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); 1999 err = request_irq(adapter->msix_entries[vector].vector, 2000 e1000_intr_msix_rx, 0, adapter->rx_ring->name, 2001 netdev); 2002 if (err) 2003 return err; 2004 adapter->rx_ring->itr_register = adapter->hw.hw_addr + 2005 E1000_EITR_82574(vector); 2006 adapter->rx_ring->itr_val = adapter->itr; 2007 vector++; 2008 2009 if (strlen(netdev->name) < (IFNAMSIZ - 5)) 2010 snprintf(adapter->tx_ring->name, 2011 sizeof(adapter->tx_ring->name) - 1, 2012 "%s-tx-0", netdev->name); 2013 else 2014 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); 2015 err = request_irq(adapter->msix_entries[vector].vector, 2016 e1000_intr_msix_tx, 0, adapter->tx_ring->name, 2017 netdev); 2018 if (err) 2019 return err; 2020 adapter->tx_ring->itr_register = adapter->hw.hw_addr + 2021 E1000_EITR_82574(vector); 2022 adapter->tx_ring->itr_val = adapter->itr; 2023 vector++; 2024 2025 err = request_irq(adapter->msix_entries[vector].vector, 2026 e1000_msix_other, 0, netdev->name, netdev); 2027 if (err) 2028 return err; 2029 2030 e1000_configure_msix(adapter); 2031 2032 return 0; 2033 } 2034 2035 /** 2036 * e1000_request_irq - initialize interrupts 2037 * 2038 * Attempts to configure interrupts using the best available 2039 * capabilities of the hardware and kernel. 2040 **/ 2041 static int e1000_request_irq(struct e1000_adapter *adapter) 2042 { 2043 struct net_device *netdev = adapter->netdev; 2044 int err; 2045 2046 if (adapter->msix_entries) { 2047 err = e1000_request_msix(adapter); 2048 if (!err) 2049 return err; 2050 /* fall back to MSI */ 2051 e1000e_reset_interrupt_capability(adapter); 2052 adapter->int_mode = E1000E_INT_MODE_MSI; 2053 e1000e_set_interrupt_capability(adapter); 2054 } 2055 if (adapter->flags & FLAG_MSI_ENABLED) { 2056 err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0, 2057 netdev->name, netdev); 2058 if (!err) 2059 return err; 2060 2061 /* fall back to legacy interrupt */ 2062 e1000e_reset_interrupt_capability(adapter); 2063 adapter->int_mode = E1000E_INT_MODE_LEGACY; 2064 } 2065 2066 err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED, 2067 netdev->name, netdev); 2068 if (err) 2069 e_err("Unable to allocate interrupt, Error: %d\n", err); 2070 2071 return err; 2072 } 2073 2074 static void e1000_free_irq(struct e1000_adapter *adapter) 2075 { 2076 struct net_device *netdev = adapter->netdev; 2077 2078 if (adapter->msix_entries) { 2079 int vector = 0; 2080 2081 free_irq(adapter->msix_entries[vector].vector, netdev); 2082 vector++; 2083 2084 free_irq(adapter->msix_entries[vector].vector, netdev); 2085 vector++; 2086 2087 /* Other Causes interrupt vector */ 2088 free_irq(adapter->msix_entries[vector].vector, netdev); 2089 return; 2090 } 2091 2092 free_irq(adapter->pdev->irq, netdev); 2093 } 2094 2095 /** 2096 * e1000_irq_disable - Mask off interrupt generation on the NIC 2097 **/ 2098 static void e1000_irq_disable(struct e1000_adapter *adapter) 2099 { 2100 struct e1000_hw *hw = &adapter->hw; 2101 2102 ew32(IMC, ~0); 2103 if (adapter->msix_entries) 2104 ew32(EIAC_82574, 0); 2105 e1e_flush(); 2106 2107 if (adapter->msix_entries) { 2108 int i; 2109 for (i = 0; i < adapter->num_vectors; i++) 2110 synchronize_irq(adapter->msix_entries[i].vector); 2111 } else { 2112 synchronize_irq(adapter->pdev->irq); 2113 } 2114 } 2115 2116 /** 2117 * e1000_irq_enable - Enable default interrupt generation settings 2118 **/ 2119 static void e1000_irq_enable(struct e1000_adapter *adapter) 2120 { 2121 struct e1000_hw *hw = &adapter->hw; 2122 2123 if (adapter->msix_entries) { 2124 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); 2125 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); 2126 } else { 2127 ew32(IMS, IMS_ENABLE_MASK); 2128 } 2129 e1e_flush(); 2130 } 2131 2132 /** 2133 * e1000e_get_hw_control - get control of the h/w from f/w 2134 * @adapter: address of board private structure 2135 * 2136 * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit. 2137 * For ASF and Pass Through versions of f/w this means that 2138 * the driver is loaded. For AMT version (only with 82573) 2139 * of the f/w this means that the network i/f is open. 2140 **/ 2141 void e1000e_get_hw_control(struct e1000_adapter *adapter) 2142 { 2143 struct e1000_hw *hw = &adapter->hw; 2144 u32 ctrl_ext; 2145 u32 swsm; 2146 2147 /* Let firmware know the driver has taken over */ 2148 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { 2149 swsm = er32(SWSM); 2150 ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD); 2151 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { 2152 ctrl_ext = er32(CTRL_EXT); 2153 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 2154 } 2155 } 2156 2157 /** 2158 * e1000e_release_hw_control - release control of the h/w to f/w 2159 * @adapter: address of board private structure 2160 * 2161 * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit. 2162 * For ASF and Pass Through versions of f/w this means that the 2163 * driver is no longer loaded. For AMT version (only with 82573) i 2164 * of the f/w this means that the network i/f is closed. 2165 * 2166 **/ 2167 void e1000e_release_hw_control(struct e1000_adapter *adapter) 2168 { 2169 struct e1000_hw *hw = &adapter->hw; 2170 u32 ctrl_ext; 2171 u32 swsm; 2172 2173 /* Let firmware taken over control of h/w */ 2174 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { 2175 swsm = er32(SWSM); 2176 ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD); 2177 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { 2178 ctrl_ext = er32(CTRL_EXT); 2179 ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 2180 } 2181 } 2182 2183 /** 2184 * @e1000_alloc_ring - allocate memory for a ring structure 2185 **/ 2186 static int e1000_alloc_ring_dma(struct e1000_adapter *adapter, 2187 struct e1000_ring *ring) 2188 { 2189 struct pci_dev *pdev = adapter->pdev; 2190 2191 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, 2192 GFP_KERNEL); 2193 if (!ring->desc) 2194 return -ENOMEM; 2195 2196 return 0; 2197 } 2198 2199 /** 2200 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors) 2201 * @tx_ring: Tx descriptor ring 2202 * 2203 * Return 0 on success, negative on failure 2204 **/ 2205 int e1000e_setup_tx_resources(struct e1000_ring *tx_ring) 2206 { 2207 struct e1000_adapter *adapter = tx_ring->adapter; 2208 int err = -ENOMEM, size; 2209 2210 size = sizeof(struct e1000_buffer) * tx_ring->count; 2211 tx_ring->buffer_info = vzalloc(size); 2212 if (!tx_ring->buffer_info) 2213 goto err; 2214 2215 /* round up to nearest 4K */ 2216 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); 2217 tx_ring->size = ALIGN(tx_ring->size, 4096); 2218 2219 err = e1000_alloc_ring_dma(adapter, tx_ring); 2220 if (err) 2221 goto err; 2222 2223 tx_ring->next_to_use = 0; 2224 tx_ring->next_to_clean = 0; 2225 2226 return 0; 2227 err: 2228 vfree(tx_ring->buffer_info); 2229 e_err("Unable to allocate memory for the transmit descriptor ring\n"); 2230 return err; 2231 } 2232 2233 /** 2234 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors) 2235 * @rx_ring: Rx descriptor ring 2236 * 2237 * Returns 0 on success, negative on failure 2238 **/ 2239 int e1000e_setup_rx_resources(struct e1000_ring *rx_ring) 2240 { 2241 struct e1000_adapter *adapter = rx_ring->adapter; 2242 struct e1000_buffer *buffer_info; 2243 int i, size, desc_len, err = -ENOMEM; 2244 2245 size = sizeof(struct e1000_buffer) * rx_ring->count; 2246 rx_ring->buffer_info = vzalloc(size); 2247 if (!rx_ring->buffer_info) 2248 goto err; 2249 2250 for (i = 0; i < rx_ring->count; i++) { 2251 buffer_info = &rx_ring->buffer_info[i]; 2252 buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS, 2253 sizeof(struct e1000_ps_page), 2254 GFP_KERNEL); 2255 if (!buffer_info->ps_pages) 2256 goto err_pages; 2257 } 2258 2259 desc_len = sizeof(union e1000_rx_desc_packet_split); 2260 2261 /* Round up to nearest 4K */ 2262 rx_ring->size = rx_ring->count * desc_len; 2263 rx_ring->size = ALIGN(rx_ring->size, 4096); 2264 2265 err = e1000_alloc_ring_dma(adapter, rx_ring); 2266 if (err) 2267 goto err_pages; 2268 2269 rx_ring->next_to_clean = 0; 2270 rx_ring->next_to_use = 0; 2271 rx_ring->rx_skb_top = NULL; 2272 2273 return 0; 2274 2275 err_pages: 2276 for (i = 0; i < rx_ring->count; i++) { 2277 buffer_info = &rx_ring->buffer_info[i]; 2278 kfree(buffer_info->ps_pages); 2279 } 2280 err: 2281 vfree(rx_ring->buffer_info); 2282 e_err("Unable to allocate memory for the receive descriptor ring\n"); 2283 return err; 2284 } 2285 2286 /** 2287 * e1000_clean_tx_ring - Free Tx Buffers 2288 * @tx_ring: Tx descriptor ring 2289 **/ 2290 static void e1000_clean_tx_ring(struct e1000_ring *tx_ring) 2291 { 2292 struct e1000_adapter *adapter = tx_ring->adapter; 2293 struct e1000_buffer *buffer_info; 2294 unsigned long size; 2295 unsigned int i; 2296 2297 for (i = 0; i < tx_ring->count; i++) { 2298 buffer_info = &tx_ring->buffer_info[i]; 2299 e1000_put_txbuf(tx_ring, buffer_info); 2300 } 2301 2302 netdev_reset_queue(adapter->netdev); 2303 size = sizeof(struct e1000_buffer) * tx_ring->count; 2304 memset(tx_ring->buffer_info, 0, size); 2305 2306 memset(tx_ring->desc, 0, tx_ring->size); 2307 2308 tx_ring->next_to_use = 0; 2309 tx_ring->next_to_clean = 0; 2310 2311 writel(0, tx_ring->head); 2312 writel(0, tx_ring->tail); 2313 } 2314 2315 /** 2316 * e1000e_free_tx_resources - Free Tx Resources per Queue 2317 * @tx_ring: Tx descriptor ring 2318 * 2319 * Free all transmit software resources 2320 **/ 2321 void e1000e_free_tx_resources(struct e1000_ring *tx_ring) 2322 { 2323 struct e1000_adapter *adapter = tx_ring->adapter; 2324 struct pci_dev *pdev = adapter->pdev; 2325 2326 e1000_clean_tx_ring(tx_ring); 2327 2328 vfree(tx_ring->buffer_info); 2329 tx_ring->buffer_info = NULL; 2330 2331 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 2332 tx_ring->dma); 2333 tx_ring->desc = NULL; 2334 } 2335 2336 /** 2337 * e1000e_free_rx_resources - Free Rx Resources 2338 * @rx_ring: Rx descriptor ring 2339 * 2340 * Free all receive software resources 2341 **/ 2342 void e1000e_free_rx_resources(struct e1000_ring *rx_ring) 2343 { 2344 struct e1000_adapter *adapter = rx_ring->adapter; 2345 struct pci_dev *pdev = adapter->pdev; 2346 int i; 2347 2348 e1000_clean_rx_ring(rx_ring); 2349 2350 for (i = 0; i < rx_ring->count; i++) 2351 kfree(rx_ring->buffer_info[i].ps_pages); 2352 2353 vfree(rx_ring->buffer_info); 2354 rx_ring->buffer_info = NULL; 2355 2356 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 2357 rx_ring->dma); 2358 rx_ring->desc = NULL; 2359 } 2360 2361 /** 2362 * e1000_update_itr - update the dynamic ITR value based on statistics 2363 * @adapter: pointer to adapter 2364 * @itr_setting: current adapter->itr 2365 * @packets: the number of packets during this measurement interval 2366 * @bytes: the number of bytes during this measurement interval 2367 * 2368 * Stores a new ITR value based on packets and byte 2369 * counts during the last interrupt. The advantage of per interrupt 2370 * computation is faster updates and more accurate ITR for the current 2371 * traffic pattern. Constants in this function were computed 2372 * based on theoretical maximum wire speed and thresholds were set based 2373 * on testing data as well as attempting to minimize response time 2374 * while increasing bulk throughput. This functionality is controlled 2375 * by the InterruptThrottleRate module parameter. 2376 **/ 2377 static unsigned int e1000_update_itr(struct e1000_adapter *adapter, 2378 u16 itr_setting, int packets, 2379 int bytes) 2380 { 2381 unsigned int retval = itr_setting; 2382 2383 if (packets == 0) 2384 return itr_setting; 2385 2386 switch (itr_setting) { 2387 case lowest_latency: 2388 /* handle TSO and jumbo frames */ 2389 if (bytes/packets > 8000) 2390 retval = bulk_latency; 2391 else if ((packets < 5) && (bytes > 512)) 2392 retval = low_latency; 2393 break; 2394 case low_latency: /* 50 usec aka 20000 ints/s */ 2395 if (bytes > 10000) { 2396 /* this if handles the TSO accounting */ 2397 if (bytes/packets > 8000) 2398 retval = bulk_latency; 2399 else if ((packets < 10) || ((bytes/packets) > 1200)) 2400 retval = bulk_latency; 2401 else if ((packets > 35)) 2402 retval = lowest_latency; 2403 } else if (bytes/packets > 2000) { 2404 retval = bulk_latency; 2405 } else if (packets <= 2 && bytes < 512) { 2406 retval = lowest_latency; 2407 } 2408 break; 2409 case bulk_latency: /* 250 usec aka 4000 ints/s */ 2410 if (bytes > 25000) { 2411 if (packets > 35) 2412 retval = low_latency; 2413 } else if (bytes < 6000) { 2414 retval = low_latency; 2415 } 2416 break; 2417 } 2418 2419 return retval; 2420 } 2421 2422 static void e1000_set_itr(struct e1000_adapter *adapter) 2423 { 2424 struct e1000_hw *hw = &adapter->hw; 2425 u16 current_itr; 2426 u32 new_itr = adapter->itr; 2427 2428 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 2429 if (adapter->link_speed != SPEED_1000) { 2430 current_itr = 0; 2431 new_itr = 4000; 2432 goto set_itr_now; 2433 } 2434 2435 if (adapter->flags2 & FLAG2_DISABLE_AIM) { 2436 new_itr = 0; 2437 goto set_itr_now; 2438 } 2439 2440 adapter->tx_itr = e1000_update_itr(adapter, 2441 adapter->tx_itr, 2442 adapter->total_tx_packets, 2443 adapter->total_tx_bytes); 2444 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2445 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) 2446 adapter->tx_itr = low_latency; 2447 2448 adapter->rx_itr = e1000_update_itr(adapter, 2449 adapter->rx_itr, 2450 adapter->total_rx_packets, 2451 adapter->total_rx_bytes); 2452 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2453 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) 2454 adapter->rx_itr = low_latency; 2455 2456 current_itr = max(adapter->rx_itr, adapter->tx_itr); 2457 2458 switch (current_itr) { 2459 /* counts and packets in update_itr are dependent on these numbers */ 2460 case lowest_latency: 2461 new_itr = 70000; 2462 break; 2463 case low_latency: 2464 new_itr = 20000; /* aka hwitr = ~200 */ 2465 break; 2466 case bulk_latency: 2467 new_itr = 4000; 2468 break; 2469 default: 2470 break; 2471 } 2472 2473 set_itr_now: 2474 if (new_itr != adapter->itr) { 2475 /* 2476 * this attempts to bias the interrupt rate towards Bulk 2477 * by adding intermediate steps when interrupt rate is 2478 * increasing 2479 */ 2480 new_itr = new_itr > adapter->itr ? 2481 min(adapter->itr + (new_itr >> 2), new_itr) : 2482 new_itr; 2483 adapter->itr = new_itr; 2484 adapter->rx_ring->itr_val = new_itr; 2485 if (adapter->msix_entries) 2486 adapter->rx_ring->set_itr = 1; 2487 else 2488 if (new_itr) 2489 ew32(ITR, 1000000000 / (new_itr * 256)); 2490 else 2491 ew32(ITR, 0); 2492 } 2493 } 2494 2495 /** 2496 * e1000_alloc_queues - Allocate memory for all rings 2497 * @adapter: board private structure to initialize 2498 **/ 2499 static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter) 2500 { 2501 int size = sizeof(struct e1000_ring); 2502 2503 adapter->tx_ring = kzalloc(size, GFP_KERNEL); 2504 if (!adapter->tx_ring) 2505 goto err; 2506 adapter->tx_ring->count = adapter->tx_ring_count; 2507 adapter->tx_ring->adapter = adapter; 2508 2509 adapter->rx_ring = kzalloc(size, GFP_KERNEL); 2510 if (!adapter->rx_ring) 2511 goto err; 2512 adapter->rx_ring->count = adapter->rx_ring_count; 2513 adapter->rx_ring->adapter = adapter; 2514 2515 return 0; 2516 err: 2517 e_err("Unable to allocate memory for queues\n"); 2518 kfree(adapter->rx_ring); 2519 kfree(adapter->tx_ring); 2520 return -ENOMEM; 2521 } 2522 2523 /** 2524 * e1000_clean - NAPI Rx polling callback 2525 * @napi: struct associated with this polling callback 2526 * @budget: amount of packets driver is allowed to process this poll 2527 **/ 2528 static int e1000_clean(struct napi_struct *napi, int budget) 2529 { 2530 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); 2531 struct e1000_hw *hw = &adapter->hw; 2532 struct net_device *poll_dev = adapter->netdev; 2533 int tx_cleaned = 1, work_done = 0; 2534 2535 adapter = netdev_priv(poll_dev); 2536 2537 if (adapter->msix_entries && 2538 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) 2539 goto clean_rx; 2540 2541 tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring); 2542 2543 clean_rx: 2544 adapter->clean_rx(adapter->rx_ring, &work_done, budget); 2545 2546 if (!tx_cleaned) 2547 work_done = budget; 2548 2549 /* If budget not fully consumed, exit the polling mode */ 2550 if (work_done < budget) { 2551 if (adapter->itr_setting & 3) 2552 e1000_set_itr(adapter); 2553 napi_complete(napi); 2554 if (!test_bit(__E1000_DOWN, &adapter->state)) { 2555 if (adapter->msix_entries) 2556 ew32(IMS, adapter->rx_ring->ims_val); 2557 else 2558 e1000_irq_enable(adapter); 2559 } 2560 } 2561 2562 return work_done; 2563 } 2564 2565 static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 2566 { 2567 struct e1000_adapter *adapter = netdev_priv(netdev); 2568 struct e1000_hw *hw = &adapter->hw; 2569 u32 vfta, index; 2570 2571 /* don't update vlan cookie if already programmed */ 2572 if ((adapter->hw.mng_cookie.status & 2573 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 2574 (vid == adapter->mng_vlan_id)) 2575 return 0; 2576 2577 /* add VID to filter table */ 2578 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { 2579 index = (vid >> 5) & 0x7F; 2580 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); 2581 vfta |= (1 << (vid & 0x1F)); 2582 hw->mac.ops.write_vfta(hw, index, vfta); 2583 } 2584 2585 set_bit(vid, adapter->active_vlans); 2586 2587 return 0; 2588 } 2589 2590 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 2591 { 2592 struct e1000_adapter *adapter = netdev_priv(netdev); 2593 struct e1000_hw *hw = &adapter->hw; 2594 u32 vfta, index; 2595 2596 if ((adapter->hw.mng_cookie.status & 2597 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 2598 (vid == adapter->mng_vlan_id)) { 2599 /* release control to f/w */ 2600 e1000e_release_hw_control(adapter); 2601 return 0; 2602 } 2603 2604 /* remove VID from filter table */ 2605 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { 2606 index = (vid >> 5) & 0x7F; 2607 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); 2608 vfta &= ~(1 << (vid & 0x1F)); 2609 hw->mac.ops.write_vfta(hw, index, vfta); 2610 } 2611 2612 clear_bit(vid, adapter->active_vlans); 2613 2614 return 0; 2615 } 2616 2617 /** 2618 * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering 2619 * @adapter: board private structure to initialize 2620 **/ 2621 static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter) 2622 { 2623 struct net_device *netdev = adapter->netdev; 2624 struct e1000_hw *hw = &adapter->hw; 2625 u32 rctl; 2626 2627 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { 2628 /* disable VLAN receive filtering */ 2629 rctl = er32(RCTL); 2630 rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN); 2631 ew32(RCTL, rctl); 2632 2633 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) { 2634 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 2635 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 2636 } 2637 } 2638 } 2639 2640 /** 2641 * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering 2642 * @adapter: board private structure to initialize 2643 **/ 2644 static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter) 2645 { 2646 struct e1000_hw *hw = &adapter->hw; 2647 u32 rctl; 2648 2649 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { 2650 /* enable VLAN receive filtering */ 2651 rctl = er32(RCTL); 2652 rctl |= E1000_RCTL_VFE; 2653 rctl &= ~E1000_RCTL_CFIEN; 2654 ew32(RCTL, rctl); 2655 } 2656 } 2657 2658 /** 2659 * e1000e_vlan_strip_enable - helper to disable HW VLAN stripping 2660 * @adapter: board private structure to initialize 2661 **/ 2662 static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter) 2663 { 2664 struct e1000_hw *hw = &adapter->hw; 2665 u32 ctrl; 2666 2667 /* disable VLAN tag insert/strip */ 2668 ctrl = er32(CTRL); 2669 ctrl &= ~E1000_CTRL_VME; 2670 ew32(CTRL, ctrl); 2671 } 2672 2673 /** 2674 * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping 2675 * @adapter: board private structure to initialize 2676 **/ 2677 static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter) 2678 { 2679 struct e1000_hw *hw = &adapter->hw; 2680 u32 ctrl; 2681 2682 /* enable VLAN tag insert/strip */ 2683 ctrl = er32(CTRL); 2684 ctrl |= E1000_CTRL_VME; 2685 ew32(CTRL, ctrl); 2686 } 2687 2688 static void e1000_update_mng_vlan(struct e1000_adapter *adapter) 2689 { 2690 struct net_device *netdev = adapter->netdev; 2691 u16 vid = adapter->hw.mng_cookie.vlan_id; 2692 u16 old_vid = adapter->mng_vlan_id; 2693 2694 if (adapter->hw.mng_cookie.status & 2695 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { 2696 e1000_vlan_rx_add_vid(netdev, vid); 2697 adapter->mng_vlan_id = vid; 2698 } 2699 2700 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid)) 2701 e1000_vlan_rx_kill_vid(netdev, old_vid); 2702 } 2703 2704 static void e1000_restore_vlan(struct e1000_adapter *adapter) 2705 { 2706 u16 vid; 2707 2708 e1000_vlan_rx_add_vid(adapter->netdev, 0); 2709 2710 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 2711 e1000_vlan_rx_add_vid(adapter->netdev, vid); 2712 } 2713 2714 static void e1000_init_manageability_pt(struct e1000_adapter *adapter) 2715 { 2716 struct e1000_hw *hw = &adapter->hw; 2717 u32 manc, manc2h, mdef, i, j; 2718 2719 if (!(adapter->flags & FLAG_MNG_PT_ENABLED)) 2720 return; 2721 2722 manc = er32(MANC); 2723 2724 /* 2725 * enable receiving management packets to the host. this will probably 2726 * generate destination unreachable messages from the host OS, but 2727 * the packets will be handled on SMBUS 2728 */ 2729 manc |= E1000_MANC_EN_MNG2HOST; 2730 manc2h = er32(MANC2H); 2731 2732 switch (hw->mac.type) { 2733 default: 2734 manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664); 2735 break; 2736 case e1000_82574: 2737 case e1000_82583: 2738 /* 2739 * Check if IPMI pass-through decision filter already exists; 2740 * if so, enable it. 2741 */ 2742 for (i = 0, j = 0; i < 8; i++) { 2743 mdef = er32(MDEF(i)); 2744 2745 /* Ignore filters with anything other than IPMI ports */ 2746 if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) 2747 continue; 2748 2749 /* Enable this decision filter in MANC2H */ 2750 if (mdef) 2751 manc2h |= (1 << i); 2752 2753 j |= mdef; 2754 } 2755 2756 if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) 2757 break; 2758 2759 /* Create new decision filter in an empty filter */ 2760 for (i = 0, j = 0; i < 8; i++) 2761 if (er32(MDEF(i)) == 0) { 2762 ew32(MDEF(i), (E1000_MDEF_PORT_623 | 2763 E1000_MDEF_PORT_664)); 2764 manc2h |= (1 << 1); 2765 j++; 2766 break; 2767 } 2768 2769 if (!j) 2770 e_warn("Unable to create IPMI pass-through filter\n"); 2771 break; 2772 } 2773 2774 ew32(MANC2H, manc2h); 2775 ew32(MANC, manc); 2776 } 2777 2778 /** 2779 * e1000_configure_tx - Configure Transmit Unit after Reset 2780 * @adapter: board private structure 2781 * 2782 * Configure the Tx unit of the MAC after a reset. 2783 **/ 2784 static void e1000_configure_tx(struct e1000_adapter *adapter) 2785 { 2786 struct e1000_hw *hw = &adapter->hw; 2787 struct e1000_ring *tx_ring = adapter->tx_ring; 2788 u64 tdba; 2789 u32 tdlen, tarc; 2790 2791 /* Setup the HW Tx Head and Tail descriptor pointers */ 2792 tdba = tx_ring->dma; 2793 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); 2794 ew32(TDBAL, (tdba & DMA_BIT_MASK(32))); 2795 ew32(TDBAH, (tdba >> 32)); 2796 ew32(TDLEN, tdlen); 2797 ew32(TDH, 0); 2798 ew32(TDT, 0); 2799 tx_ring->head = adapter->hw.hw_addr + E1000_TDH; 2800 tx_ring->tail = adapter->hw.hw_addr + E1000_TDT; 2801 2802 /* Set the Tx Interrupt Delay register */ 2803 ew32(TIDV, adapter->tx_int_delay); 2804 /* Tx irq moderation */ 2805 ew32(TADV, adapter->tx_abs_int_delay); 2806 2807 if (adapter->flags2 & FLAG2_DMA_BURST) { 2808 u32 txdctl = er32(TXDCTL(0)); 2809 txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH | 2810 E1000_TXDCTL_WTHRESH); 2811 /* 2812 * set up some performance related parameters to encourage the 2813 * hardware to use the bus more efficiently in bursts, depends 2814 * on the tx_int_delay to be enabled, 2815 * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time 2816 * hthresh = 1 ==> prefetch when one or more available 2817 * pthresh = 0x1f ==> prefetch if internal cache 31 or less 2818 * BEWARE: this seems to work but should be considered first if 2819 * there are Tx hangs or other Tx related bugs 2820 */ 2821 txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE; 2822 ew32(TXDCTL(0), txdctl); 2823 } 2824 /* erratum work around: set txdctl the same for both queues */ 2825 ew32(TXDCTL(1), er32(TXDCTL(0))); 2826 2827 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { 2828 tarc = er32(TARC(0)); 2829 /* 2830 * set the speed mode bit, we'll clear it if we're not at 2831 * gigabit link later 2832 */ 2833 #define SPEED_MODE_BIT (1 << 21) 2834 tarc |= SPEED_MODE_BIT; 2835 ew32(TARC(0), tarc); 2836 } 2837 2838 /* errata: program both queues to unweighted RR */ 2839 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) { 2840 tarc = er32(TARC(0)); 2841 tarc |= 1; 2842 ew32(TARC(0), tarc); 2843 tarc = er32(TARC(1)); 2844 tarc |= 1; 2845 ew32(TARC(1), tarc); 2846 } 2847 2848 /* Setup Transmit Descriptor Settings for eop descriptor */ 2849 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; 2850 2851 /* only set IDE if we are delaying interrupts using the timers */ 2852 if (adapter->tx_int_delay) 2853 adapter->txd_cmd |= E1000_TXD_CMD_IDE; 2854 2855 /* enable Report Status bit */ 2856 adapter->txd_cmd |= E1000_TXD_CMD_RS; 2857 2858 hw->mac.ops.config_collision_dist(hw); 2859 } 2860 2861 /** 2862 * e1000_setup_rctl - configure the receive control registers 2863 * @adapter: Board private structure 2864 **/ 2865 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ 2866 (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) 2867 static void e1000_setup_rctl(struct e1000_adapter *adapter) 2868 { 2869 struct e1000_hw *hw = &adapter->hw; 2870 u32 rctl, rfctl; 2871 u32 pages = 0; 2872 2873 /* Workaround Si errata on 82579 - configure jumbo frame flow */ 2874 if (hw->mac.type == e1000_pch2lan) { 2875 s32 ret_val; 2876 2877 if (adapter->netdev->mtu > ETH_DATA_LEN) 2878 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); 2879 else 2880 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); 2881 2882 if (ret_val) 2883 e_dbg("failed to enable jumbo frame workaround mode\n"); 2884 } 2885 2886 /* Program MC offset vector base */ 2887 rctl = er32(RCTL); 2888 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 2889 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | 2890 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | 2891 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 2892 2893 /* Do not Store bad packets */ 2894 rctl &= ~E1000_RCTL_SBP; 2895 2896 /* Enable Long Packet receive */ 2897 if (adapter->netdev->mtu <= ETH_DATA_LEN) 2898 rctl &= ~E1000_RCTL_LPE; 2899 else 2900 rctl |= E1000_RCTL_LPE; 2901 2902 /* Some systems expect that the CRC is included in SMBUS traffic. The 2903 * hardware strips the CRC before sending to both SMBUS (BMC) and to 2904 * host memory when this is enabled 2905 */ 2906 if (adapter->flags2 & FLAG2_CRC_STRIPPING) 2907 rctl |= E1000_RCTL_SECRC; 2908 2909 /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */ 2910 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) { 2911 u16 phy_data; 2912 2913 e1e_rphy(hw, PHY_REG(770, 26), &phy_data); 2914 phy_data &= 0xfff8; 2915 phy_data |= (1 << 2); 2916 e1e_wphy(hw, PHY_REG(770, 26), phy_data); 2917 2918 e1e_rphy(hw, 22, &phy_data); 2919 phy_data &= 0x0fff; 2920 phy_data |= (1 << 14); 2921 e1e_wphy(hw, 0x10, 0x2823); 2922 e1e_wphy(hw, 0x11, 0x0003); 2923 e1e_wphy(hw, 22, phy_data); 2924 } 2925 2926 /* Setup buffer sizes */ 2927 rctl &= ~E1000_RCTL_SZ_4096; 2928 rctl |= E1000_RCTL_BSEX; 2929 switch (adapter->rx_buffer_len) { 2930 case 2048: 2931 default: 2932 rctl |= E1000_RCTL_SZ_2048; 2933 rctl &= ~E1000_RCTL_BSEX; 2934 break; 2935 case 4096: 2936 rctl |= E1000_RCTL_SZ_4096; 2937 break; 2938 case 8192: 2939 rctl |= E1000_RCTL_SZ_8192; 2940 break; 2941 case 16384: 2942 rctl |= E1000_RCTL_SZ_16384; 2943 break; 2944 } 2945 2946 /* Enable Extended Status in all Receive Descriptors */ 2947 rfctl = er32(RFCTL); 2948 rfctl |= E1000_RFCTL_EXTEN; 2949 2950 /* 2951 * 82571 and greater support packet-split where the protocol 2952 * header is placed in skb->data and the packet data is 2953 * placed in pages hanging off of skb_shinfo(skb)->nr_frags. 2954 * In the case of a non-split, skb->data is linearly filled, 2955 * followed by the page buffers. Therefore, skb->data is 2956 * sized to hold the largest protocol header. 2957 * 2958 * allocations using alloc_page take too long for regular MTU 2959 * so only enable packet split for jumbo frames 2960 * 2961 * Using pages when the page size is greater than 16k wastes 2962 * a lot of memory, since we allocate 3 pages at all times 2963 * per packet. 2964 */ 2965 pages = PAGE_USE_COUNT(adapter->netdev->mtu); 2966 if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) 2967 adapter->rx_ps_pages = pages; 2968 else 2969 adapter->rx_ps_pages = 0; 2970 2971 if (adapter->rx_ps_pages) { 2972 u32 psrctl = 0; 2973 2974 /* 2975 * disable packet split support for IPv6 extension headers, 2976 * because some malformed IPv6 headers can hang the Rx 2977 */ 2978 rfctl |= (E1000_RFCTL_IPV6_EX_DIS | 2979 E1000_RFCTL_NEW_IPV6_EXT_DIS); 2980 2981 /* Enable Packet split descriptors */ 2982 rctl |= E1000_RCTL_DTYP_PS; 2983 2984 psrctl |= adapter->rx_ps_bsize0 >> 2985 E1000_PSRCTL_BSIZE0_SHIFT; 2986 2987 switch (adapter->rx_ps_pages) { 2988 case 3: 2989 psrctl |= PAGE_SIZE << 2990 E1000_PSRCTL_BSIZE3_SHIFT; 2991 case 2: 2992 psrctl |= PAGE_SIZE << 2993 E1000_PSRCTL_BSIZE2_SHIFT; 2994 case 1: 2995 psrctl |= PAGE_SIZE >> 2996 E1000_PSRCTL_BSIZE1_SHIFT; 2997 break; 2998 } 2999 3000 ew32(PSRCTL, psrctl); 3001 } 3002 3003 /* This is useful for sniffing bad packets. */ 3004 if (adapter->netdev->features & NETIF_F_RXALL) { 3005 /* UPE and MPE will be handled by normal PROMISC logic 3006 * in e1000e_set_rx_mode */ 3007 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ 3008 E1000_RCTL_BAM | /* RX All Bcast Pkts */ 3009 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ 3010 3011 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ 3012 E1000_RCTL_DPF | /* Allow filtered pause */ 3013 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ 3014 /* Do not mess with E1000_CTRL_VME, it affects transmit as well, 3015 * and that breaks VLANs. 3016 */ 3017 } 3018 3019 ew32(RFCTL, rfctl); 3020 ew32(RCTL, rctl); 3021 /* just started the receive unit, no need to restart */ 3022 adapter->flags &= ~FLAG_RX_RESTART_NOW; 3023 } 3024 3025 /** 3026 * e1000_configure_rx - Configure Receive Unit after Reset 3027 * @adapter: board private structure 3028 * 3029 * Configure the Rx unit of the MAC after a reset. 3030 **/ 3031 static void e1000_configure_rx(struct e1000_adapter *adapter) 3032 { 3033 struct e1000_hw *hw = &adapter->hw; 3034 struct e1000_ring *rx_ring = adapter->rx_ring; 3035 u64 rdba; 3036 u32 rdlen, rctl, rxcsum, ctrl_ext; 3037 3038 if (adapter->rx_ps_pages) { 3039 /* this is a 32 byte descriptor */ 3040 rdlen = rx_ring->count * 3041 sizeof(union e1000_rx_desc_packet_split); 3042 adapter->clean_rx = e1000_clean_rx_irq_ps; 3043 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; 3044 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { 3045 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); 3046 adapter->clean_rx = e1000_clean_jumbo_rx_irq; 3047 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; 3048 } else { 3049 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); 3050 adapter->clean_rx = e1000_clean_rx_irq; 3051 adapter->alloc_rx_buf = e1000_alloc_rx_buffers; 3052 } 3053 3054 /* disable receives while setting up the descriptors */ 3055 rctl = er32(RCTL); 3056 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) 3057 ew32(RCTL, rctl & ~E1000_RCTL_EN); 3058 e1e_flush(); 3059 usleep_range(10000, 20000); 3060 3061 if (adapter->flags2 & FLAG2_DMA_BURST) { 3062 /* 3063 * set the writeback threshold (only takes effect if the RDTR 3064 * is set). set GRAN=1 and write back up to 0x4 worth, and 3065 * enable prefetching of 0x20 Rx descriptors 3066 * granularity = 01 3067 * wthresh = 04, 3068 * hthresh = 04, 3069 * pthresh = 0x20 3070 */ 3071 ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE); 3072 ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE); 3073 3074 /* 3075 * override the delay timers for enabling bursting, only if 3076 * the value was not set by the user via module options 3077 */ 3078 if (adapter->rx_int_delay == DEFAULT_RDTR) 3079 adapter->rx_int_delay = BURST_RDTR; 3080 if (adapter->rx_abs_int_delay == DEFAULT_RADV) 3081 adapter->rx_abs_int_delay = BURST_RADV; 3082 } 3083 3084 /* set the Receive Delay Timer Register */ 3085 ew32(RDTR, adapter->rx_int_delay); 3086 3087 /* irq moderation */ 3088 ew32(RADV, adapter->rx_abs_int_delay); 3089 if ((adapter->itr_setting != 0) && (adapter->itr != 0)) 3090 ew32(ITR, 1000000000 / (adapter->itr * 256)); 3091 3092 ctrl_ext = er32(CTRL_EXT); 3093 /* Auto-Mask interrupts upon ICR access */ 3094 ctrl_ext |= E1000_CTRL_EXT_IAME; 3095 ew32(IAM, 0xffffffff); 3096 ew32(CTRL_EXT, ctrl_ext); 3097 e1e_flush(); 3098 3099 /* 3100 * Setup the HW Rx Head and Tail Descriptor Pointers and 3101 * the Base and Length of the Rx Descriptor Ring 3102 */ 3103 rdba = rx_ring->dma; 3104 ew32(RDBAL, (rdba & DMA_BIT_MASK(32))); 3105 ew32(RDBAH, (rdba >> 32)); 3106 ew32(RDLEN, rdlen); 3107 ew32(RDH, 0); 3108 ew32(RDT, 0); 3109 rx_ring->head = adapter->hw.hw_addr + E1000_RDH; 3110 rx_ring->tail = adapter->hw.hw_addr + E1000_RDT; 3111 3112 /* Enable Receive Checksum Offload for TCP and UDP */ 3113 rxcsum = er32(RXCSUM); 3114 if (adapter->netdev->features & NETIF_F_RXCSUM) { 3115 rxcsum |= E1000_RXCSUM_TUOFL; 3116 3117 /* 3118 * IPv4 payload checksum for UDP fragments must be 3119 * used in conjunction with packet-split. 3120 */ 3121 if (adapter->rx_ps_pages) 3122 rxcsum |= E1000_RXCSUM_IPPCSE; 3123 } else { 3124 rxcsum &= ~E1000_RXCSUM_TUOFL; 3125 /* no need to clear IPPCSE as it defaults to 0 */ 3126 } 3127 ew32(RXCSUM, rxcsum); 3128 3129 if (adapter->hw.mac.type == e1000_pch2lan) { 3130 /* 3131 * With jumbo frames, excessive C-state transition 3132 * latencies result in dropped transactions. 3133 */ 3134 if (adapter->netdev->mtu > ETH_DATA_LEN) { 3135 u32 rxdctl = er32(RXDCTL(0)); 3136 ew32(RXDCTL(0), rxdctl | 0x3); 3137 pm_qos_update_request(&adapter->netdev->pm_qos_req, 55); 3138 } else { 3139 pm_qos_update_request(&adapter->netdev->pm_qos_req, 3140 PM_QOS_DEFAULT_VALUE); 3141 } 3142 } 3143 3144 /* Enable Receives */ 3145 ew32(RCTL, rctl); 3146 } 3147 3148 /** 3149 * e1000e_write_mc_addr_list - write multicast addresses to MTA 3150 * @netdev: network interface device structure 3151 * 3152 * Writes multicast address list to the MTA hash table. 3153 * Returns: -ENOMEM on failure 3154 * 0 on no addresses written 3155 * X on writing X addresses to MTA 3156 */ 3157 static int e1000e_write_mc_addr_list(struct net_device *netdev) 3158 { 3159 struct e1000_adapter *adapter = netdev_priv(netdev); 3160 struct e1000_hw *hw = &adapter->hw; 3161 struct netdev_hw_addr *ha; 3162 u8 *mta_list; 3163 int i; 3164 3165 if (netdev_mc_empty(netdev)) { 3166 /* nothing to program, so clear mc list */ 3167 hw->mac.ops.update_mc_addr_list(hw, NULL, 0); 3168 return 0; 3169 } 3170 3171 mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC); 3172 if (!mta_list) 3173 return -ENOMEM; 3174 3175 /* update_mc_addr_list expects a packed array of only addresses. */ 3176 i = 0; 3177 netdev_for_each_mc_addr(ha, netdev) 3178 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); 3179 3180 hw->mac.ops.update_mc_addr_list(hw, mta_list, i); 3181 kfree(mta_list); 3182 3183 return netdev_mc_count(netdev); 3184 } 3185 3186 /** 3187 * e1000e_write_uc_addr_list - write unicast addresses to RAR table 3188 * @netdev: network interface device structure 3189 * 3190 * Writes unicast address list to the RAR table. 3191 * Returns: -ENOMEM on failure/insufficient address space 3192 * 0 on no addresses written 3193 * X on writing X addresses to the RAR table 3194 **/ 3195 static int e1000e_write_uc_addr_list(struct net_device *netdev) 3196 { 3197 struct e1000_adapter *adapter = netdev_priv(netdev); 3198 struct e1000_hw *hw = &adapter->hw; 3199 unsigned int rar_entries = hw->mac.rar_entry_count; 3200 int count = 0; 3201 3202 /* save a rar entry for our hardware address */ 3203 rar_entries--; 3204 3205 /* save a rar entry for the LAA workaround */ 3206 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) 3207 rar_entries--; 3208 3209 /* return ENOMEM indicating insufficient memory for addresses */ 3210 if (netdev_uc_count(netdev) > rar_entries) 3211 return -ENOMEM; 3212 3213 if (!netdev_uc_empty(netdev) && rar_entries) { 3214 struct netdev_hw_addr *ha; 3215 3216 /* 3217 * write the addresses in reverse order to avoid write 3218 * combining 3219 */ 3220 netdev_for_each_uc_addr(ha, netdev) { 3221 if (!rar_entries) 3222 break; 3223 e1000e_rar_set(hw, ha->addr, rar_entries--); 3224 count++; 3225 } 3226 } 3227 3228 /* zero out the remaining RAR entries not used above */ 3229 for (; rar_entries > 0; rar_entries--) { 3230 ew32(RAH(rar_entries), 0); 3231 ew32(RAL(rar_entries), 0); 3232 } 3233 e1e_flush(); 3234 3235 return count; 3236 } 3237 3238 /** 3239 * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set 3240 * @netdev: network interface device structure 3241 * 3242 * The ndo_set_rx_mode entry point is called whenever the unicast or multicast 3243 * address list or the network interface flags are updated. This routine is 3244 * responsible for configuring the hardware for proper unicast, multicast, 3245 * promiscuous mode, and all-multi behavior. 3246 **/ 3247 static void e1000e_set_rx_mode(struct net_device *netdev) 3248 { 3249 struct e1000_adapter *adapter = netdev_priv(netdev); 3250 struct e1000_hw *hw = &adapter->hw; 3251 u32 rctl; 3252 3253 /* Check for Promiscuous and All Multicast modes */ 3254 rctl = er32(RCTL); 3255 3256 /* clear the affected bits */ 3257 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); 3258 3259 if (netdev->flags & IFF_PROMISC) { 3260 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 3261 /* Do not hardware filter VLANs in promisc mode */ 3262 e1000e_vlan_filter_disable(adapter); 3263 } else { 3264 int count; 3265 3266 if (netdev->flags & IFF_ALLMULTI) { 3267 rctl |= E1000_RCTL_MPE; 3268 } else { 3269 /* 3270 * Write addresses to the MTA, if the attempt fails 3271 * then we should just turn on promiscuous mode so 3272 * that we can at least receive multicast traffic 3273 */ 3274 count = e1000e_write_mc_addr_list(netdev); 3275 if (count < 0) 3276 rctl |= E1000_RCTL_MPE; 3277 } 3278 e1000e_vlan_filter_enable(adapter); 3279 /* 3280 * Write addresses to available RAR registers, if there is not 3281 * sufficient space to store all the addresses then enable 3282 * unicast promiscuous mode 3283 */ 3284 count = e1000e_write_uc_addr_list(netdev); 3285 if (count < 0) 3286 rctl |= E1000_RCTL_UPE; 3287 } 3288 3289 ew32(RCTL, rctl); 3290 3291 if (netdev->features & NETIF_F_HW_VLAN_RX) 3292 e1000e_vlan_strip_enable(adapter); 3293 else 3294 e1000e_vlan_strip_disable(adapter); 3295 } 3296 3297 static void e1000e_setup_rss_hash(struct e1000_adapter *adapter) 3298 { 3299 struct e1000_hw *hw = &adapter->hw; 3300 u32 mrqc, rxcsum; 3301 int i; 3302 static const u32 rsskey[10] = { 3303 0xda565a6d, 0xc20e5b25, 0x3d256741, 0xb08fa343, 0xcb2bcad0, 3304 0xb4307bae, 0xa32dcb77, 0x0cf23080, 0x3bb7426a, 0xfa01acbe 3305 }; 3306 3307 /* Fill out hash function seed */ 3308 for (i = 0; i < 10; i++) 3309 ew32(RSSRK(i), rsskey[i]); 3310 3311 /* Direct all traffic to queue 0 */ 3312 for (i = 0; i < 32; i++) 3313 ew32(RETA(i), 0); 3314 3315 /* 3316 * Disable raw packet checksumming so that RSS hash is placed in 3317 * descriptor on writeback. 3318 */ 3319 rxcsum = er32(RXCSUM); 3320 rxcsum |= E1000_RXCSUM_PCSD; 3321 3322 ew32(RXCSUM, rxcsum); 3323 3324 mrqc = (E1000_MRQC_RSS_FIELD_IPV4 | 3325 E1000_MRQC_RSS_FIELD_IPV4_TCP | 3326 E1000_MRQC_RSS_FIELD_IPV6 | 3327 E1000_MRQC_RSS_FIELD_IPV6_TCP | 3328 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX); 3329 3330 ew32(MRQC, mrqc); 3331 } 3332 3333 /** 3334 * e1000_configure - configure the hardware for Rx and Tx 3335 * @adapter: private board structure 3336 **/ 3337 static void e1000_configure(struct e1000_adapter *adapter) 3338 { 3339 struct e1000_ring *rx_ring = adapter->rx_ring; 3340 3341 e1000e_set_rx_mode(adapter->netdev); 3342 3343 e1000_restore_vlan(adapter); 3344 e1000_init_manageability_pt(adapter); 3345 3346 e1000_configure_tx(adapter); 3347 3348 if (adapter->netdev->features & NETIF_F_RXHASH) 3349 e1000e_setup_rss_hash(adapter); 3350 e1000_setup_rctl(adapter); 3351 e1000_configure_rx(adapter); 3352 adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL); 3353 } 3354 3355 /** 3356 * e1000e_power_up_phy - restore link in case the phy was powered down 3357 * @adapter: address of board private structure 3358 * 3359 * The phy may be powered down to save power and turn off link when the 3360 * driver is unloaded and wake on lan is not enabled (among others) 3361 * *** this routine MUST be followed by a call to e1000e_reset *** 3362 **/ 3363 void e1000e_power_up_phy(struct e1000_adapter *adapter) 3364 { 3365 if (adapter->hw.phy.ops.power_up) 3366 adapter->hw.phy.ops.power_up(&adapter->hw); 3367 3368 adapter->hw.mac.ops.setup_link(&adapter->hw); 3369 } 3370 3371 /** 3372 * e1000_power_down_phy - Power down the PHY 3373 * 3374 * Power down the PHY so no link is implied when interface is down. 3375 * The PHY cannot be powered down if management or WoL is active. 3376 */ 3377 static void e1000_power_down_phy(struct e1000_adapter *adapter) 3378 { 3379 /* WoL is enabled */ 3380 if (adapter->wol) 3381 return; 3382 3383 if (adapter->hw.phy.ops.power_down) 3384 adapter->hw.phy.ops.power_down(&adapter->hw); 3385 } 3386 3387 /** 3388 * e1000e_reset - bring the hardware into a known good state 3389 * 3390 * This function boots the hardware and enables some settings that 3391 * require a configuration cycle of the hardware - those cannot be 3392 * set/changed during runtime. After reset the device needs to be 3393 * properly configured for Rx, Tx etc. 3394 */ 3395 void e1000e_reset(struct e1000_adapter *adapter) 3396 { 3397 struct e1000_mac_info *mac = &adapter->hw.mac; 3398 struct e1000_fc_info *fc = &adapter->hw.fc; 3399 struct e1000_hw *hw = &adapter->hw; 3400 u32 tx_space, min_tx_space, min_rx_space; 3401 u32 pba = adapter->pba; 3402 u16 hwm; 3403 3404 /* reset Packet Buffer Allocation to default */ 3405 ew32(PBA, pba); 3406 3407 if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { 3408 /* 3409 * To maintain wire speed transmits, the Tx FIFO should be 3410 * large enough to accommodate two full transmit packets, 3411 * rounded up to the next 1KB and expressed in KB. Likewise, 3412 * the Rx FIFO should be large enough to accommodate at least 3413 * one full receive packet and is similarly rounded up and 3414 * expressed in KB. 3415 */ 3416 pba = er32(PBA); 3417 /* upper 16 bits has Tx packet buffer allocation size in KB */ 3418 tx_space = pba >> 16; 3419 /* lower 16 bits has Rx packet buffer allocation size in KB */ 3420 pba &= 0xffff; 3421 /* 3422 * the Tx fifo also stores 16 bytes of information about the Tx 3423 * but don't include ethernet FCS because hardware appends it 3424 */ 3425 min_tx_space = (adapter->max_frame_size + 3426 sizeof(struct e1000_tx_desc) - 3427 ETH_FCS_LEN) * 2; 3428 min_tx_space = ALIGN(min_tx_space, 1024); 3429 min_tx_space >>= 10; 3430 /* software strips receive CRC, so leave room for it */ 3431 min_rx_space = adapter->max_frame_size; 3432 min_rx_space = ALIGN(min_rx_space, 1024); 3433 min_rx_space >>= 10; 3434 3435 /* 3436 * If current Tx allocation is less than the min Tx FIFO size, 3437 * and the min Tx FIFO size is less than the current Rx FIFO 3438 * allocation, take space away from current Rx allocation 3439 */ 3440 if ((tx_space < min_tx_space) && 3441 ((min_tx_space - tx_space) < pba)) { 3442 pba -= min_tx_space - tx_space; 3443 3444 /* 3445 * if short on Rx space, Rx wins and must trump Tx 3446 * adjustment or use Early Receive if available 3447 */ 3448 if (pba < min_rx_space) 3449 pba = min_rx_space; 3450 } 3451 3452 ew32(PBA, pba); 3453 } 3454 3455 /* 3456 * flow control settings 3457 * 3458 * The high water mark must be low enough to fit one full frame 3459 * (or the size used for early receive) above it in the Rx FIFO. 3460 * Set it to the lower of: 3461 * - 90% of the Rx FIFO size, and 3462 * - the full Rx FIFO size minus one full frame 3463 */ 3464 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) 3465 fc->pause_time = 0xFFFF; 3466 else 3467 fc->pause_time = E1000_FC_PAUSE_TIME; 3468 fc->send_xon = true; 3469 fc->current_mode = fc->requested_mode; 3470 3471 switch (hw->mac.type) { 3472 case e1000_ich9lan: 3473 case e1000_ich10lan: 3474 if (adapter->netdev->mtu > ETH_DATA_LEN) { 3475 pba = 14; 3476 ew32(PBA, pba); 3477 fc->high_water = 0x2800; 3478 fc->low_water = fc->high_water - 8; 3479 break; 3480 } 3481 /* fall-through */ 3482 default: 3483 hwm = min(((pba << 10) * 9 / 10), 3484 ((pba << 10) - adapter->max_frame_size)); 3485 3486 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ 3487 fc->low_water = fc->high_water - 8; 3488 break; 3489 case e1000_pchlan: 3490 /* 3491 * Workaround PCH LOM adapter hangs with certain network 3492 * loads. If hangs persist, try disabling Tx flow control. 3493 */ 3494 if (adapter->netdev->mtu > ETH_DATA_LEN) { 3495 fc->high_water = 0x3500; 3496 fc->low_water = 0x1500; 3497 } else { 3498 fc->high_water = 0x5000; 3499 fc->low_water = 0x3000; 3500 } 3501 fc->refresh_time = 0x1000; 3502 break; 3503 case e1000_pch2lan: 3504 fc->high_water = 0x05C20; 3505 fc->low_water = 0x05048; 3506 fc->pause_time = 0x0650; 3507 fc->refresh_time = 0x0400; 3508 if (adapter->netdev->mtu > ETH_DATA_LEN) { 3509 pba = 14; 3510 ew32(PBA, pba); 3511 } 3512 break; 3513 } 3514 3515 /* 3516 * Disable Adaptive Interrupt Moderation if 2 full packets cannot 3517 * fit in receive buffer. 3518 */ 3519 if (adapter->itr_setting & 0x3) { 3520 if ((adapter->max_frame_size * 2) > (pba << 10)) { 3521 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) { 3522 dev_info(&adapter->pdev->dev, 3523 "Interrupt Throttle Rate turned off\n"); 3524 adapter->flags2 |= FLAG2_DISABLE_AIM; 3525 ew32(ITR, 0); 3526 } 3527 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) { 3528 dev_info(&adapter->pdev->dev, 3529 "Interrupt Throttle Rate turned on\n"); 3530 adapter->flags2 &= ~FLAG2_DISABLE_AIM; 3531 adapter->itr = 20000; 3532 ew32(ITR, 1000000000 / (adapter->itr * 256)); 3533 } 3534 } 3535 3536 /* Allow time for pending master requests to run */ 3537 mac->ops.reset_hw(hw); 3538 3539 /* 3540 * For parts with AMT enabled, let the firmware know 3541 * that the network interface is in control 3542 */ 3543 if (adapter->flags & FLAG_HAS_AMT) 3544 e1000e_get_hw_control(adapter); 3545 3546 ew32(WUC, 0); 3547 3548 if (mac->ops.init_hw(hw)) 3549 e_err("Hardware Error\n"); 3550 3551 e1000_update_mng_vlan(adapter); 3552 3553 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 3554 ew32(VET, ETH_P_8021Q); 3555 3556 e1000e_reset_adaptive(hw); 3557 3558 if (!netif_running(adapter->netdev) && 3559 !test_bit(__E1000_TESTING, &adapter->state)) { 3560 e1000_power_down_phy(adapter); 3561 return; 3562 } 3563 3564 e1000_get_phy_info(hw); 3565 3566 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && 3567 !(adapter->flags & FLAG_SMART_POWER_DOWN)) { 3568 u16 phy_data = 0; 3569 /* 3570 * speed up time to link by disabling smart power down, ignore 3571 * the return value of this function because there is nothing 3572 * different we would do if it failed 3573 */ 3574 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); 3575 phy_data &= ~IGP02E1000_PM_SPD; 3576 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); 3577 } 3578 } 3579 3580 int e1000e_up(struct e1000_adapter *adapter) 3581 { 3582 struct e1000_hw *hw = &adapter->hw; 3583 3584 /* hardware has been reset, we need to reload some things */ 3585 e1000_configure(adapter); 3586 3587 clear_bit(__E1000_DOWN, &adapter->state); 3588 3589 if (adapter->msix_entries) 3590 e1000_configure_msix(adapter); 3591 e1000_irq_enable(adapter); 3592 3593 netif_start_queue(adapter->netdev); 3594 3595 /* fire a link change interrupt to start the watchdog */ 3596 if (adapter->msix_entries) 3597 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); 3598 else 3599 ew32(ICS, E1000_ICS_LSC); 3600 3601 return 0; 3602 } 3603 3604 static void e1000e_flush_descriptors(struct e1000_adapter *adapter) 3605 { 3606 struct e1000_hw *hw = &adapter->hw; 3607 3608 if (!(adapter->flags2 & FLAG2_DMA_BURST)) 3609 return; 3610 3611 /* flush pending descriptor writebacks to memory */ 3612 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); 3613 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); 3614 3615 /* execute the writes immediately */ 3616 e1e_flush(); 3617 } 3618 3619 static void e1000e_update_stats(struct e1000_adapter *adapter); 3620 3621 void e1000e_down(struct e1000_adapter *adapter) 3622 { 3623 struct net_device *netdev = adapter->netdev; 3624 struct e1000_hw *hw = &adapter->hw; 3625 u32 tctl, rctl; 3626 3627 /* 3628 * signal that we're down so the interrupt handler does not 3629 * reschedule our watchdog timer 3630 */ 3631 set_bit(__E1000_DOWN, &adapter->state); 3632 3633 /* disable receives in the hardware */ 3634 rctl = er32(RCTL); 3635 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) 3636 ew32(RCTL, rctl & ~E1000_RCTL_EN); 3637 /* flush and sleep below */ 3638 3639 netif_stop_queue(netdev); 3640 3641 /* disable transmits in the hardware */ 3642 tctl = er32(TCTL); 3643 tctl &= ~E1000_TCTL_EN; 3644 ew32(TCTL, tctl); 3645 3646 /* flush both disables and wait for them to finish */ 3647 e1e_flush(); 3648 usleep_range(10000, 20000); 3649 3650 e1000_irq_disable(adapter); 3651 3652 del_timer_sync(&adapter->watchdog_timer); 3653 del_timer_sync(&adapter->phy_info_timer); 3654 3655 netif_carrier_off(netdev); 3656 3657 spin_lock(&adapter->stats64_lock); 3658 e1000e_update_stats(adapter); 3659 spin_unlock(&adapter->stats64_lock); 3660 3661 e1000e_flush_descriptors(adapter); 3662 e1000_clean_tx_ring(adapter->tx_ring); 3663 e1000_clean_rx_ring(adapter->rx_ring); 3664 3665 adapter->link_speed = 0; 3666 adapter->link_duplex = 0; 3667 3668 if (!pci_channel_offline(adapter->pdev)) 3669 e1000e_reset(adapter); 3670 3671 /* 3672 * TODO: for power management, we could drop the link and 3673 * pci_disable_device here. 3674 */ 3675 } 3676 3677 void e1000e_reinit_locked(struct e1000_adapter *adapter) 3678 { 3679 might_sleep(); 3680 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 3681 usleep_range(1000, 2000); 3682 e1000e_down(adapter); 3683 e1000e_up(adapter); 3684 clear_bit(__E1000_RESETTING, &adapter->state); 3685 } 3686 3687 /** 3688 * e1000_sw_init - Initialize general software structures (struct e1000_adapter) 3689 * @adapter: board private structure to initialize 3690 * 3691 * e1000_sw_init initializes the Adapter private data structure. 3692 * Fields are initialized based on PCI device information and 3693 * OS network device settings (MTU size). 3694 **/ 3695 static int __devinit e1000_sw_init(struct e1000_adapter *adapter) 3696 { 3697 struct net_device *netdev = adapter->netdev; 3698 3699 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; 3700 adapter->rx_ps_bsize0 = 128; 3701 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 3702 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 3703 adapter->tx_ring_count = E1000_DEFAULT_TXD; 3704 adapter->rx_ring_count = E1000_DEFAULT_RXD; 3705 3706 spin_lock_init(&adapter->stats64_lock); 3707 3708 e1000e_set_interrupt_capability(adapter); 3709 3710 if (e1000_alloc_queues(adapter)) 3711 return -ENOMEM; 3712 3713 /* Explicitly disable IRQ since the NIC can be in any state. */ 3714 e1000_irq_disable(adapter); 3715 3716 set_bit(__E1000_DOWN, &adapter->state); 3717 return 0; 3718 } 3719 3720 /** 3721 * e1000_intr_msi_test - Interrupt Handler 3722 * @irq: interrupt number 3723 * @data: pointer to a network interface device structure 3724 **/ 3725 static irqreturn_t e1000_intr_msi_test(int irq, void *data) 3726 { 3727 struct net_device *netdev = data; 3728 struct e1000_adapter *adapter = netdev_priv(netdev); 3729 struct e1000_hw *hw = &adapter->hw; 3730 u32 icr = er32(ICR); 3731 3732 e_dbg("icr is %08X\n", icr); 3733 if (icr & E1000_ICR_RXSEQ) { 3734 adapter->flags &= ~FLAG_MSI_TEST_FAILED; 3735 wmb(); 3736 } 3737 3738 return IRQ_HANDLED; 3739 } 3740 3741 /** 3742 * e1000_test_msi_interrupt - Returns 0 for successful test 3743 * @adapter: board private struct 3744 * 3745 * code flow taken from tg3.c 3746 **/ 3747 static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) 3748 { 3749 struct net_device *netdev = adapter->netdev; 3750 struct e1000_hw *hw = &adapter->hw; 3751 int err; 3752 3753 /* poll_enable hasn't been called yet, so don't need disable */ 3754 /* clear any pending events */ 3755 er32(ICR); 3756 3757 /* free the real vector and request a test handler */ 3758 e1000_free_irq(adapter); 3759 e1000e_reset_interrupt_capability(adapter); 3760 3761 /* Assume that the test fails, if it succeeds then the test 3762 * MSI irq handler will unset this flag */ 3763 adapter->flags |= FLAG_MSI_TEST_FAILED; 3764 3765 err = pci_enable_msi(adapter->pdev); 3766 if (err) 3767 goto msi_test_failed; 3768 3769 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0, 3770 netdev->name, netdev); 3771 if (err) { 3772 pci_disable_msi(adapter->pdev); 3773 goto msi_test_failed; 3774 } 3775 3776 wmb(); 3777 3778 e1000_irq_enable(adapter); 3779 3780 /* fire an unusual interrupt on the test handler */ 3781 ew32(ICS, E1000_ICS_RXSEQ); 3782 e1e_flush(); 3783 msleep(50); 3784 3785 e1000_irq_disable(adapter); 3786 3787 rmb(); 3788 3789 if (adapter->flags & FLAG_MSI_TEST_FAILED) { 3790 adapter->int_mode = E1000E_INT_MODE_LEGACY; 3791 e_info("MSI interrupt test failed, using legacy interrupt.\n"); 3792 } else { 3793 e_dbg("MSI interrupt test succeeded!\n"); 3794 } 3795 3796 free_irq(adapter->pdev->irq, netdev); 3797 pci_disable_msi(adapter->pdev); 3798 3799 msi_test_failed: 3800 e1000e_set_interrupt_capability(adapter); 3801 return e1000_request_irq(adapter); 3802 } 3803 3804 /** 3805 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored 3806 * @adapter: board private struct 3807 * 3808 * code flow taken from tg3.c, called with e1000 interrupts disabled. 3809 **/ 3810 static int e1000_test_msi(struct e1000_adapter *adapter) 3811 { 3812 int err; 3813 u16 pci_cmd; 3814 3815 if (!(adapter->flags & FLAG_MSI_ENABLED)) 3816 return 0; 3817 3818 /* disable SERR in case the MSI write causes a master abort */ 3819 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); 3820 if (pci_cmd & PCI_COMMAND_SERR) 3821 pci_write_config_word(adapter->pdev, PCI_COMMAND, 3822 pci_cmd & ~PCI_COMMAND_SERR); 3823 3824 err = e1000_test_msi_interrupt(adapter); 3825 3826 /* re-enable SERR */ 3827 if (pci_cmd & PCI_COMMAND_SERR) { 3828 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); 3829 pci_cmd |= PCI_COMMAND_SERR; 3830 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); 3831 } 3832 3833 return err; 3834 } 3835 3836 /** 3837 * e1000_open - Called when a network interface is made active 3838 * @netdev: network interface device structure 3839 * 3840 * Returns 0 on success, negative value on failure 3841 * 3842 * The open entry point is called when a network interface is made 3843 * active by the system (IFF_UP). At this point all resources needed 3844 * for transmit and receive operations are allocated, the interrupt 3845 * handler is registered with the OS, the watchdog timer is started, 3846 * and the stack is notified that the interface is ready. 3847 **/ 3848 static int e1000_open(struct net_device *netdev) 3849 { 3850 struct e1000_adapter *adapter = netdev_priv(netdev); 3851 struct e1000_hw *hw = &adapter->hw; 3852 struct pci_dev *pdev = adapter->pdev; 3853 int err; 3854 3855 /* disallow open during test */ 3856 if (test_bit(__E1000_TESTING, &adapter->state)) 3857 return -EBUSY; 3858 3859 pm_runtime_get_sync(&pdev->dev); 3860 3861 netif_carrier_off(netdev); 3862 3863 /* allocate transmit descriptors */ 3864 err = e1000e_setup_tx_resources(adapter->tx_ring); 3865 if (err) 3866 goto err_setup_tx; 3867 3868 /* allocate receive descriptors */ 3869 err = e1000e_setup_rx_resources(adapter->rx_ring); 3870 if (err) 3871 goto err_setup_rx; 3872 3873 /* 3874 * If AMT is enabled, let the firmware know that the network 3875 * interface is now open and reset the part to a known state. 3876 */ 3877 if (adapter->flags & FLAG_HAS_AMT) { 3878 e1000e_get_hw_control(adapter); 3879 e1000e_reset(adapter); 3880 } 3881 3882 e1000e_power_up_phy(adapter); 3883 3884 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 3885 if ((adapter->hw.mng_cookie.status & 3886 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) 3887 e1000_update_mng_vlan(adapter); 3888 3889 /* DMA latency requirement to workaround jumbo issue */ 3890 if (adapter->hw.mac.type == e1000_pch2lan) 3891 pm_qos_add_request(&adapter->netdev->pm_qos_req, 3892 PM_QOS_CPU_DMA_LATENCY, 3893 PM_QOS_DEFAULT_VALUE); 3894 3895 /* 3896 * before we allocate an interrupt, we must be ready to handle it. 3897 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 3898 * as soon as we call pci_request_irq, so we have to setup our 3899 * clean_rx handler before we do so. 3900 */ 3901 e1000_configure(adapter); 3902 3903 err = e1000_request_irq(adapter); 3904 if (err) 3905 goto err_req_irq; 3906 3907 /* 3908 * Work around PCIe errata with MSI interrupts causing some chipsets to 3909 * ignore e1000e MSI messages, which means we need to test our MSI 3910 * interrupt now 3911 */ 3912 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) { 3913 err = e1000_test_msi(adapter); 3914 if (err) { 3915 e_err("Interrupt allocation failed\n"); 3916 goto err_req_irq; 3917 } 3918 } 3919 3920 /* From here on the code is the same as e1000e_up() */ 3921 clear_bit(__E1000_DOWN, &adapter->state); 3922 3923 napi_enable(&adapter->napi); 3924 3925 e1000_irq_enable(adapter); 3926 3927 adapter->tx_hang_recheck = false; 3928 netif_start_queue(netdev); 3929 3930 adapter->idle_check = true; 3931 pm_runtime_put(&pdev->dev); 3932 3933 /* fire a link status change interrupt to start the watchdog */ 3934 if (adapter->msix_entries) 3935 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); 3936 else 3937 ew32(ICS, E1000_ICS_LSC); 3938 3939 return 0; 3940 3941 err_req_irq: 3942 e1000e_release_hw_control(adapter); 3943 e1000_power_down_phy(adapter); 3944 e1000e_free_rx_resources(adapter->rx_ring); 3945 err_setup_rx: 3946 e1000e_free_tx_resources(adapter->tx_ring); 3947 err_setup_tx: 3948 e1000e_reset(adapter); 3949 pm_runtime_put_sync(&pdev->dev); 3950 3951 return err; 3952 } 3953 3954 /** 3955 * e1000_close - Disables a network interface 3956 * @netdev: network interface device structure 3957 * 3958 * Returns 0, this is not allowed to fail 3959 * 3960 * The close entry point is called when an interface is de-activated 3961 * by the OS. The hardware is still under the drivers control, but 3962 * needs to be disabled. A global MAC reset is issued to stop the 3963 * hardware, and all transmit and receive resources are freed. 3964 **/ 3965 static int e1000_close(struct net_device *netdev) 3966 { 3967 struct e1000_adapter *adapter = netdev_priv(netdev); 3968 struct pci_dev *pdev = adapter->pdev; 3969 3970 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); 3971 3972 pm_runtime_get_sync(&pdev->dev); 3973 3974 napi_disable(&adapter->napi); 3975 3976 if (!test_bit(__E1000_DOWN, &adapter->state)) { 3977 e1000e_down(adapter); 3978 e1000_free_irq(adapter); 3979 } 3980 e1000_power_down_phy(adapter); 3981 3982 e1000e_free_tx_resources(adapter->tx_ring); 3983 e1000e_free_rx_resources(adapter->rx_ring); 3984 3985 /* 3986 * kill manageability vlan ID if supported, but not if a vlan with 3987 * the same ID is registered on the host OS (let 8021q kill it) 3988 */ 3989 if (adapter->hw.mng_cookie.status & 3990 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) 3991 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 3992 3993 /* 3994 * If AMT is enabled, let the firmware know that the network 3995 * interface is now closed 3996 */ 3997 if ((adapter->flags & FLAG_HAS_AMT) && 3998 !test_bit(__E1000_TESTING, &adapter->state)) 3999 e1000e_release_hw_control(adapter); 4000 4001 if (adapter->hw.mac.type == e1000_pch2lan) 4002 pm_qos_remove_request(&adapter->netdev->pm_qos_req); 4003 4004 pm_runtime_put_sync(&pdev->dev); 4005 4006 return 0; 4007 } 4008 /** 4009 * e1000_set_mac - Change the Ethernet Address of the NIC 4010 * @netdev: network interface device structure 4011 * @p: pointer to an address structure 4012 * 4013 * Returns 0 on success, negative on failure 4014 **/ 4015 static int e1000_set_mac(struct net_device *netdev, void *p) 4016 { 4017 struct e1000_adapter *adapter = netdev_priv(netdev); 4018 struct sockaddr *addr = p; 4019 4020 if (!is_valid_ether_addr(addr->sa_data)) 4021 return -EADDRNOTAVAIL; 4022 4023 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 4024 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); 4025 4026 e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); 4027 4028 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { 4029 /* activate the work around */ 4030 e1000e_set_laa_state_82571(&adapter->hw, 1); 4031 4032 /* 4033 * Hold a copy of the LAA in RAR[14] This is done so that 4034 * between the time RAR[0] gets clobbered and the time it 4035 * gets fixed (in e1000_watchdog), the actual LAA is in one 4036 * of the RARs and no incoming packets directed to this port 4037 * are dropped. Eventually the LAA will be in RAR[0] and 4038 * RAR[14] 4039 */ 4040 e1000e_rar_set(&adapter->hw, 4041 adapter->hw.mac.addr, 4042 adapter->hw.mac.rar_entry_count - 1); 4043 } 4044 4045 return 0; 4046 } 4047 4048 /** 4049 * e1000e_update_phy_task - work thread to update phy 4050 * @work: pointer to our work struct 4051 * 4052 * this worker thread exists because we must acquire a 4053 * semaphore to read the phy, which we could msleep while 4054 * waiting for it, and we can't msleep in a timer. 4055 **/ 4056 static void e1000e_update_phy_task(struct work_struct *work) 4057 { 4058 struct e1000_adapter *adapter = container_of(work, 4059 struct e1000_adapter, update_phy_task); 4060 4061 if (test_bit(__E1000_DOWN, &adapter->state)) 4062 return; 4063 4064 e1000_get_phy_info(&adapter->hw); 4065 } 4066 4067 /* 4068 * Need to wait a few seconds after link up to get diagnostic information from 4069 * the phy 4070 */ 4071 static void e1000_update_phy_info(unsigned long data) 4072 { 4073 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 4074 4075 if (test_bit(__E1000_DOWN, &adapter->state)) 4076 return; 4077 4078 schedule_work(&adapter->update_phy_task); 4079 } 4080 4081 /** 4082 * e1000e_update_phy_stats - Update the PHY statistics counters 4083 * @adapter: board private structure 4084 * 4085 * Read/clear the upper 16-bit PHY registers and read/accumulate lower 4086 **/ 4087 static void e1000e_update_phy_stats(struct e1000_adapter *adapter) 4088 { 4089 struct e1000_hw *hw = &adapter->hw; 4090 s32 ret_val; 4091 u16 phy_data; 4092 4093 ret_val = hw->phy.ops.acquire(hw); 4094 if (ret_val) 4095 return; 4096 4097 /* 4098 * A page set is expensive so check if already on desired page. 4099 * If not, set to the page with the PHY status registers. 4100 */ 4101 hw->phy.addr = 1; 4102 ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 4103 &phy_data); 4104 if (ret_val) 4105 goto release; 4106 if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) { 4107 ret_val = hw->phy.ops.set_page(hw, 4108 HV_STATS_PAGE << IGP_PAGE_SHIFT); 4109 if (ret_val) 4110 goto release; 4111 } 4112 4113 /* Single Collision Count */ 4114 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); 4115 ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); 4116 if (!ret_val) 4117 adapter->stats.scc += phy_data; 4118 4119 /* Excessive Collision Count */ 4120 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); 4121 ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); 4122 if (!ret_val) 4123 adapter->stats.ecol += phy_data; 4124 4125 /* Multiple Collision Count */ 4126 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); 4127 ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); 4128 if (!ret_val) 4129 adapter->stats.mcc += phy_data; 4130 4131 /* Late Collision Count */ 4132 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); 4133 ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); 4134 if (!ret_val) 4135 adapter->stats.latecol += phy_data; 4136 4137 /* Collision Count - also used for adaptive IFS */ 4138 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); 4139 ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); 4140 if (!ret_val) 4141 hw->mac.collision_delta = phy_data; 4142 4143 /* Defer Count */ 4144 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); 4145 ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); 4146 if (!ret_val) 4147 adapter->stats.dc += phy_data; 4148 4149 /* Transmit with no CRS */ 4150 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); 4151 ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); 4152 if (!ret_val) 4153 adapter->stats.tncrs += phy_data; 4154 4155 release: 4156 hw->phy.ops.release(hw); 4157 } 4158 4159 /** 4160 * e1000e_update_stats - Update the board statistics counters 4161 * @adapter: board private structure 4162 **/ 4163 static void e1000e_update_stats(struct e1000_adapter *adapter) 4164 { 4165 struct net_device *netdev = adapter->netdev; 4166 struct e1000_hw *hw = &adapter->hw; 4167 struct pci_dev *pdev = adapter->pdev; 4168 4169 /* 4170 * Prevent stats update while adapter is being reset, or if the pci 4171 * connection is down. 4172 */ 4173 if (adapter->link_speed == 0) 4174 return; 4175 if (pci_channel_offline(pdev)) 4176 return; 4177 4178 adapter->stats.crcerrs += er32(CRCERRS); 4179 adapter->stats.gprc += er32(GPRC); 4180 adapter->stats.gorc += er32(GORCL); 4181 er32(GORCH); /* Clear gorc */ 4182 adapter->stats.bprc += er32(BPRC); 4183 adapter->stats.mprc += er32(MPRC); 4184 adapter->stats.roc += er32(ROC); 4185 4186 adapter->stats.mpc += er32(MPC); 4187 4188 /* Half-duplex statistics */ 4189 if (adapter->link_duplex == HALF_DUPLEX) { 4190 if (adapter->flags2 & FLAG2_HAS_PHY_STATS) { 4191 e1000e_update_phy_stats(adapter); 4192 } else { 4193 adapter->stats.scc += er32(SCC); 4194 adapter->stats.ecol += er32(ECOL); 4195 adapter->stats.mcc += er32(MCC); 4196 adapter->stats.latecol += er32(LATECOL); 4197 adapter->stats.dc += er32(DC); 4198 4199 hw->mac.collision_delta = er32(COLC); 4200 4201 if ((hw->mac.type != e1000_82574) && 4202 (hw->mac.type != e1000_82583)) 4203 adapter->stats.tncrs += er32(TNCRS); 4204 } 4205 adapter->stats.colc += hw->mac.collision_delta; 4206 } 4207 4208 adapter->stats.xonrxc += er32(XONRXC); 4209 adapter->stats.xontxc += er32(XONTXC); 4210 adapter->stats.xoffrxc += er32(XOFFRXC); 4211 adapter->stats.xofftxc += er32(XOFFTXC); 4212 adapter->stats.gptc += er32(GPTC); 4213 adapter->stats.gotc += er32(GOTCL); 4214 er32(GOTCH); /* Clear gotc */ 4215 adapter->stats.rnbc += er32(RNBC); 4216 adapter->stats.ruc += er32(RUC); 4217 4218 adapter->stats.mptc += er32(MPTC); 4219 adapter->stats.bptc += er32(BPTC); 4220 4221 /* used for adaptive IFS */ 4222 4223 hw->mac.tx_packet_delta = er32(TPT); 4224 adapter->stats.tpt += hw->mac.tx_packet_delta; 4225 4226 adapter->stats.algnerrc += er32(ALGNERRC); 4227 adapter->stats.rxerrc += er32(RXERRC); 4228 adapter->stats.cexterr += er32(CEXTERR); 4229 adapter->stats.tsctc += er32(TSCTC); 4230 adapter->stats.tsctfc += er32(TSCTFC); 4231 4232 /* Fill out the OS statistics structure */ 4233 netdev->stats.multicast = adapter->stats.mprc; 4234 netdev->stats.collisions = adapter->stats.colc; 4235 4236 /* Rx Errors */ 4237 4238 /* 4239 * RLEC on some newer hardware can be incorrect so build 4240 * our own version based on RUC and ROC 4241 */ 4242 netdev->stats.rx_errors = adapter->stats.rxerrc + 4243 adapter->stats.crcerrs + adapter->stats.algnerrc + 4244 adapter->stats.ruc + adapter->stats.roc + 4245 adapter->stats.cexterr; 4246 netdev->stats.rx_length_errors = adapter->stats.ruc + 4247 adapter->stats.roc; 4248 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; 4249 netdev->stats.rx_frame_errors = adapter->stats.algnerrc; 4250 netdev->stats.rx_missed_errors = adapter->stats.mpc; 4251 4252 /* Tx Errors */ 4253 netdev->stats.tx_errors = adapter->stats.ecol + 4254 adapter->stats.latecol; 4255 netdev->stats.tx_aborted_errors = adapter->stats.ecol; 4256 netdev->stats.tx_window_errors = adapter->stats.latecol; 4257 netdev->stats.tx_carrier_errors = adapter->stats.tncrs; 4258 4259 /* Tx Dropped needs to be maintained elsewhere */ 4260 4261 /* Management Stats */ 4262 adapter->stats.mgptc += er32(MGTPTC); 4263 adapter->stats.mgprc += er32(MGTPRC); 4264 adapter->stats.mgpdc += er32(MGTPDC); 4265 } 4266 4267 /** 4268 * e1000_phy_read_status - Update the PHY register status snapshot 4269 * @adapter: board private structure 4270 **/ 4271 static void e1000_phy_read_status(struct e1000_adapter *adapter) 4272 { 4273 struct e1000_hw *hw = &adapter->hw; 4274 struct e1000_phy_regs *phy = &adapter->phy_regs; 4275 4276 if ((er32(STATUS) & E1000_STATUS_LU) && 4277 (adapter->hw.phy.media_type == e1000_media_type_copper)) { 4278 int ret_val; 4279 4280 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr); 4281 ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr); 4282 ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise); 4283 ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa); 4284 ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion); 4285 ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000); 4286 ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000); 4287 ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus); 4288 if (ret_val) 4289 e_warn("Error reading PHY register\n"); 4290 } else { 4291 /* 4292 * Do not read PHY registers if link is not up 4293 * Set values to typical power-on defaults 4294 */ 4295 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX); 4296 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | 4297 BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE | 4298 BMSR_ERCAP); 4299 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP | 4300 ADVERTISE_ALL | ADVERTISE_CSMA); 4301 phy->lpa = 0; 4302 phy->expansion = EXPANSION_ENABLENPAGE; 4303 phy->ctrl1000 = ADVERTISE_1000FULL; 4304 phy->stat1000 = 0; 4305 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF); 4306 } 4307 } 4308 4309 static void e1000_print_link_info(struct e1000_adapter *adapter) 4310 { 4311 struct e1000_hw *hw = &adapter->hw; 4312 u32 ctrl = er32(CTRL); 4313 4314 /* Link status message must follow this format for user tools */ 4315 printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", 4316 adapter->netdev->name, 4317 adapter->link_speed, 4318 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half", 4319 (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" : 4320 (ctrl & E1000_CTRL_RFCE) ? "Rx" : 4321 (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None"); 4322 } 4323 4324 static bool e1000e_has_link(struct e1000_adapter *adapter) 4325 { 4326 struct e1000_hw *hw = &adapter->hw; 4327 bool link_active = false; 4328 s32 ret_val = 0; 4329 4330 /* 4331 * get_link_status is set on LSC (link status) interrupt or 4332 * Rx sequence error interrupt. get_link_status will stay 4333 * false until the check_for_link establishes link 4334 * for copper adapters ONLY 4335 */ 4336 switch (hw->phy.media_type) { 4337 case e1000_media_type_copper: 4338 if (hw->mac.get_link_status) { 4339 ret_val = hw->mac.ops.check_for_link(hw); 4340 link_active = !hw->mac.get_link_status; 4341 } else { 4342 link_active = true; 4343 } 4344 break; 4345 case e1000_media_type_fiber: 4346 ret_val = hw->mac.ops.check_for_link(hw); 4347 link_active = !!(er32(STATUS) & E1000_STATUS_LU); 4348 break; 4349 case e1000_media_type_internal_serdes: 4350 ret_val = hw->mac.ops.check_for_link(hw); 4351 link_active = adapter->hw.mac.serdes_has_link; 4352 break; 4353 default: 4354 case e1000_media_type_unknown: 4355 break; 4356 } 4357 4358 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && 4359 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { 4360 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ 4361 e_info("Gigabit has been disabled, downgrading speed\n"); 4362 } 4363 4364 return link_active; 4365 } 4366 4367 static void e1000e_enable_receives(struct e1000_adapter *adapter) 4368 { 4369 /* make sure the receive unit is started */ 4370 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) && 4371 (adapter->flags & FLAG_RX_RESTART_NOW)) { 4372 struct e1000_hw *hw = &adapter->hw; 4373 u32 rctl = er32(RCTL); 4374 ew32(RCTL, rctl | E1000_RCTL_EN); 4375 adapter->flags &= ~FLAG_RX_RESTART_NOW; 4376 } 4377 } 4378 4379 static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) 4380 { 4381 struct e1000_hw *hw = &adapter->hw; 4382 4383 /* 4384 * With 82574 controllers, PHY needs to be checked periodically 4385 * for hung state and reset, if two calls return true 4386 */ 4387 if (e1000_check_phy_82574(hw)) 4388 adapter->phy_hang_count++; 4389 else 4390 adapter->phy_hang_count = 0; 4391 4392 if (adapter->phy_hang_count > 1) { 4393 adapter->phy_hang_count = 0; 4394 schedule_work(&adapter->reset_task); 4395 } 4396 } 4397 4398 /** 4399 * e1000_watchdog - Timer Call-back 4400 * @data: pointer to adapter cast into an unsigned long 4401 **/ 4402 static void e1000_watchdog(unsigned long data) 4403 { 4404 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 4405 4406 /* Do the rest outside of interrupt context */ 4407 schedule_work(&adapter->watchdog_task); 4408 4409 /* TODO: make this use queue_delayed_work() */ 4410 } 4411 4412 static void e1000_watchdog_task(struct work_struct *work) 4413 { 4414 struct e1000_adapter *adapter = container_of(work, 4415 struct e1000_adapter, watchdog_task); 4416 struct net_device *netdev = adapter->netdev; 4417 struct e1000_mac_info *mac = &adapter->hw.mac; 4418 struct e1000_phy_info *phy = &adapter->hw.phy; 4419 struct e1000_ring *tx_ring = adapter->tx_ring; 4420 struct e1000_hw *hw = &adapter->hw; 4421 u32 link, tctl; 4422 4423 if (test_bit(__E1000_DOWN, &adapter->state)) 4424 return; 4425 4426 link = e1000e_has_link(adapter); 4427 if ((netif_carrier_ok(netdev)) && link) { 4428 /* Cancel scheduled suspend requests. */ 4429 pm_runtime_resume(netdev->dev.parent); 4430 4431 e1000e_enable_receives(adapter); 4432 goto link_up; 4433 } 4434 4435 if ((e1000e_enable_tx_pkt_filtering(hw)) && 4436 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)) 4437 e1000_update_mng_vlan(adapter); 4438 4439 if (link) { 4440 if (!netif_carrier_ok(netdev)) { 4441 bool txb2b = true; 4442 4443 /* Cancel scheduled suspend requests. */ 4444 pm_runtime_resume(netdev->dev.parent); 4445 4446 /* update snapshot of PHY registers on LSC */ 4447 e1000_phy_read_status(adapter); 4448 mac->ops.get_link_up_info(&adapter->hw, 4449 &adapter->link_speed, 4450 &adapter->link_duplex); 4451 e1000_print_link_info(adapter); 4452 /* 4453 * On supported PHYs, check for duplex mismatch only 4454 * if link has autonegotiated at 10/100 half 4455 */ 4456 if ((hw->phy.type == e1000_phy_igp_3 || 4457 hw->phy.type == e1000_phy_bm) && 4458 (hw->mac.autoneg == true) && 4459 (adapter->link_speed == SPEED_10 || 4460 adapter->link_speed == SPEED_100) && 4461 (adapter->link_duplex == HALF_DUPLEX)) { 4462 u16 autoneg_exp; 4463 4464 e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp); 4465 4466 if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS)) 4467 e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n"); 4468 } 4469 4470 /* adjust timeout factor according to speed/duplex */ 4471 adapter->tx_timeout_factor = 1; 4472 switch (adapter->link_speed) { 4473 case SPEED_10: 4474 txb2b = false; 4475 adapter->tx_timeout_factor = 16; 4476 break; 4477 case SPEED_100: 4478 txb2b = false; 4479 adapter->tx_timeout_factor = 10; 4480 break; 4481 } 4482 4483 /* 4484 * workaround: re-program speed mode bit after 4485 * link-up event 4486 */ 4487 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && 4488 !txb2b) { 4489 u32 tarc0; 4490 tarc0 = er32(TARC(0)); 4491 tarc0 &= ~SPEED_MODE_BIT; 4492 ew32(TARC(0), tarc0); 4493 } 4494 4495 /* 4496 * disable TSO for pcie and 10/100 speeds, to avoid 4497 * some hardware issues 4498 */ 4499 if (!(adapter->flags & FLAG_TSO_FORCE)) { 4500 switch (adapter->link_speed) { 4501 case SPEED_10: 4502 case SPEED_100: 4503 e_info("10/100 speed: disabling TSO\n"); 4504 netdev->features &= ~NETIF_F_TSO; 4505 netdev->features &= ~NETIF_F_TSO6; 4506 break; 4507 case SPEED_1000: 4508 netdev->features |= NETIF_F_TSO; 4509 netdev->features |= NETIF_F_TSO6; 4510 break; 4511 default: 4512 /* oops */ 4513 break; 4514 } 4515 } 4516 4517 /* 4518 * enable transmits in the hardware, need to do this 4519 * after setting TARC(0) 4520 */ 4521 tctl = er32(TCTL); 4522 tctl |= E1000_TCTL_EN; 4523 ew32(TCTL, tctl); 4524 4525 /* 4526 * Perform any post-link-up configuration before 4527 * reporting link up. 4528 */ 4529 if (phy->ops.cfg_on_link_up) 4530 phy->ops.cfg_on_link_up(hw); 4531 4532 netif_carrier_on(netdev); 4533 4534 if (!test_bit(__E1000_DOWN, &adapter->state)) 4535 mod_timer(&adapter->phy_info_timer, 4536 round_jiffies(jiffies + 2 * HZ)); 4537 } 4538 } else { 4539 if (netif_carrier_ok(netdev)) { 4540 adapter->link_speed = 0; 4541 adapter->link_duplex = 0; 4542 /* Link status message must follow this format */ 4543 printk(KERN_INFO "e1000e: %s NIC Link is Down\n", 4544 adapter->netdev->name); 4545 netif_carrier_off(netdev); 4546 if (!test_bit(__E1000_DOWN, &adapter->state)) 4547 mod_timer(&adapter->phy_info_timer, 4548 round_jiffies(jiffies + 2 * HZ)); 4549 4550 if (adapter->flags & FLAG_RX_NEEDS_RESTART) 4551 schedule_work(&adapter->reset_task); 4552 else 4553 pm_schedule_suspend(netdev->dev.parent, 4554 LINK_TIMEOUT); 4555 } 4556 } 4557 4558 link_up: 4559 spin_lock(&adapter->stats64_lock); 4560 e1000e_update_stats(adapter); 4561 4562 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 4563 adapter->tpt_old = adapter->stats.tpt; 4564 mac->collision_delta = adapter->stats.colc - adapter->colc_old; 4565 adapter->colc_old = adapter->stats.colc; 4566 4567 adapter->gorc = adapter->stats.gorc - adapter->gorc_old; 4568 adapter->gorc_old = adapter->stats.gorc; 4569 adapter->gotc = adapter->stats.gotc - adapter->gotc_old; 4570 adapter->gotc_old = adapter->stats.gotc; 4571 spin_unlock(&adapter->stats64_lock); 4572 4573 e1000e_update_adaptive(&adapter->hw); 4574 4575 if (!netif_carrier_ok(netdev) && 4576 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) { 4577 /* 4578 * We've lost link, so the controller stops DMA, 4579 * but we've got queued Tx work that's never going 4580 * to get done, so reset controller to flush Tx. 4581 * (Do the reset outside of interrupt context). 4582 */ 4583 schedule_work(&adapter->reset_task); 4584 /* return immediately since reset is imminent */ 4585 return; 4586 } 4587 4588 /* Simple mode for Interrupt Throttle Rate (ITR) */ 4589 if (adapter->itr_setting == 4) { 4590 /* 4591 * Symmetric Tx/Rx gets a reduced ITR=2000; 4592 * Total asymmetrical Tx or Rx gets ITR=8000; 4593 * everyone else is between 2000-8000. 4594 */ 4595 u32 goc = (adapter->gotc + adapter->gorc) / 10000; 4596 u32 dif = (adapter->gotc > adapter->gorc ? 4597 adapter->gotc - adapter->gorc : 4598 adapter->gorc - adapter->gotc) / 10000; 4599 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; 4600 4601 ew32(ITR, 1000000000 / (itr * 256)); 4602 } 4603 4604 /* Cause software interrupt to ensure Rx ring is cleaned */ 4605 if (adapter->msix_entries) 4606 ew32(ICS, adapter->rx_ring->ims_val); 4607 else 4608 ew32(ICS, E1000_ICS_RXDMT0); 4609 4610 /* flush pending descriptors to memory before detecting Tx hang */ 4611 e1000e_flush_descriptors(adapter); 4612 4613 /* Force detection of hung controller every watchdog period */ 4614 adapter->detect_tx_hung = true; 4615 4616 /* 4617 * With 82571 controllers, LAA may be overwritten due to controller 4618 * reset from the other port. Set the appropriate LAA in RAR[0] 4619 */ 4620 if (e1000e_get_laa_state_82571(hw)) 4621 e1000e_rar_set(hw, adapter->hw.mac.addr, 0); 4622 4623 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) 4624 e1000e_check_82574_phy_workaround(adapter); 4625 4626 /* Reset the timer */ 4627 if (!test_bit(__E1000_DOWN, &adapter->state)) 4628 mod_timer(&adapter->watchdog_timer, 4629 round_jiffies(jiffies + 2 * HZ)); 4630 } 4631 4632 #define E1000_TX_FLAGS_CSUM 0x00000001 4633 #define E1000_TX_FLAGS_VLAN 0x00000002 4634 #define E1000_TX_FLAGS_TSO 0x00000004 4635 #define E1000_TX_FLAGS_IPV4 0x00000008 4636 #define E1000_TX_FLAGS_NO_FCS 0x00000010 4637 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 4638 #define E1000_TX_FLAGS_VLAN_SHIFT 16 4639 4640 static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) 4641 { 4642 struct e1000_context_desc *context_desc; 4643 struct e1000_buffer *buffer_info; 4644 unsigned int i; 4645 u32 cmd_length = 0; 4646 u16 ipcse = 0, tucse, mss; 4647 u8 ipcss, ipcso, tucss, tucso, hdr_len; 4648 4649 if (!skb_is_gso(skb)) 4650 return 0; 4651 4652 if (skb_header_cloned(skb)) { 4653 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 4654 4655 if (err) 4656 return err; 4657 } 4658 4659 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 4660 mss = skb_shinfo(skb)->gso_size; 4661 if (skb->protocol == htons(ETH_P_IP)) { 4662 struct iphdr *iph = ip_hdr(skb); 4663 iph->tot_len = 0; 4664 iph->check = 0; 4665 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 4666 0, IPPROTO_TCP, 0); 4667 cmd_length = E1000_TXD_CMD_IP; 4668 ipcse = skb_transport_offset(skb) - 1; 4669 } else if (skb_is_gso_v6(skb)) { 4670 ipv6_hdr(skb)->payload_len = 0; 4671 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 4672 &ipv6_hdr(skb)->daddr, 4673 0, IPPROTO_TCP, 0); 4674 ipcse = 0; 4675 } 4676 ipcss = skb_network_offset(skb); 4677 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; 4678 tucss = skb_transport_offset(skb); 4679 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; 4680 tucse = 0; 4681 4682 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 4683 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); 4684 4685 i = tx_ring->next_to_use; 4686 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 4687 buffer_info = &tx_ring->buffer_info[i]; 4688 4689 context_desc->lower_setup.ip_fields.ipcss = ipcss; 4690 context_desc->lower_setup.ip_fields.ipcso = ipcso; 4691 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); 4692 context_desc->upper_setup.tcp_fields.tucss = tucss; 4693 context_desc->upper_setup.tcp_fields.tucso = tucso; 4694 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); 4695 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); 4696 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; 4697 context_desc->cmd_and_length = cpu_to_le32(cmd_length); 4698 4699 buffer_info->time_stamp = jiffies; 4700 buffer_info->next_to_watch = i; 4701 4702 i++; 4703 if (i == tx_ring->count) 4704 i = 0; 4705 tx_ring->next_to_use = i; 4706 4707 return 1; 4708 } 4709 4710 static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb) 4711 { 4712 struct e1000_adapter *adapter = tx_ring->adapter; 4713 struct e1000_context_desc *context_desc; 4714 struct e1000_buffer *buffer_info; 4715 unsigned int i; 4716 u8 css; 4717 u32 cmd_len = E1000_TXD_CMD_DEXT; 4718 __be16 protocol; 4719 4720 if (skb->ip_summed != CHECKSUM_PARTIAL) 4721 return 0; 4722 4723 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) 4724 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; 4725 else 4726 protocol = skb->protocol; 4727 4728 switch (protocol) { 4729 case cpu_to_be16(ETH_P_IP): 4730 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 4731 cmd_len |= E1000_TXD_CMD_TCP; 4732 break; 4733 case cpu_to_be16(ETH_P_IPV6): 4734 /* XXX not handling all IPV6 headers */ 4735 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 4736 cmd_len |= E1000_TXD_CMD_TCP; 4737 break; 4738 default: 4739 if (unlikely(net_ratelimit())) 4740 e_warn("checksum_partial proto=%x!\n", 4741 be16_to_cpu(protocol)); 4742 break; 4743 } 4744 4745 css = skb_checksum_start_offset(skb); 4746 4747 i = tx_ring->next_to_use; 4748 buffer_info = &tx_ring->buffer_info[i]; 4749 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 4750 4751 context_desc->lower_setup.ip_config = 0; 4752 context_desc->upper_setup.tcp_fields.tucss = css; 4753 context_desc->upper_setup.tcp_fields.tucso = 4754 css + skb->csum_offset; 4755 context_desc->upper_setup.tcp_fields.tucse = 0; 4756 context_desc->tcp_seg_setup.data = 0; 4757 context_desc->cmd_and_length = cpu_to_le32(cmd_len); 4758 4759 buffer_info->time_stamp = jiffies; 4760 buffer_info->next_to_watch = i; 4761 4762 i++; 4763 if (i == tx_ring->count) 4764 i = 0; 4765 tx_ring->next_to_use = i; 4766 4767 return 1; 4768 } 4769 4770 #define E1000_MAX_PER_TXD 8192 4771 #define E1000_MAX_TXD_PWR 12 4772 4773 static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, 4774 unsigned int first, unsigned int max_per_txd, 4775 unsigned int nr_frags, unsigned int mss) 4776 { 4777 struct e1000_adapter *adapter = tx_ring->adapter; 4778 struct pci_dev *pdev = adapter->pdev; 4779 struct e1000_buffer *buffer_info; 4780 unsigned int len = skb_headlen(skb); 4781 unsigned int offset = 0, size, count = 0, i; 4782 unsigned int f, bytecount, segs; 4783 4784 i = tx_ring->next_to_use; 4785 4786 while (len) { 4787 buffer_info = &tx_ring->buffer_info[i]; 4788 size = min(len, max_per_txd); 4789 4790 buffer_info->length = size; 4791 buffer_info->time_stamp = jiffies; 4792 buffer_info->next_to_watch = i; 4793 buffer_info->dma = dma_map_single(&pdev->dev, 4794 skb->data + offset, 4795 size, DMA_TO_DEVICE); 4796 buffer_info->mapped_as_page = false; 4797 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 4798 goto dma_error; 4799 4800 len -= size; 4801 offset += size; 4802 count++; 4803 4804 if (len) { 4805 i++; 4806 if (i == tx_ring->count) 4807 i = 0; 4808 } 4809 } 4810 4811 for (f = 0; f < nr_frags; f++) { 4812 const struct skb_frag_struct *frag; 4813 4814 frag = &skb_shinfo(skb)->frags[f]; 4815 len = skb_frag_size(frag); 4816 offset = 0; 4817 4818 while (len) { 4819 i++; 4820 if (i == tx_ring->count) 4821 i = 0; 4822 4823 buffer_info = &tx_ring->buffer_info[i]; 4824 size = min(len, max_per_txd); 4825 4826 buffer_info->length = size; 4827 buffer_info->time_stamp = jiffies; 4828 buffer_info->next_to_watch = i; 4829 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 4830 offset, size, DMA_TO_DEVICE); 4831 buffer_info->mapped_as_page = true; 4832 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 4833 goto dma_error; 4834 4835 len -= size; 4836 offset += size; 4837 count++; 4838 } 4839 } 4840 4841 segs = skb_shinfo(skb)->gso_segs ? : 1; 4842 /* multiply data chunks by size of headers */ 4843 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; 4844 4845 tx_ring->buffer_info[i].skb = skb; 4846 tx_ring->buffer_info[i].segs = segs; 4847 tx_ring->buffer_info[i].bytecount = bytecount; 4848 tx_ring->buffer_info[first].next_to_watch = i; 4849 4850 return count; 4851 4852 dma_error: 4853 dev_err(&pdev->dev, "Tx DMA map failed\n"); 4854 buffer_info->dma = 0; 4855 if (count) 4856 count--; 4857 4858 while (count--) { 4859 if (i == 0) 4860 i += tx_ring->count; 4861 i--; 4862 buffer_info = &tx_ring->buffer_info[i]; 4863 e1000_put_txbuf(tx_ring, buffer_info); 4864 } 4865 4866 return 0; 4867 } 4868 4869 static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count) 4870 { 4871 struct e1000_adapter *adapter = tx_ring->adapter; 4872 struct e1000_tx_desc *tx_desc = NULL; 4873 struct e1000_buffer *buffer_info; 4874 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 4875 unsigned int i; 4876 4877 if (tx_flags & E1000_TX_FLAGS_TSO) { 4878 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 4879 E1000_TXD_CMD_TSE; 4880 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 4881 4882 if (tx_flags & E1000_TX_FLAGS_IPV4) 4883 txd_upper |= E1000_TXD_POPTS_IXSM << 8; 4884 } 4885 4886 if (tx_flags & E1000_TX_FLAGS_CSUM) { 4887 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 4888 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 4889 } 4890 4891 if (tx_flags & E1000_TX_FLAGS_VLAN) { 4892 txd_lower |= E1000_TXD_CMD_VLE; 4893 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); 4894 } 4895 4896 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) 4897 txd_lower &= ~(E1000_TXD_CMD_IFCS); 4898 4899 i = tx_ring->next_to_use; 4900 4901 do { 4902 buffer_info = &tx_ring->buffer_info[i]; 4903 tx_desc = E1000_TX_DESC(*tx_ring, i); 4904 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4905 tx_desc->lower.data = 4906 cpu_to_le32(txd_lower | buffer_info->length); 4907 tx_desc->upper.data = cpu_to_le32(txd_upper); 4908 4909 i++; 4910 if (i == tx_ring->count) 4911 i = 0; 4912 } while (--count > 0); 4913 4914 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); 4915 4916 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ 4917 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) 4918 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); 4919 4920 /* 4921 * Force memory writes to complete before letting h/w 4922 * know there are new descriptors to fetch. (Only 4923 * applicable for weak-ordered memory model archs, 4924 * such as IA-64). 4925 */ 4926 wmb(); 4927 4928 tx_ring->next_to_use = i; 4929 4930 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) 4931 e1000e_update_tdt_wa(tx_ring, i); 4932 else 4933 writel(i, tx_ring->tail); 4934 4935 /* 4936 * we need this if more than one processor can write to our tail 4937 * at a time, it synchronizes IO on IA64/Altix systems 4938 */ 4939 mmiowb(); 4940 } 4941 4942 #define MINIMUM_DHCP_PACKET_SIZE 282 4943 static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, 4944 struct sk_buff *skb) 4945 { 4946 struct e1000_hw *hw = &adapter->hw; 4947 u16 length, offset; 4948 4949 if (vlan_tx_tag_present(skb)) { 4950 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && 4951 (adapter->hw.mng_cookie.status & 4952 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) 4953 return 0; 4954 } 4955 4956 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) 4957 return 0; 4958 4959 if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP)) 4960 return 0; 4961 4962 { 4963 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14); 4964 struct udphdr *udp; 4965 4966 if (ip->protocol != IPPROTO_UDP) 4967 return 0; 4968 4969 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); 4970 if (ntohs(udp->dest) != 67) 4971 return 0; 4972 4973 offset = (u8 *)udp + 8 - skb->data; 4974 length = skb->len - offset; 4975 return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length); 4976 } 4977 4978 return 0; 4979 } 4980 4981 static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) 4982 { 4983 struct e1000_adapter *adapter = tx_ring->adapter; 4984 4985 netif_stop_queue(adapter->netdev); 4986 /* 4987 * Herbert's original patch had: 4988 * smp_mb__after_netif_stop_queue(); 4989 * but since that doesn't exist yet, just open code it. 4990 */ 4991 smp_mb(); 4992 4993 /* 4994 * We need to check again in a case another CPU has just 4995 * made room available. 4996 */ 4997 if (e1000_desc_unused(tx_ring) < size) 4998 return -EBUSY; 4999 5000 /* A reprieve! */ 5001 netif_start_queue(adapter->netdev); 5002 ++adapter->restart_queue; 5003 return 0; 5004 } 5005 5006 static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) 5007 { 5008 if (e1000_desc_unused(tx_ring) >= size) 5009 return 0; 5010 return __e1000_maybe_stop_tx(tx_ring, size); 5011 } 5012 5013 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1) 5014 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, 5015 struct net_device *netdev) 5016 { 5017 struct e1000_adapter *adapter = netdev_priv(netdev); 5018 struct e1000_ring *tx_ring = adapter->tx_ring; 5019 unsigned int first; 5020 unsigned int max_per_txd = E1000_MAX_PER_TXD; 5021 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; 5022 unsigned int tx_flags = 0; 5023 unsigned int len = skb_headlen(skb); 5024 unsigned int nr_frags; 5025 unsigned int mss; 5026 int count = 0; 5027 int tso; 5028 unsigned int f; 5029 5030 if (test_bit(__E1000_DOWN, &adapter->state)) { 5031 dev_kfree_skb_any(skb); 5032 return NETDEV_TX_OK; 5033 } 5034 5035 if (skb->len <= 0) { 5036 dev_kfree_skb_any(skb); 5037 return NETDEV_TX_OK; 5038 } 5039 5040 mss = skb_shinfo(skb)->gso_size; 5041 /* 5042 * The controller does a simple calculation to 5043 * make sure there is enough room in the FIFO before 5044 * initiating the DMA for each buffer. The calc is: 5045 * 4 = ceil(buffer len/mss). To make sure we don't 5046 * overrun the FIFO, adjust the max buffer len if mss 5047 * drops. 5048 */ 5049 if (mss) { 5050 u8 hdr_len; 5051 max_per_txd = min(mss << 2, max_per_txd); 5052 max_txd_pwr = fls(max_per_txd) - 1; 5053 5054 /* 5055 * TSO Workaround for 82571/2/3 Controllers -- if skb->data 5056 * points to just header, pull a few bytes of payload from 5057 * frags into skb->data 5058 */ 5059 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 5060 /* 5061 * we do this workaround for ES2LAN, but it is un-necessary, 5062 * avoiding it could save a lot of cycles 5063 */ 5064 if (skb->data_len && (hdr_len == len)) { 5065 unsigned int pull_size; 5066 5067 pull_size = min_t(unsigned int, 4, skb->data_len); 5068 if (!__pskb_pull_tail(skb, pull_size)) { 5069 e_err("__pskb_pull_tail failed.\n"); 5070 dev_kfree_skb_any(skb); 5071 return NETDEV_TX_OK; 5072 } 5073 len = skb_headlen(skb); 5074 } 5075 } 5076 5077 /* reserve a descriptor for the offload context */ 5078 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) 5079 count++; 5080 count++; 5081 5082 count += TXD_USE_COUNT(len, max_txd_pwr); 5083 5084 nr_frags = skb_shinfo(skb)->nr_frags; 5085 for (f = 0; f < nr_frags; f++) 5086 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), 5087 max_txd_pwr); 5088 5089 if (adapter->hw.mac.tx_pkt_filtering) 5090 e1000_transfer_dhcp_info(adapter, skb); 5091 5092 /* 5093 * need: count + 2 desc gap to keep tail from touching 5094 * head, otherwise try next time 5095 */ 5096 if (e1000_maybe_stop_tx(tx_ring, count + 2)) 5097 return NETDEV_TX_BUSY; 5098 5099 if (vlan_tx_tag_present(skb)) { 5100 tx_flags |= E1000_TX_FLAGS_VLAN; 5101 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); 5102 } 5103 5104 first = tx_ring->next_to_use; 5105 5106 tso = e1000_tso(tx_ring, skb); 5107 if (tso < 0) { 5108 dev_kfree_skb_any(skb); 5109 return NETDEV_TX_OK; 5110 } 5111 5112 if (tso) 5113 tx_flags |= E1000_TX_FLAGS_TSO; 5114 else if (e1000_tx_csum(tx_ring, skb)) 5115 tx_flags |= E1000_TX_FLAGS_CSUM; 5116 5117 /* 5118 * Old method was to assume IPv4 packet by default if TSO was enabled. 5119 * 82571 hardware supports TSO capabilities for IPv6 as well... 5120 * no longer assume, we must. 5121 */ 5122 if (skb->protocol == htons(ETH_P_IP)) 5123 tx_flags |= E1000_TX_FLAGS_IPV4; 5124 5125 if (unlikely(skb->no_fcs)) 5126 tx_flags |= E1000_TX_FLAGS_NO_FCS; 5127 5128 /* if count is 0 then mapping error has occurred */ 5129 count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss); 5130 if (count) { 5131 netdev_sent_queue(netdev, skb->len); 5132 e1000_tx_queue(tx_ring, tx_flags, count); 5133 /* Make sure there is space in the ring for the next send. */ 5134 e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2); 5135 5136 } else { 5137 dev_kfree_skb_any(skb); 5138 tx_ring->buffer_info[first].time_stamp = 0; 5139 tx_ring->next_to_use = first; 5140 } 5141 5142 return NETDEV_TX_OK; 5143 } 5144 5145 /** 5146 * e1000_tx_timeout - Respond to a Tx Hang 5147 * @netdev: network interface device structure 5148 **/ 5149 static void e1000_tx_timeout(struct net_device *netdev) 5150 { 5151 struct e1000_adapter *adapter = netdev_priv(netdev); 5152 5153 /* Do the reset outside of interrupt context */ 5154 adapter->tx_timeout_count++; 5155 schedule_work(&adapter->reset_task); 5156 } 5157 5158 static void e1000_reset_task(struct work_struct *work) 5159 { 5160 struct e1000_adapter *adapter; 5161 adapter = container_of(work, struct e1000_adapter, reset_task); 5162 5163 /* don't run the task if already down */ 5164 if (test_bit(__E1000_DOWN, &adapter->state)) 5165 return; 5166 5167 if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && 5168 (adapter->flags & FLAG_RX_RESTART_NOW))) { 5169 e1000e_dump(adapter); 5170 e_err("Reset adapter\n"); 5171 } 5172 e1000e_reinit_locked(adapter); 5173 } 5174 5175 /** 5176 * e1000_get_stats64 - Get System Network Statistics 5177 * @netdev: network interface device structure 5178 * @stats: rtnl_link_stats64 pointer 5179 * 5180 * Returns the address of the device statistics structure. 5181 **/ 5182 struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, 5183 struct rtnl_link_stats64 *stats) 5184 { 5185 struct e1000_adapter *adapter = netdev_priv(netdev); 5186 5187 memset(stats, 0, sizeof(struct rtnl_link_stats64)); 5188 spin_lock(&adapter->stats64_lock); 5189 e1000e_update_stats(adapter); 5190 /* Fill out the OS statistics structure */ 5191 stats->rx_bytes = adapter->stats.gorc; 5192 stats->rx_packets = adapter->stats.gprc; 5193 stats->tx_bytes = adapter->stats.gotc; 5194 stats->tx_packets = adapter->stats.gptc; 5195 stats->multicast = adapter->stats.mprc; 5196 stats->collisions = adapter->stats.colc; 5197 5198 /* Rx Errors */ 5199 5200 /* 5201 * RLEC on some newer hardware can be incorrect so build 5202 * our own version based on RUC and ROC 5203 */ 5204 stats->rx_errors = adapter->stats.rxerrc + 5205 adapter->stats.crcerrs + adapter->stats.algnerrc + 5206 adapter->stats.ruc + adapter->stats.roc + 5207 adapter->stats.cexterr; 5208 stats->rx_length_errors = adapter->stats.ruc + 5209 adapter->stats.roc; 5210 stats->rx_crc_errors = adapter->stats.crcerrs; 5211 stats->rx_frame_errors = adapter->stats.algnerrc; 5212 stats->rx_missed_errors = adapter->stats.mpc; 5213 5214 /* Tx Errors */ 5215 stats->tx_errors = adapter->stats.ecol + 5216 adapter->stats.latecol; 5217 stats->tx_aborted_errors = adapter->stats.ecol; 5218 stats->tx_window_errors = adapter->stats.latecol; 5219 stats->tx_carrier_errors = adapter->stats.tncrs; 5220 5221 /* Tx Dropped needs to be maintained elsewhere */ 5222 5223 spin_unlock(&adapter->stats64_lock); 5224 return stats; 5225 } 5226 5227 /** 5228 * e1000_change_mtu - Change the Maximum Transfer Unit 5229 * @netdev: network interface device structure 5230 * @new_mtu: new value for maximum frame size 5231 * 5232 * Returns 0 on success, negative on failure 5233 **/ 5234 static int e1000_change_mtu(struct net_device *netdev, int new_mtu) 5235 { 5236 struct e1000_adapter *adapter = netdev_priv(netdev); 5237 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 5238 5239 /* Jumbo frame support */ 5240 if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) { 5241 if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { 5242 e_err("Jumbo Frames not supported.\n"); 5243 return -EINVAL; 5244 } 5245 5246 /* 5247 * IP payload checksum (enabled with jumbos/packet-split when 5248 * Rx checksum is enabled) and generation of RSS hash is 5249 * mutually exclusive in the hardware. 5250 */ 5251 if ((netdev->features & NETIF_F_RXCSUM) && 5252 (netdev->features & NETIF_F_RXHASH)) { 5253 e_err("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled. Disable one of the receive offload features before enabling jumbos.\n"); 5254 return -EINVAL; 5255 } 5256 } 5257 5258 /* Supported frame sizes */ 5259 if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) || 5260 (max_frame > adapter->max_hw_frame_size)) { 5261 e_err("Unsupported MTU setting\n"); 5262 return -EINVAL; 5263 } 5264 5265 /* Jumbo frame workaround on 82579 requires CRC be stripped */ 5266 if ((adapter->hw.mac.type == e1000_pch2lan) && 5267 !(adapter->flags2 & FLAG2_CRC_STRIPPING) && 5268 (new_mtu > ETH_DATA_LEN)) { 5269 e_err("Jumbo Frames not supported on 82579 when CRC stripping is disabled.\n"); 5270 return -EINVAL; 5271 } 5272 5273 /* 82573 Errata 17 */ 5274 if (((adapter->hw.mac.type == e1000_82573) || 5275 (adapter->hw.mac.type == e1000_82574)) && 5276 (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) { 5277 adapter->flags2 |= FLAG2_DISABLE_ASPM_L1; 5278 e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1); 5279 } 5280 5281 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 5282 usleep_range(1000, 2000); 5283 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ 5284 adapter->max_frame_size = max_frame; 5285 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); 5286 netdev->mtu = new_mtu; 5287 if (netif_running(netdev)) 5288 e1000e_down(adapter); 5289 5290 /* 5291 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 5292 * means we reserve 2 more, this pushes us to allocate from the next 5293 * larger slab size. 5294 * i.e. RXBUFFER_2048 --> size-4096 slab 5295 * However with the new *_jumbo_rx* routines, jumbo receives will use 5296 * fragmented skbs 5297 */ 5298 5299 if (max_frame <= 2048) 5300 adapter->rx_buffer_len = 2048; 5301 else 5302 adapter->rx_buffer_len = 4096; 5303 5304 /* adjust allocation if LPE protects us, and we aren't using SBP */ 5305 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || 5306 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) 5307 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN 5308 + ETH_FCS_LEN; 5309 5310 if (netif_running(netdev)) 5311 e1000e_up(adapter); 5312 else 5313 e1000e_reset(adapter); 5314 5315 clear_bit(__E1000_RESETTING, &adapter->state); 5316 5317 return 0; 5318 } 5319 5320 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 5321 int cmd) 5322 { 5323 struct e1000_adapter *adapter = netdev_priv(netdev); 5324 struct mii_ioctl_data *data = if_mii(ifr); 5325 5326 if (adapter->hw.phy.media_type != e1000_media_type_copper) 5327 return -EOPNOTSUPP; 5328 5329 switch (cmd) { 5330 case SIOCGMIIPHY: 5331 data->phy_id = adapter->hw.phy.addr; 5332 break; 5333 case SIOCGMIIREG: 5334 e1000_phy_read_status(adapter); 5335 5336 switch (data->reg_num & 0x1F) { 5337 case MII_BMCR: 5338 data->val_out = adapter->phy_regs.bmcr; 5339 break; 5340 case MII_BMSR: 5341 data->val_out = adapter->phy_regs.bmsr; 5342 break; 5343 case MII_PHYSID1: 5344 data->val_out = (adapter->hw.phy.id >> 16); 5345 break; 5346 case MII_PHYSID2: 5347 data->val_out = (adapter->hw.phy.id & 0xFFFF); 5348 break; 5349 case MII_ADVERTISE: 5350 data->val_out = adapter->phy_regs.advertise; 5351 break; 5352 case MII_LPA: 5353 data->val_out = adapter->phy_regs.lpa; 5354 break; 5355 case MII_EXPANSION: 5356 data->val_out = adapter->phy_regs.expansion; 5357 break; 5358 case MII_CTRL1000: 5359 data->val_out = adapter->phy_regs.ctrl1000; 5360 break; 5361 case MII_STAT1000: 5362 data->val_out = adapter->phy_regs.stat1000; 5363 break; 5364 case MII_ESTATUS: 5365 data->val_out = adapter->phy_regs.estatus; 5366 break; 5367 default: 5368 return -EIO; 5369 } 5370 break; 5371 case SIOCSMIIREG: 5372 default: 5373 return -EOPNOTSUPP; 5374 } 5375 return 0; 5376 } 5377 5378 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 5379 { 5380 switch (cmd) { 5381 case SIOCGMIIPHY: 5382 case SIOCGMIIREG: 5383 case SIOCSMIIREG: 5384 return e1000_mii_ioctl(netdev, ifr, cmd); 5385 default: 5386 return -EOPNOTSUPP; 5387 } 5388 } 5389 5390 static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) 5391 { 5392 struct e1000_hw *hw = &adapter->hw; 5393 u32 i, mac_reg; 5394 u16 phy_reg, wuc_enable; 5395 int retval = 0; 5396 5397 /* copy MAC RARs to PHY RARs */ 5398 e1000_copy_rx_addrs_to_phy_ich8lan(hw); 5399 5400 retval = hw->phy.ops.acquire(hw); 5401 if (retval) { 5402 e_err("Could not acquire PHY\n"); 5403 return retval; 5404 } 5405 5406 /* Enable access to wakeup registers on and set page to BM_WUC_PAGE */ 5407 retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable); 5408 if (retval) 5409 goto release; 5410 5411 /* copy MAC MTA to PHY MTA - only needed for pchlan */ 5412 for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) { 5413 mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i); 5414 hw->phy.ops.write_reg_page(hw, BM_MTA(i), 5415 (u16)(mac_reg & 0xFFFF)); 5416 hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1, 5417 (u16)((mac_reg >> 16) & 0xFFFF)); 5418 } 5419 5420 /* configure PHY Rx Control register */ 5421 hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg); 5422 mac_reg = er32(RCTL); 5423 if (mac_reg & E1000_RCTL_UPE) 5424 phy_reg |= BM_RCTL_UPE; 5425 if (mac_reg & E1000_RCTL_MPE) 5426 phy_reg |= BM_RCTL_MPE; 5427 phy_reg &= ~(BM_RCTL_MO_MASK); 5428 if (mac_reg & E1000_RCTL_MO_3) 5429 phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) 5430 << BM_RCTL_MO_SHIFT); 5431 if (mac_reg & E1000_RCTL_BAM) 5432 phy_reg |= BM_RCTL_BAM; 5433 if (mac_reg & E1000_RCTL_PMCF) 5434 phy_reg |= BM_RCTL_PMCF; 5435 mac_reg = er32(CTRL); 5436 if (mac_reg & E1000_CTRL_RFCE) 5437 phy_reg |= BM_RCTL_RFCE; 5438 hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg); 5439 5440 /* enable PHY wakeup in MAC register */ 5441 ew32(WUFC, wufc); 5442 ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN); 5443 5444 /* configure and enable PHY wakeup in PHY registers */ 5445 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc); 5446 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN); 5447 5448 /* activate PHY wakeup */ 5449 wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; 5450 retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable); 5451 if (retval) 5452 e_err("Could not set PHY Host Wakeup bit\n"); 5453 release: 5454 hw->phy.ops.release(hw); 5455 5456 return retval; 5457 } 5458 5459 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, 5460 bool runtime) 5461 { 5462 struct net_device *netdev = pci_get_drvdata(pdev); 5463 struct e1000_adapter *adapter = netdev_priv(netdev); 5464 struct e1000_hw *hw = &adapter->hw; 5465 u32 ctrl, ctrl_ext, rctl, status; 5466 /* Runtime suspend should only enable wakeup for link changes */ 5467 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; 5468 int retval = 0; 5469 5470 netif_device_detach(netdev); 5471 5472 if (netif_running(netdev)) { 5473 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); 5474 e1000e_down(adapter); 5475 e1000_free_irq(adapter); 5476 } 5477 e1000e_reset_interrupt_capability(adapter); 5478 5479 retval = pci_save_state(pdev); 5480 if (retval) 5481 return retval; 5482 5483 status = er32(STATUS); 5484 if (status & E1000_STATUS_LU) 5485 wufc &= ~E1000_WUFC_LNKC; 5486 5487 if (wufc) { 5488 e1000_setup_rctl(adapter); 5489 e1000e_set_rx_mode(netdev); 5490 5491 /* turn on all-multi mode if wake on multicast is enabled */ 5492 if (wufc & E1000_WUFC_MC) { 5493 rctl = er32(RCTL); 5494 rctl |= E1000_RCTL_MPE; 5495 ew32(RCTL, rctl); 5496 } 5497 5498 ctrl = er32(CTRL); 5499 /* advertise wake from D3Cold */ 5500 #define E1000_CTRL_ADVD3WUC 0x00100000 5501 /* phy power management enable */ 5502 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 5503 ctrl |= E1000_CTRL_ADVD3WUC; 5504 if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)) 5505 ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT; 5506 ew32(CTRL, ctrl); 5507 5508 if (adapter->hw.phy.media_type == e1000_media_type_fiber || 5509 adapter->hw.phy.media_type == 5510 e1000_media_type_internal_serdes) { 5511 /* keep the laser running in D3 */ 5512 ctrl_ext = er32(CTRL_EXT); 5513 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA; 5514 ew32(CTRL_EXT, ctrl_ext); 5515 } 5516 5517 if (adapter->flags & FLAG_IS_ICH) 5518 e1000_suspend_workarounds_ich8lan(&adapter->hw); 5519 5520 /* Allow time for pending master requests to run */ 5521 e1000e_disable_pcie_master(&adapter->hw); 5522 5523 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { 5524 /* enable wakeup by the PHY */ 5525 retval = e1000_init_phy_wakeup(adapter, wufc); 5526 if (retval) 5527 return retval; 5528 } else { 5529 /* enable wakeup by the MAC */ 5530 ew32(WUFC, wufc); 5531 ew32(WUC, E1000_WUC_PME_EN); 5532 } 5533 } else { 5534 ew32(WUC, 0); 5535 ew32(WUFC, 0); 5536 } 5537 5538 *enable_wake = !!wufc; 5539 5540 /* make sure adapter isn't asleep if manageability is enabled */ 5541 if ((adapter->flags & FLAG_MNG_PT_ENABLED) || 5542 (hw->mac.ops.check_mng_mode(hw))) 5543 *enable_wake = true; 5544 5545 if (adapter->hw.phy.type == e1000_phy_igp_3) 5546 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); 5547 5548 /* 5549 * Release control of h/w to f/w. If f/w is AMT enabled, this 5550 * would have already happened in close and is redundant. 5551 */ 5552 e1000e_release_hw_control(adapter); 5553 5554 pci_disable_device(pdev); 5555 5556 return 0; 5557 } 5558 5559 static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake) 5560 { 5561 if (sleep && wake) { 5562 pci_prepare_to_sleep(pdev); 5563 return; 5564 } 5565 5566 pci_wake_from_d3(pdev, wake); 5567 pci_set_power_state(pdev, PCI_D3hot); 5568 } 5569 5570 static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, 5571 bool wake) 5572 { 5573 struct net_device *netdev = pci_get_drvdata(pdev); 5574 struct e1000_adapter *adapter = netdev_priv(netdev); 5575 5576 /* 5577 * The pci-e switch on some quad port adapters will report a 5578 * correctable error when the MAC transitions from D0 to D3. To 5579 * prevent this we need to mask off the correctable errors on the 5580 * downstream port of the pci-e switch. 5581 */ 5582 if (adapter->flags & FLAG_IS_QUAD_PORT) { 5583 struct pci_dev *us_dev = pdev->bus->self; 5584 int pos = pci_pcie_cap(us_dev); 5585 u16 devctl; 5586 5587 pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl); 5588 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, 5589 (devctl & ~PCI_EXP_DEVCTL_CERE)); 5590 5591 e1000_power_off(pdev, sleep, wake); 5592 5593 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl); 5594 } else { 5595 e1000_power_off(pdev, sleep, wake); 5596 } 5597 } 5598 5599 #ifdef CONFIG_PCIEASPM 5600 static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) 5601 { 5602 pci_disable_link_state_locked(pdev, state); 5603 } 5604 #else 5605 static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) 5606 { 5607 int pos; 5608 u16 reg16; 5609 5610 /* 5611 * Both device and parent should have the same ASPM setting. 5612 * Disable ASPM in downstream component first and then upstream. 5613 */ 5614 pos = pci_pcie_cap(pdev); 5615 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); 5616 reg16 &= ~state; 5617 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); 5618 5619 if (!pdev->bus->self) 5620 return; 5621 5622 pos = pci_pcie_cap(pdev->bus->self); 5623 pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, ®16); 5624 reg16 &= ~state; 5625 pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16); 5626 } 5627 #endif 5628 static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) 5629 { 5630 dev_info(&pdev->dev, "Disabling ASPM %s %s\n", 5631 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "", 5632 (state & PCIE_LINK_STATE_L1) ? "L1" : ""); 5633 5634 __e1000e_disable_aspm(pdev, state); 5635 } 5636 5637 #ifdef CONFIG_PM 5638 static bool e1000e_pm_ready(struct e1000_adapter *adapter) 5639 { 5640 return !!adapter->tx_ring->buffer_info; 5641 } 5642 5643 static int __e1000_resume(struct pci_dev *pdev) 5644 { 5645 struct net_device *netdev = pci_get_drvdata(pdev); 5646 struct e1000_adapter *adapter = netdev_priv(netdev); 5647 struct e1000_hw *hw = &adapter->hw; 5648 u16 aspm_disable_flag = 0; 5649 u32 err; 5650 5651 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) 5652 aspm_disable_flag = PCIE_LINK_STATE_L0S; 5653 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) 5654 aspm_disable_flag |= PCIE_LINK_STATE_L1; 5655 if (aspm_disable_flag) 5656 e1000e_disable_aspm(pdev, aspm_disable_flag); 5657 5658 pci_set_power_state(pdev, PCI_D0); 5659 pci_restore_state(pdev); 5660 pci_save_state(pdev); 5661 5662 e1000e_set_interrupt_capability(adapter); 5663 if (netif_running(netdev)) { 5664 err = e1000_request_irq(adapter); 5665 if (err) 5666 return err; 5667 } 5668 5669 if (hw->mac.type == e1000_pch2lan) 5670 e1000_resume_workarounds_pchlan(&adapter->hw); 5671 5672 e1000e_power_up_phy(adapter); 5673 5674 /* report the system wakeup cause from S3/S4 */ 5675 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { 5676 u16 phy_data; 5677 5678 e1e_rphy(&adapter->hw, BM_WUS, &phy_data); 5679 if (phy_data) { 5680 e_info("PHY Wakeup cause - %s\n", 5681 phy_data & E1000_WUS_EX ? "Unicast Packet" : 5682 phy_data & E1000_WUS_MC ? "Multicast Packet" : 5683 phy_data & E1000_WUS_BC ? "Broadcast Packet" : 5684 phy_data & E1000_WUS_MAG ? "Magic Packet" : 5685 phy_data & E1000_WUS_LNKC ? 5686 "Link Status Change" : "other"); 5687 } 5688 e1e_wphy(&adapter->hw, BM_WUS, ~0); 5689 } else { 5690 u32 wus = er32(WUS); 5691 if (wus) { 5692 e_info("MAC Wakeup cause - %s\n", 5693 wus & E1000_WUS_EX ? "Unicast Packet" : 5694 wus & E1000_WUS_MC ? "Multicast Packet" : 5695 wus & E1000_WUS_BC ? "Broadcast Packet" : 5696 wus & E1000_WUS_MAG ? "Magic Packet" : 5697 wus & E1000_WUS_LNKC ? "Link Status Change" : 5698 "other"); 5699 } 5700 ew32(WUS, ~0); 5701 } 5702 5703 e1000e_reset(adapter); 5704 5705 e1000_init_manageability_pt(adapter); 5706 5707 if (netif_running(netdev)) 5708 e1000e_up(adapter); 5709 5710 netif_device_attach(netdev); 5711 5712 /* 5713 * If the controller has AMT, do not set DRV_LOAD until the interface 5714 * is up. For all other cases, let the f/w know that the h/w is now 5715 * under the control of the driver. 5716 */ 5717 if (!(adapter->flags & FLAG_HAS_AMT)) 5718 e1000e_get_hw_control(adapter); 5719 5720 return 0; 5721 } 5722 5723 #ifdef CONFIG_PM_SLEEP 5724 static int e1000_suspend(struct device *dev) 5725 { 5726 struct pci_dev *pdev = to_pci_dev(dev); 5727 int retval; 5728 bool wake; 5729 5730 retval = __e1000_shutdown(pdev, &wake, false); 5731 if (!retval) 5732 e1000_complete_shutdown(pdev, true, wake); 5733 5734 return retval; 5735 } 5736 5737 static int e1000_resume(struct device *dev) 5738 { 5739 struct pci_dev *pdev = to_pci_dev(dev); 5740 struct net_device *netdev = pci_get_drvdata(pdev); 5741 struct e1000_adapter *adapter = netdev_priv(netdev); 5742 5743 if (e1000e_pm_ready(adapter)) 5744 adapter->idle_check = true; 5745 5746 return __e1000_resume(pdev); 5747 } 5748 #endif /* CONFIG_PM_SLEEP */ 5749 5750 #ifdef CONFIG_PM_RUNTIME 5751 static int e1000_runtime_suspend(struct device *dev) 5752 { 5753 struct pci_dev *pdev = to_pci_dev(dev); 5754 struct net_device *netdev = pci_get_drvdata(pdev); 5755 struct e1000_adapter *adapter = netdev_priv(netdev); 5756 5757 if (e1000e_pm_ready(adapter)) { 5758 bool wake; 5759 5760 __e1000_shutdown(pdev, &wake, true); 5761 } 5762 5763 return 0; 5764 } 5765 5766 static int e1000_idle(struct device *dev) 5767 { 5768 struct pci_dev *pdev = to_pci_dev(dev); 5769 struct net_device *netdev = pci_get_drvdata(pdev); 5770 struct e1000_adapter *adapter = netdev_priv(netdev); 5771 5772 if (!e1000e_pm_ready(adapter)) 5773 return 0; 5774 5775 if (adapter->idle_check) { 5776 adapter->idle_check = false; 5777 if (!e1000e_has_link(adapter)) 5778 pm_schedule_suspend(dev, MSEC_PER_SEC); 5779 } 5780 5781 return -EBUSY; 5782 } 5783 5784 static int e1000_runtime_resume(struct device *dev) 5785 { 5786 struct pci_dev *pdev = to_pci_dev(dev); 5787 struct net_device *netdev = pci_get_drvdata(pdev); 5788 struct e1000_adapter *adapter = netdev_priv(netdev); 5789 5790 if (!e1000e_pm_ready(adapter)) 5791 return 0; 5792 5793 adapter->idle_check = !dev->power.runtime_auto; 5794 return __e1000_resume(pdev); 5795 } 5796 #endif /* CONFIG_PM_RUNTIME */ 5797 #endif /* CONFIG_PM */ 5798 5799 static void e1000_shutdown(struct pci_dev *pdev) 5800 { 5801 bool wake = false; 5802 5803 __e1000_shutdown(pdev, &wake, false); 5804 5805 if (system_state == SYSTEM_POWER_OFF) 5806 e1000_complete_shutdown(pdev, false, wake); 5807 } 5808 5809 #ifdef CONFIG_NET_POLL_CONTROLLER 5810 5811 static irqreturn_t e1000_intr_msix(int irq, void *data) 5812 { 5813 struct net_device *netdev = data; 5814 struct e1000_adapter *adapter = netdev_priv(netdev); 5815 5816 if (adapter->msix_entries) { 5817 int vector, msix_irq; 5818 5819 vector = 0; 5820 msix_irq = adapter->msix_entries[vector].vector; 5821 disable_irq(msix_irq); 5822 e1000_intr_msix_rx(msix_irq, netdev); 5823 enable_irq(msix_irq); 5824 5825 vector++; 5826 msix_irq = adapter->msix_entries[vector].vector; 5827 disable_irq(msix_irq); 5828 e1000_intr_msix_tx(msix_irq, netdev); 5829 enable_irq(msix_irq); 5830 5831 vector++; 5832 msix_irq = adapter->msix_entries[vector].vector; 5833 disable_irq(msix_irq); 5834 e1000_msix_other(msix_irq, netdev); 5835 enable_irq(msix_irq); 5836 } 5837 5838 return IRQ_HANDLED; 5839 } 5840 5841 /* 5842 * Polling 'interrupt' - used by things like netconsole to send skbs 5843 * without having to re-enable interrupts. It's not called while 5844 * the interrupt routine is executing. 5845 */ 5846 static void e1000_netpoll(struct net_device *netdev) 5847 { 5848 struct e1000_adapter *adapter = netdev_priv(netdev); 5849 5850 switch (adapter->int_mode) { 5851 case E1000E_INT_MODE_MSIX: 5852 e1000_intr_msix(adapter->pdev->irq, netdev); 5853 break; 5854 case E1000E_INT_MODE_MSI: 5855 disable_irq(adapter->pdev->irq); 5856 e1000_intr_msi(adapter->pdev->irq, netdev); 5857 enable_irq(adapter->pdev->irq); 5858 break; 5859 default: /* E1000E_INT_MODE_LEGACY */ 5860 disable_irq(adapter->pdev->irq); 5861 e1000_intr(adapter->pdev->irq, netdev); 5862 enable_irq(adapter->pdev->irq); 5863 break; 5864 } 5865 } 5866 #endif 5867 5868 /** 5869 * e1000_io_error_detected - called when PCI error is detected 5870 * @pdev: Pointer to PCI device 5871 * @state: The current pci connection state 5872 * 5873 * This function is called after a PCI bus error affecting 5874 * this device has been detected. 5875 */ 5876 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, 5877 pci_channel_state_t state) 5878 { 5879 struct net_device *netdev = pci_get_drvdata(pdev); 5880 struct e1000_adapter *adapter = netdev_priv(netdev); 5881 5882 netif_device_detach(netdev); 5883 5884 if (state == pci_channel_io_perm_failure) 5885 return PCI_ERS_RESULT_DISCONNECT; 5886 5887 if (netif_running(netdev)) 5888 e1000e_down(adapter); 5889 pci_disable_device(pdev); 5890 5891 /* Request a slot slot reset. */ 5892 return PCI_ERS_RESULT_NEED_RESET; 5893 } 5894 5895 /** 5896 * e1000_io_slot_reset - called after the pci bus has been reset. 5897 * @pdev: Pointer to PCI device 5898 * 5899 * Restart the card from scratch, as if from a cold-boot. Implementation 5900 * resembles the first-half of the e1000_resume routine. 5901 */ 5902 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) 5903 { 5904 struct net_device *netdev = pci_get_drvdata(pdev); 5905 struct e1000_adapter *adapter = netdev_priv(netdev); 5906 struct e1000_hw *hw = &adapter->hw; 5907 u16 aspm_disable_flag = 0; 5908 int err; 5909 pci_ers_result_t result; 5910 5911 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) 5912 aspm_disable_flag = PCIE_LINK_STATE_L0S; 5913 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) 5914 aspm_disable_flag |= PCIE_LINK_STATE_L1; 5915 if (aspm_disable_flag) 5916 e1000e_disable_aspm(pdev, aspm_disable_flag); 5917 5918 err = pci_enable_device_mem(pdev); 5919 if (err) { 5920 dev_err(&pdev->dev, 5921 "Cannot re-enable PCI device after reset.\n"); 5922 result = PCI_ERS_RESULT_DISCONNECT; 5923 } else { 5924 pci_set_master(pdev); 5925 pdev->state_saved = true; 5926 pci_restore_state(pdev); 5927 5928 pci_enable_wake(pdev, PCI_D3hot, 0); 5929 pci_enable_wake(pdev, PCI_D3cold, 0); 5930 5931 e1000e_reset(adapter); 5932 ew32(WUS, ~0); 5933 result = PCI_ERS_RESULT_RECOVERED; 5934 } 5935 5936 pci_cleanup_aer_uncorrect_error_status(pdev); 5937 5938 return result; 5939 } 5940 5941 /** 5942 * e1000_io_resume - called when traffic can start flowing again. 5943 * @pdev: Pointer to PCI device 5944 * 5945 * This callback is called when the error recovery driver tells us that 5946 * its OK to resume normal operation. Implementation resembles the 5947 * second-half of the e1000_resume routine. 5948 */ 5949 static void e1000_io_resume(struct pci_dev *pdev) 5950 { 5951 struct net_device *netdev = pci_get_drvdata(pdev); 5952 struct e1000_adapter *adapter = netdev_priv(netdev); 5953 5954 e1000_init_manageability_pt(adapter); 5955 5956 if (netif_running(netdev)) { 5957 if (e1000e_up(adapter)) { 5958 dev_err(&pdev->dev, 5959 "can't bring device back up after reset\n"); 5960 return; 5961 } 5962 } 5963 5964 netif_device_attach(netdev); 5965 5966 /* 5967 * If the controller has AMT, do not set DRV_LOAD until the interface 5968 * is up. For all other cases, let the f/w know that the h/w is now 5969 * under the control of the driver. 5970 */ 5971 if (!(adapter->flags & FLAG_HAS_AMT)) 5972 e1000e_get_hw_control(adapter); 5973 5974 } 5975 5976 static void e1000_print_device_info(struct e1000_adapter *adapter) 5977 { 5978 struct e1000_hw *hw = &adapter->hw; 5979 struct net_device *netdev = adapter->netdev; 5980 u32 ret_val; 5981 u8 pba_str[E1000_PBANUM_LENGTH]; 5982 5983 /* print bus type/speed/width info */ 5984 e_info("(PCI Express:2.5GT/s:%s) %pM\n", 5985 /* bus width */ 5986 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : 5987 "Width x1"), 5988 /* MAC address */ 5989 netdev->dev_addr); 5990 e_info("Intel(R) PRO/%s Network Connection\n", 5991 (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000"); 5992 ret_val = e1000_read_pba_string_generic(hw, pba_str, 5993 E1000_PBANUM_LENGTH); 5994 if (ret_val) 5995 strlcpy((char *)pba_str, "Unknown", sizeof(pba_str)); 5996 e_info("MAC: %d, PHY: %d, PBA No: %s\n", 5997 hw->mac.type, hw->phy.type, pba_str); 5998 } 5999 6000 static void e1000_eeprom_checks(struct e1000_adapter *adapter) 6001 { 6002 struct e1000_hw *hw = &adapter->hw; 6003 int ret_val; 6004 u16 buf = 0; 6005 6006 if (hw->mac.type != e1000_82573) 6007 return; 6008 6009 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf); 6010 le16_to_cpus(&buf); 6011 if (!ret_val && (!(buf & (1 << 0)))) { 6012 /* Deep Smart Power Down (DSPD) */ 6013 dev_warn(&adapter->pdev->dev, 6014 "Warning: detected DSPD enabled in EEPROM\n"); 6015 } 6016 } 6017 6018 static int e1000_set_features(struct net_device *netdev, 6019 netdev_features_t features) 6020 { 6021 struct e1000_adapter *adapter = netdev_priv(netdev); 6022 netdev_features_t changed = features ^ netdev->features; 6023 6024 if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) 6025 adapter->flags |= FLAG_TSO_FORCE; 6026 6027 if (!(changed & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX | 6028 NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS | 6029 NETIF_F_RXALL))) 6030 return 0; 6031 6032 /* 6033 * IP payload checksum (enabled with jumbos/packet-split when Rx 6034 * checksum is enabled) and generation of RSS hash is mutually 6035 * exclusive in the hardware. 6036 */ 6037 if (adapter->rx_ps_pages && 6038 (features & NETIF_F_RXCSUM) && (features & NETIF_F_RXHASH)) { 6039 e_err("Enabling both receive checksum offload and receive hashing is not possible with jumbo frames. Disable jumbos or enable only one of the receive offload features.\n"); 6040 return -EINVAL; 6041 } 6042 6043 if (changed & NETIF_F_RXFCS) { 6044 if (features & NETIF_F_RXFCS) { 6045 adapter->flags2 &= ~FLAG2_CRC_STRIPPING; 6046 } else { 6047 /* We need to take it back to defaults, which might mean 6048 * stripping is still disabled at the adapter level. 6049 */ 6050 if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING) 6051 adapter->flags2 |= FLAG2_CRC_STRIPPING; 6052 else 6053 adapter->flags2 &= ~FLAG2_CRC_STRIPPING; 6054 } 6055 } 6056 6057 netdev->features = features; 6058 6059 if (netif_running(netdev)) 6060 e1000e_reinit_locked(adapter); 6061 else 6062 e1000e_reset(adapter); 6063 6064 return 0; 6065 } 6066 6067 static const struct net_device_ops e1000e_netdev_ops = { 6068 .ndo_open = e1000_open, 6069 .ndo_stop = e1000_close, 6070 .ndo_start_xmit = e1000_xmit_frame, 6071 .ndo_get_stats64 = e1000e_get_stats64, 6072 .ndo_set_rx_mode = e1000e_set_rx_mode, 6073 .ndo_set_mac_address = e1000_set_mac, 6074 .ndo_change_mtu = e1000_change_mtu, 6075 .ndo_do_ioctl = e1000_ioctl, 6076 .ndo_tx_timeout = e1000_tx_timeout, 6077 .ndo_validate_addr = eth_validate_addr, 6078 6079 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, 6080 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, 6081 #ifdef CONFIG_NET_POLL_CONTROLLER 6082 .ndo_poll_controller = e1000_netpoll, 6083 #endif 6084 .ndo_set_features = e1000_set_features, 6085 }; 6086 6087 /** 6088 * e1000_probe - Device Initialization Routine 6089 * @pdev: PCI device information struct 6090 * @ent: entry in e1000_pci_tbl 6091 * 6092 * Returns 0 on success, negative on failure 6093 * 6094 * e1000_probe initializes an adapter identified by a pci_dev structure. 6095 * The OS initialization, configuring of the adapter private structure, 6096 * and a hardware reset occur. 6097 **/ 6098 static int __devinit e1000_probe(struct pci_dev *pdev, 6099 const struct pci_device_id *ent) 6100 { 6101 struct net_device *netdev; 6102 struct e1000_adapter *adapter; 6103 struct e1000_hw *hw; 6104 const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; 6105 resource_size_t mmio_start, mmio_len; 6106 resource_size_t flash_start, flash_len; 6107 static int cards_found; 6108 u16 aspm_disable_flag = 0; 6109 int i, err, pci_using_dac; 6110 u16 eeprom_data = 0; 6111 u16 eeprom_apme_mask = E1000_EEPROM_APME; 6112 6113 if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S) 6114 aspm_disable_flag = PCIE_LINK_STATE_L0S; 6115 if (ei->flags2 & FLAG2_DISABLE_ASPM_L1) 6116 aspm_disable_flag |= PCIE_LINK_STATE_L1; 6117 if (aspm_disable_flag) 6118 e1000e_disable_aspm(pdev, aspm_disable_flag); 6119 6120 err = pci_enable_device_mem(pdev); 6121 if (err) 6122 return err; 6123 6124 pci_using_dac = 0; 6125 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 6126 if (!err) { 6127 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 6128 if (!err) 6129 pci_using_dac = 1; 6130 } else { 6131 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 6132 if (err) { 6133 err = dma_set_coherent_mask(&pdev->dev, 6134 DMA_BIT_MASK(32)); 6135 if (err) { 6136 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); 6137 goto err_dma; 6138 } 6139 } 6140 } 6141 6142 err = pci_request_selected_regions_exclusive(pdev, 6143 pci_select_bars(pdev, IORESOURCE_MEM), 6144 e1000e_driver_name); 6145 if (err) 6146 goto err_pci_reg; 6147 6148 /* AER (Advanced Error Reporting) hooks */ 6149 pci_enable_pcie_error_reporting(pdev); 6150 6151 pci_set_master(pdev); 6152 /* PCI config space info */ 6153 err = pci_save_state(pdev); 6154 if (err) 6155 goto err_alloc_etherdev; 6156 6157 err = -ENOMEM; 6158 netdev = alloc_etherdev(sizeof(struct e1000_adapter)); 6159 if (!netdev) 6160 goto err_alloc_etherdev; 6161 6162 SET_NETDEV_DEV(netdev, &pdev->dev); 6163 6164 netdev->irq = pdev->irq; 6165 6166 pci_set_drvdata(pdev, netdev); 6167 adapter = netdev_priv(netdev); 6168 hw = &adapter->hw; 6169 adapter->netdev = netdev; 6170 adapter->pdev = pdev; 6171 adapter->ei = ei; 6172 adapter->pba = ei->pba; 6173 adapter->flags = ei->flags; 6174 adapter->flags2 = ei->flags2; 6175 adapter->hw.adapter = adapter; 6176 adapter->hw.mac.type = ei->mac; 6177 adapter->max_hw_frame_size = ei->max_hw_frame_size; 6178 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; 6179 6180 mmio_start = pci_resource_start(pdev, 0); 6181 mmio_len = pci_resource_len(pdev, 0); 6182 6183 err = -EIO; 6184 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); 6185 if (!adapter->hw.hw_addr) 6186 goto err_ioremap; 6187 6188 if ((adapter->flags & FLAG_HAS_FLASH) && 6189 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { 6190 flash_start = pci_resource_start(pdev, 1); 6191 flash_len = pci_resource_len(pdev, 1); 6192 adapter->hw.flash_address = ioremap(flash_start, flash_len); 6193 if (!adapter->hw.flash_address) 6194 goto err_flashmap; 6195 } 6196 6197 /* construct the net_device struct */ 6198 netdev->netdev_ops = &e1000e_netdev_ops; 6199 e1000e_set_ethtool_ops(netdev); 6200 netdev->watchdog_timeo = 5 * HZ; 6201 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); 6202 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); 6203 6204 netdev->mem_start = mmio_start; 6205 netdev->mem_end = mmio_start + mmio_len; 6206 6207 adapter->bd_number = cards_found++; 6208 6209 e1000e_check_options(adapter); 6210 6211 /* setup adapter struct */ 6212 err = e1000_sw_init(adapter); 6213 if (err) 6214 goto err_sw_init; 6215 6216 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); 6217 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); 6218 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); 6219 6220 err = ei->get_variants(adapter); 6221 if (err) 6222 goto err_hw_init; 6223 6224 if ((adapter->flags & FLAG_IS_ICH) && 6225 (adapter->flags & FLAG_READ_ONLY_NVM)) 6226 e1000e_write_protect_nvm_ich8lan(&adapter->hw); 6227 6228 hw->mac.ops.get_bus_info(&adapter->hw); 6229 6230 adapter->hw.phy.autoneg_wait_to_complete = 0; 6231 6232 /* Copper options */ 6233 if (adapter->hw.phy.media_type == e1000_media_type_copper) { 6234 adapter->hw.phy.mdix = AUTO_ALL_MODES; 6235 adapter->hw.phy.disable_polarity_correction = 0; 6236 adapter->hw.phy.ms_type = e1000_ms_hw_default; 6237 } 6238 6239 if (hw->phy.ops.check_reset_block(hw)) 6240 e_info("PHY reset is blocked due to SOL/IDER session.\n"); 6241 6242 /* Set initial default active device features */ 6243 netdev->features = (NETIF_F_SG | 6244 NETIF_F_HW_VLAN_RX | 6245 NETIF_F_HW_VLAN_TX | 6246 NETIF_F_TSO | 6247 NETIF_F_TSO6 | 6248 NETIF_F_RXHASH | 6249 NETIF_F_RXCSUM | 6250 NETIF_F_HW_CSUM); 6251 6252 /* Set user-changeable features (subset of all device features) */ 6253 netdev->hw_features = netdev->features; 6254 netdev->hw_features |= NETIF_F_RXFCS; 6255 netdev->priv_flags |= IFF_SUPP_NOFCS; 6256 netdev->hw_features |= NETIF_F_RXALL; 6257 6258 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) 6259 netdev->features |= NETIF_F_HW_VLAN_FILTER; 6260 6261 netdev->vlan_features |= (NETIF_F_SG | 6262 NETIF_F_TSO | 6263 NETIF_F_TSO6 | 6264 NETIF_F_HW_CSUM); 6265 6266 netdev->priv_flags |= IFF_UNICAST_FLT; 6267 6268 if (pci_using_dac) { 6269 netdev->features |= NETIF_F_HIGHDMA; 6270 netdev->vlan_features |= NETIF_F_HIGHDMA; 6271 } 6272 6273 if (e1000e_enable_mng_pass_thru(&adapter->hw)) 6274 adapter->flags |= FLAG_MNG_PT_ENABLED; 6275 6276 /* 6277 * before reading the NVM, reset the controller to 6278 * put the device in a known good starting state 6279 */ 6280 adapter->hw.mac.ops.reset_hw(&adapter->hw); 6281 6282 /* 6283 * systems with ASPM and others may see the checksum fail on the first 6284 * attempt. Let's give it a few tries 6285 */ 6286 for (i = 0;; i++) { 6287 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) 6288 break; 6289 if (i == 2) { 6290 e_err("The NVM Checksum Is Not Valid\n"); 6291 err = -EIO; 6292 goto err_eeprom; 6293 } 6294 } 6295 6296 e1000_eeprom_checks(adapter); 6297 6298 /* copy the MAC address */ 6299 if (e1000e_read_mac_addr(&adapter->hw)) 6300 e_err("NVM Read Error while reading MAC address\n"); 6301 6302 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); 6303 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); 6304 6305 if (!is_valid_ether_addr(netdev->perm_addr)) { 6306 e_err("Invalid MAC Address: %pM\n", netdev->perm_addr); 6307 err = -EIO; 6308 goto err_eeprom; 6309 } 6310 6311 init_timer(&adapter->watchdog_timer); 6312 adapter->watchdog_timer.function = e1000_watchdog; 6313 adapter->watchdog_timer.data = (unsigned long) adapter; 6314 6315 init_timer(&adapter->phy_info_timer); 6316 adapter->phy_info_timer.function = e1000_update_phy_info; 6317 adapter->phy_info_timer.data = (unsigned long) adapter; 6318 6319 INIT_WORK(&adapter->reset_task, e1000_reset_task); 6320 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); 6321 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); 6322 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); 6323 INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang); 6324 6325 /* Initialize link parameters. User can change them with ethtool */ 6326 adapter->hw.mac.autoneg = 1; 6327 adapter->fc_autoneg = true; 6328 adapter->hw.fc.requested_mode = e1000_fc_default; 6329 adapter->hw.fc.current_mode = e1000_fc_default; 6330 adapter->hw.phy.autoneg_advertised = 0x2f; 6331 6332 /* ring size defaults */ 6333 adapter->rx_ring->count = 256; 6334 adapter->tx_ring->count = 256; 6335 6336 /* 6337 * Initial Wake on LAN setting - If APM wake is enabled in 6338 * the EEPROM, enable the ACPI Magic Packet filter 6339 */ 6340 if (adapter->flags & FLAG_APME_IN_WUC) { 6341 /* APME bit in EEPROM is mapped to WUC.APME */ 6342 eeprom_data = er32(WUC); 6343 eeprom_apme_mask = E1000_WUC_APME; 6344 if ((hw->mac.type > e1000_ich10lan) && 6345 (eeprom_data & E1000_WUC_PHY_WAKE)) 6346 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; 6347 } else if (adapter->flags & FLAG_APME_IN_CTRL3) { 6348 if (adapter->flags & FLAG_APME_CHECK_PORT_B && 6349 (adapter->hw.bus.func == 1)) 6350 e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_B, 6351 1, &eeprom_data); 6352 else 6353 e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A, 6354 1, &eeprom_data); 6355 } 6356 6357 /* fetch WoL from EEPROM */ 6358 if (eeprom_data & eeprom_apme_mask) 6359 adapter->eeprom_wol |= E1000_WUFC_MAG; 6360 6361 /* 6362 * now that we have the eeprom settings, apply the special cases 6363 * where the eeprom may be wrong or the board simply won't support 6364 * wake on lan on a particular port 6365 */ 6366 if (!(adapter->flags & FLAG_HAS_WOL)) 6367 adapter->eeprom_wol = 0; 6368 6369 /* initialize the wol settings based on the eeprom settings */ 6370 adapter->wol = adapter->eeprom_wol; 6371 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 6372 6373 /* save off EEPROM version number */ 6374 e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); 6375 6376 /* reset the hardware with the new settings */ 6377 e1000e_reset(adapter); 6378 6379 /* 6380 * If the controller has AMT, do not set DRV_LOAD until the interface 6381 * is up. For all other cases, let the f/w know that the h/w is now 6382 * under the control of the driver. 6383 */ 6384 if (!(adapter->flags & FLAG_HAS_AMT)) 6385 e1000e_get_hw_control(adapter); 6386 6387 strlcpy(netdev->name, "eth%d", sizeof(netdev->name)); 6388 err = register_netdev(netdev); 6389 if (err) 6390 goto err_register; 6391 6392 /* carrier off reporting is important to ethtool even BEFORE open */ 6393 netif_carrier_off(netdev); 6394 6395 e1000_print_device_info(adapter); 6396 6397 if (pci_dev_run_wake(pdev)) 6398 pm_runtime_put_noidle(&pdev->dev); 6399 6400 return 0; 6401 6402 err_register: 6403 if (!(adapter->flags & FLAG_HAS_AMT)) 6404 e1000e_release_hw_control(adapter); 6405 err_eeprom: 6406 if (!hw->phy.ops.check_reset_block(hw)) 6407 e1000_phy_hw_reset(&adapter->hw); 6408 err_hw_init: 6409 kfree(adapter->tx_ring); 6410 kfree(adapter->rx_ring); 6411 err_sw_init: 6412 if (adapter->hw.flash_address) 6413 iounmap(adapter->hw.flash_address); 6414 e1000e_reset_interrupt_capability(adapter); 6415 err_flashmap: 6416 iounmap(adapter->hw.hw_addr); 6417 err_ioremap: 6418 free_netdev(netdev); 6419 err_alloc_etherdev: 6420 pci_release_selected_regions(pdev, 6421 pci_select_bars(pdev, IORESOURCE_MEM)); 6422 err_pci_reg: 6423 err_dma: 6424 pci_disable_device(pdev); 6425 return err; 6426 } 6427 6428 /** 6429 * e1000_remove - Device Removal Routine 6430 * @pdev: PCI device information struct 6431 * 6432 * e1000_remove is called by the PCI subsystem to alert the driver 6433 * that it should release a PCI device. The could be caused by a 6434 * Hot-Plug event, or because the driver is going to be removed from 6435 * memory. 6436 **/ 6437 static void __devexit e1000_remove(struct pci_dev *pdev) 6438 { 6439 struct net_device *netdev = pci_get_drvdata(pdev); 6440 struct e1000_adapter *adapter = netdev_priv(netdev); 6441 bool down = test_bit(__E1000_DOWN, &adapter->state); 6442 6443 /* 6444 * The timers may be rescheduled, so explicitly disable them 6445 * from being rescheduled. 6446 */ 6447 if (!down) 6448 set_bit(__E1000_DOWN, &adapter->state); 6449 del_timer_sync(&adapter->watchdog_timer); 6450 del_timer_sync(&adapter->phy_info_timer); 6451 6452 cancel_work_sync(&adapter->reset_task); 6453 cancel_work_sync(&adapter->watchdog_task); 6454 cancel_work_sync(&adapter->downshift_task); 6455 cancel_work_sync(&adapter->update_phy_task); 6456 cancel_work_sync(&adapter->print_hang_task); 6457 6458 if (!(netdev->flags & IFF_UP)) 6459 e1000_power_down_phy(adapter); 6460 6461 /* Don't lie to e1000_close() down the road. */ 6462 if (!down) 6463 clear_bit(__E1000_DOWN, &adapter->state); 6464 unregister_netdev(netdev); 6465 6466 if (pci_dev_run_wake(pdev)) 6467 pm_runtime_get_noresume(&pdev->dev); 6468 6469 /* 6470 * Release control of h/w to f/w. If f/w is AMT enabled, this 6471 * would have already happened in close and is redundant. 6472 */ 6473 e1000e_release_hw_control(adapter); 6474 6475 e1000e_reset_interrupt_capability(adapter); 6476 kfree(adapter->tx_ring); 6477 kfree(adapter->rx_ring); 6478 6479 iounmap(adapter->hw.hw_addr); 6480 if (adapter->hw.flash_address) 6481 iounmap(adapter->hw.flash_address); 6482 pci_release_selected_regions(pdev, 6483 pci_select_bars(pdev, IORESOURCE_MEM)); 6484 6485 free_netdev(netdev); 6486 6487 /* AER disable */ 6488 pci_disable_pcie_error_reporting(pdev); 6489 6490 pci_disable_device(pdev); 6491 } 6492 6493 /* PCI Error Recovery (ERS) */ 6494 static struct pci_error_handlers e1000_err_handler = { 6495 .error_detected = e1000_io_error_detected, 6496 .slot_reset = e1000_io_slot_reset, 6497 .resume = e1000_io_resume, 6498 }; 6499 6500 static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { 6501 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, 6502 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, 6503 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, 6504 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 }, 6505 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 }, 6506 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 }, 6507 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 }, 6508 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 }, 6509 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 }, 6510 6511 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 }, 6512 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 }, 6513 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 }, 6514 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 }, 6515 6516 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 }, 6517 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 }, 6518 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 }, 6519 6520 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 }, 6521 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 }, 6522 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 }, 6523 6524 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT), 6525 board_80003es2lan }, 6526 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT), 6527 board_80003es2lan }, 6528 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT), 6529 board_80003es2lan }, 6530 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT), 6531 board_80003es2lan }, 6532 6533 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan }, 6534 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan }, 6535 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan }, 6536 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan }, 6537 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan }, 6538 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan }, 6539 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan }, 6540 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan }, 6541 6542 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan }, 6543 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan }, 6544 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, 6545 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, 6546 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, 6547 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan }, 6548 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan }, 6549 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan }, 6550 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan }, 6551 6552 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan }, 6553 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan }, 6554 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan }, 6555 6556 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan }, 6557 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan }, 6558 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan }, 6559 6560 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan }, 6561 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan }, 6562 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan }, 6563 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan }, 6564 6565 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan }, 6566 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan }, 6567 6568 { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ 6569 }; 6570 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); 6571 6572 #ifdef CONFIG_PM 6573 static const struct dev_pm_ops e1000_pm_ops = { 6574 SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume) 6575 SET_RUNTIME_PM_OPS(e1000_runtime_suspend, 6576 e1000_runtime_resume, e1000_idle) 6577 }; 6578 #endif 6579 6580 /* PCI Device API Driver */ 6581 static struct pci_driver e1000_driver = { 6582 .name = e1000e_driver_name, 6583 .id_table = e1000_pci_tbl, 6584 .probe = e1000_probe, 6585 .remove = __devexit_p(e1000_remove), 6586 #ifdef CONFIG_PM 6587 .driver = { 6588 .pm = &e1000_pm_ops, 6589 }, 6590 #endif 6591 .shutdown = e1000_shutdown, 6592 .err_handler = &e1000_err_handler 6593 }; 6594 6595 /** 6596 * e1000_init_module - Driver Registration Routine 6597 * 6598 * e1000_init_module is the first routine called when the driver is 6599 * loaded. All it does is register with the PCI subsystem. 6600 **/ 6601 static int __init e1000_init_module(void) 6602 { 6603 int ret; 6604 pr_info("Intel(R) PRO/1000 Network Driver - %s\n", 6605 e1000e_driver_version); 6606 pr_info("Copyright(c) 1999 - 2012 Intel Corporation.\n"); 6607 ret = pci_register_driver(&e1000_driver); 6608 6609 return ret; 6610 } 6611 module_init(e1000_init_module); 6612 6613 /** 6614 * e1000_exit_module - Driver Exit Cleanup Routine 6615 * 6616 * e1000_exit_module is called just before the driver is removed 6617 * from memory. 6618 **/ 6619 static void __exit e1000_exit_module(void) 6620 { 6621 pci_unregister_driver(&e1000_driver); 6622 } 6623 module_exit(e1000_exit_module); 6624 6625 6626 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 6627 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); 6628 MODULE_LICENSE("GPL"); 6629 MODULE_VERSION(DRV_VERSION); 6630 6631 /* netdev.c */ 6632