1 /******************************************************************************* 2 3 Intel PRO/1000 Linux driver 4 Copyright(c) 1999 - 2011 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 Linux NICS <linux.nics@intel.com> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 27 *******************************************************************************/ 28 29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30 31 #include <linux/module.h> 32 #include <linux/types.h> 33 #include <linux/init.h> 34 #include <linux/pci.h> 35 #include <linux/vmalloc.h> 36 #include <linux/pagemap.h> 37 #include <linux/delay.h> 38 #include <linux/netdevice.h> 39 #include <linux/interrupt.h> 40 #include <linux/tcp.h> 41 #include <linux/ipv6.h> 42 #include <linux/slab.h> 43 #include <net/checksum.h> 44 #include <net/ip6_checksum.h> 45 #include <linux/mii.h> 46 #include <linux/ethtool.h> 47 #include <linux/if_vlan.h> 48 #include <linux/cpu.h> 49 #include <linux/smp.h> 50 #include <linux/pm_qos.h> 51 #include <linux/pm_runtime.h> 52 #include <linux/aer.h> 53 #include <linux/prefetch.h> 54 55 #include "e1000.h" 56 57 #define DRV_EXTRAVERSION "-k" 58 59 #define DRV_VERSION "1.5.1" DRV_EXTRAVERSION 60 char e1000e_driver_name[] = "e1000e"; 61 const char e1000e_driver_version[] = DRV_VERSION; 62 63 static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state); 64 65 static const struct e1000_info *e1000_info_tbl[] = { 66 [board_82571] = &e1000_82571_info, 67 [board_82572] = &e1000_82572_info, 68 [board_82573] = &e1000_82573_info, 69 [board_82574] = &e1000_82574_info, 70 [board_82583] = &e1000_82583_info, 71 [board_80003es2lan] = &e1000_es2_info, 72 [board_ich8lan] = &e1000_ich8_info, 73 [board_ich9lan] = &e1000_ich9_info, 74 [board_ich10lan] = &e1000_ich10_info, 75 [board_pchlan] = &e1000_pch_info, 76 [board_pch2lan] = &e1000_pch2_info, 77 }; 78 79 struct e1000_reg_info { 80 u32 ofs; 81 char *name; 82 }; 83 84 #define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ 85 #define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ 86 #define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ 87 #define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ 88 #define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ 89 90 #define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ 91 #define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ 92 #define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ 93 #define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ 94 #define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ 95 96 static const struct e1000_reg_info e1000_reg_info_tbl[] = { 97 98 /* General Registers */ 99 {E1000_CTRL, "CTRL"}, 100 {E1000_STATUS, 
"STATUS"}, 101 {E1000_CTRL_EXT, "CTRL_EXT"}, 102 103 /* Interrupt Registers */ 104 {E1000_ICR, "ICR"}, 105 106 /* Rx Registers */ 107 {E1000_RCTL, "RCTL"}, 108 {E1000_RDLEN, "RDLEN"}, 109 {E1000_RDH, "RDH"}, 110 {E1000_RDT, "RDT"}, 111 {E1000_RDTR, "RDTR"}, 112 {E1000_RXDCTL(0), "RXDCTL"}, 113 {E1000_ERT, "ERT"}, 114 {E1000_RDBAL, "RDBAL"}, 115 {E1000_RDBAH, "RDBAH"}, 116 {E1000_RDFH, "RDFH"}, 117 {E1000_RDFT, "RDFT"}, 118 {E1000_RDFHS, "RDFHS"}, 119 {E1000_RDFTS, "RDFTS"}, 120 {E1000_RDFPC, "RDFPC"}, 121 122 /* Tx Registers */ 123 {E1000_TCTL, "TCTL"}, 124 {E1000_TDBAL, "TDBAL"}, 125 {E1000_TDBAH, "TDBAH"}, 126 {E1000_TDLEN, "TDLEN"}, 127 {E1000_TDH, "TDH"}, 128 {E1000_TDT, "TDT"}, 129 {E1000_TIDV, "TIDV"}, 130 {E1000_TXDCTL(0), "TXDCTL"}, 131 {E1000_TADV, "TADV"}, 132 {E1000_TARC(0), "TARC"}, 133 {E1000_TDFH, "TDFH"}, 134 {E1000_TDFT, "TDFT"}, 135 {E1000_TDFHS, "TDFHS"}, 136 {E1000_TDFTS, "TDFTS"}, 137 {E1000_TDFPC, "TDFPC"}, 138 139 /* List Terminator */ 140 {} 141 }; 142 143 /* 144 * e1000_regdump - register printout routine 145 */ 146 static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) 147 { 148 int n = 0; 149 char rname[16]; 150 u32 regs[8]; 151 152 switch (reginfo->ofs) { 153 case E1000_RXDCTL(0): 154 for (n = 0; n < 2; n++) 155 regs[n] = __er32(hw, E1000_RXDCTL(n)); 156 break; 157 case E1000_TXDCTL(0): 158 for (n = 0; n < 2; n++) 159 regs[n] = __er32(hw, E1000_TXDCTL(n)); 160 break; 161 case E1000_TARC(0): 162 for (n = 0; n < 2; n++) 163 regs[n] = __er32(hw, E1000_TARC(n)); 164 break; 165 default: 166 printk(KERN_INFO "%-15s %08x\n", 167 reginfo->name, __er32(hw, reginfo->ofs)); 168 return; 169 } 170 171 snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]"); 172 printk(KERN_INFO "%-15s ", rname); 173 for (n = 0; n < 2; n++) 174 printk(KERN_CONT "%08x ", regs[n]); 175 printk(KERN_CONT "\n"); 176 } 177 178 /* 179 * e1000e_dump - Print registers, Tx-ring and Rx-ring 180 */ 181 static void e1000e_dump(struct e1000_adapter *adapter) 182 { 183 struct net_device *netdev = adapter->netdev; 184 struct e1000_hw *hw = &adapter->hw; 185 struct e1000_reg_info *reginfo; 186 struct e1000_ring *tx_ring = adapter->tx_ring; 187 struct e1000_tx_desc *tx_desc; 188 struct my_u0 { 189 u64 a; 190 u64 b; 191 } *u0; 192 struct e1000_buffer *buffer_info; 193 struct e1000_ring *rx_ring = adapter->rx_ring; 194 union e1000_rx_desc_packet_split *rx_desc_ps; 195 union e1000_rx_desc_extended *rx_desc; 196 struct my_u1 { 197 u64 a; 198 u64 b; 199 u64 c; 200 u64 d; 201 } *u1; 202 u32 staterr; 203 int i = 0; 204 205 if (!netif_msg_hw(adapter)) 206 return; 207 208 /* Print netdevice Info */ 209 if (netdev) { 210 dev_info(&adapter->pdev->dev, "Net device Info\n"); 211 printk(KERN_INFO "Device Name state " 212 "trans_start last_rx\n"); 213 printk(KERN_INFO "%-15s %016lX %016lX %016lX\n", 214 netdev->name, netdev->state, netdev->trans_start, 215 netdev->last_rx); 216 } 217 218 /* Print Registers */ 219 dev_info(&adapter->pdev->dev, "Register Dump\n"); 220 printk(KERN_INFO " Register Name Value\n"); 221 for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl; 222 reginfo->name; reginfo++) { 223 e1000_regdump(hw, reginfo); 224 } 225 226 /* Print Tx Ring Summary */ 227 if (!netdev || !netif_running(netdev)) 228 goto exit; 229 230 dev_info(&adapter->pdev->dev, "Tx Ring Summary\n"); 231 printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]" 232 " leng ntw timestamp\n"); 233 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; 234 printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n", 
235 0, tx_ring->next_to_use, tx_ring->next_to_clean, 236 (unsigned long long)buffer_info->dma, 237 buffer_info->length, 238 buffer_info->next_to_watch, 239 (unsigned long long)buffer_info->time_stamp); 240 241 /* Print Tx Ring */ 242 if (!netif_msg_tx_done(adapter)) 243 goto rx_ring_summary; 244 245 dev_info(&adapter->pdev->dev, "Tx Ring Dump\n"); 246 247 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) 248 * 249 * Legacy Transmit Descriptor 250 * +--------------------------------------------------------------+ 251 * 0 | Buffer Address [63:0] (Reserved on Write Back) | 252 * +--------------------------------------------------------------+ 253 * 8 | Special | CSS | Status | CMD | CSO | Length | 254 * +--------------------------------------------------------------+ 255 * 63 48 47 36 35 32 31 24 23 16 15 0 256 * 257 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload 258 * 63 48 47 40 39 32 31 16 15 8 7 0 259 * +----------------------------------------------------------------+ 260 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | 261 * +----------------------------------------------------------------+ 262 * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | 263 * +----------------------------------------------------------------+ 264 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 265 * 266 * Extended Data Descriptor (DTYP=0x1) 267 * +----------------------------------------------------------------+ 268 * 0 | Buffer Address [63:0] | 269 * +----------------------------------------------------------------+ 270 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | 271 * +----------------------------------------------------------------+ 272 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 273 */ 274 printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]" 275 " [bi->dma ] leng ntw timestamp bi->skb " 276 "<-- Legacy format\n"); 277 printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]" 278 " [bi->dma ] leng ntw timestamp bi->skb " 279 "<-- Ext Context format\n"); 280 printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]" 281 " [bi->dma ] leng ntw timestamp bi->skb " 282 "<-- Ext Data format\n"); 283 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { 284 tx_desc = E1000_TX_DESC(*tx_ring, i); 285 buffer_info = &tx_ring->buffer_info[i]; 286 u0 = (struct my_u0 *)tx_desc; 287 printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX " 288 "%04X %3X %016llX %p", 289 (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' : 290 ((le64_to_cpu(u0->b) & (1 << 20)) ? 
'd' : 'c')), i, 291 (unsigned long long)le64_to_cpu(u0->a), 292 (unsigned long long)le64_to_cpu(u0->b), 293 (unsigned long long)buffer_info->dma, 294 buffer_info->length, buffer_info->next_to_watch, 295 (unsigned long long)buffer_info->time_stamp, 296 buffer_info->skb); 297 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) 298 printk(KERN_CONT " NTC/U\n"); 299 else if (i == tx_ring->next_to_use) 300 printk(KERN_CONT " NTU\n"); 301 else if (i == tx_ring->next_to_clean) 302 printk(KERN_CONT " NTC\n"); 303 else 304 printk(KERN_CONT "\n"); 305 306 if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) 307 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 308 16, 1, phys_to_virt(buffer_info->dma), 309 buffer_info->length, true); 310 } 311 312 /* Print Rx Ring Summary */ 313 rx_ring_summary: 314 dev_info(&adapter->pdev->dev, "Rx Ring Summary\n"); 315 printk(KERN_INFO "Queue [NTU] [NTC]\n"); 316 printk(KERN_INFO " %5d %5X %5X\n", 0, 317 rx_ring->next_to_use, rx_ring->next_to_clean); 318 319 /* Print Rx Ring */ 320 if (!netif_msg_rx_status(adapter)) 321 goto exit; 322 323 dev_info(&adapter->pdev->dev, "Rx Ring Dump\n"); 324 switch (adapter->rx_ps_pages) { 325 case 1: 326 case 2: 327 case 3: 328 /* [Extended] Packet Split Receive Descriptor Format 329 * 330 * +-----------------------------------------------------+ 331 * 0 | Buffer Address 0 [63:0] | 332 * +-----------------------------------------------------+ 333 * 8 | Buffer Address 1 [63:0] | 334 * +-----------------------------------------------------+ 335 * 16 | Buffer Address 2 [63:0] | 336 * +-----------------------------------------------------+ 337 * 24 | Buffer Address 3 [63:0] | 338 * +-----------------------------------------------------+ 339 */ 340 printk(KERN_INFO "R [desc] [buffer 0 63:0 ] " 341 "[buffer 1 63:0 ] " 342 "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] " 343 "[bi->skb] <-- Ext Pkt Split format\n"); 344 /* [Extended] Receive Descriptor (Write-Back) Format 345 * 346 * 63 48 47 32 31 13 12 8 7 4 3 0 347 * +------------------------------------------------------+ 348 * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS | 349 * | Checksum | Ident | | Queue | | Type | 350 * +------------------------------------------------------+ 351 * 8 | VLAN Tag | Length | Extended Error | Extended Status | 352 * +------------------------------------------------------+ 353 * 63 48 47 32 31 20 19 0 354 */ 355 printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] " 356 "[vl l0 ee es] " 357 "[ l3 l2 l1 hs] [reserved ] ---------------- " 358 "[bi->skb] <-- Ext Rx Write-Back format\n"); 359 for (i = 0; i < rx_ring->count; i++) { 360 buffer_info = &rx_ring->buffer_info[i]; 361 rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i); 362 u1 = (struct my_u1 *)rx_desc_ps; 363 staterr = 364 le32_to_cpu(rx_desc_ps->wb.middle.status_error); 365 if (staterr & E1000_RXD_STAT_DD) { 366 /* Descriptor Done */ 367 printk(KERN_INFO "RWB[0x%03X] %016llX " 368 "%016llX %016llX %016llX " 369 "---------------- %p", i, 370 (unsigned long long)le64_to_cpu(u1->a), 371 (unsigned long long)le64_to_cpu(u1->b), 372 (unsigned long long)le64_to_cpu(u1->c), 373 (unsigned long long)le64_to_cpu(u1->d), 374 buffer_info->skb); 375 } else { 376 printk(KERN_INFO "R [0x%03X] %016llX " 377 "%016llX %016llX %016llX %016llX %p", i, 378 (unsigned long long)le64_to_cpu(u1->a), 379 (unsigned long long)le64_to_cpu(u1->b), 380 (unsigned long long)le64_to_cpu(u1->c), 381 (unsigned long long)le64_to_cpu(u1->d), 382 (unsigned long long)buffer_info->dma, 383 buffer_info->skb); 384 385 if 
(netif_msg_pktdata(adapter)) 386 print_hex_dump(KERN_INFO, "", 387 DUMP_PREFIX_ADDRESS, 16, 1, 388 phys_to_virt(buffer_info->dma), 389 adapter->rx_ps_bsize0, true); 390 } 391 392 if (i == rx_ring->next_to_use) 393 printk(KERN_CONT " NTU\n"); 394 else if (i == rx_ring->next_to_clean) 395 printk(KERN_CONT " NTC\n"); 396 else 397 printk(KERN_CONT "\n"); 398 } 399 break; 400 default: 401 case 0: 402 /* Extended Receive Descriptor (Read) Format 403 * 404 * +-----------------------------------------------------+ 405 * 0 | Buffer Address [63:0] | 406 * +-----------------------------------------------------+ 407 * 8 | Reserved | 408 * +-----------------------------------------------------+ 409 */ 410 printk(KERN_INFO "R [desc] [buf addr 63:0 ] " 411 "[reserved 63:0 ] [bi->dma ] " 412 "[bi->skb] <-- Ext (Read) format\n"); 413 /* Extended Receive Descriptor (Write-Back) Format 414 * 415 * 63 48 47 32 31 24 23 4 3 0 416 * +------------------------------------------------------+ 417 * | RSS Hash | | | | 418 * 0 +-------------------+ Rsvd | Reserved | MRQ RSS | 419 * | Packet | IP | | | Type | 420 * | Checksum | Ident | | | | 421 * +------------------------------------------------------+ 422 * 8 | VLAN Tag | Length | Extended Error | Extended Status | 423 * +------------------------------------------------------+ 424 * 63 48 47 32 31 20 19 0 425 */ 426 printk(KERN_INFO "RWB[desc] [cs ipid mrq] " 427 "[vt ln xe xs] " 428 "[bi->skb] <-- Ext (Write-Back) format\n"); 429 430 for (i = 0; i < rx_ring->count; i++) { 431 buffer_info = &rx_ring->buffer_info[i]; 432 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); 433 u1 = (struct my_u1 *)rx_desc; 434 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 435 if (staterr & E1000_RXD_STAT_DD) { 436 /* Descriptor Done */ 437 printk(KERN_INFO "RWB[0x%03X] %016llX " 438 "%016llX ---------------- %p", i, 439 (unsigned long long)le64_to_cpu(u1->a), 440 (unsigned long long)le64_to_cpu(u1->b), 441 buffer_info->skb); 442 } else { 443 printk(KERN_INFO "R [0x%03X] %016llX " 444 "%016llX %016llX %p", i, 445 (unsigned long long)le64_to_cpu(u1->a), 446 (unsigned long long)le64_to_cpu(u1->b), 447 (unsigned long long)buffer_info->dma, 448 buffer_info->skb); 449 450 if (netif_msg_pktdata(adapter)) 451 print_hex_dump(KERN_INFO, "", 452 DUMP_PREFIX_ADDRESS, 16, 453 1, 454 phys_to_virt 455 (buffer_info->dma), 456 adapter->rx_buffer_len, 457 true); 458 } 459 460 if (i == rx_ring->next_to_use) 461 printk(KERN_CONT " NTU\n"); 462 else if (i == rx_ring->next_to_clean) 463 printk(KERN_CONT " NTC\n"); 464 else 465 printk(KERN_CONT "\n"); 466 } 467 } 468 469 exit: 470 return; 471 } 472 473 /** 474 * e1000_desc_unused - calculate if we have unused descriptors 475 **/ 476 static int e1000_desc_unused(struct e1000_ring *ring) 477 { 478 if (ring->next_to_clean > ring->next_to_use) 479 return ring->next_to_clean - ring->next_to_use - 1; 480 481 return ring->count + ring->next_to_clean - ring->next_to_use - 1; 482 } 483 484 /** 485 * e1000_receive_skb - helper function to handle Rx indications 486 * @adapter: board private structure 487 * @status: descriptor status field as written by hardware 488 * @vlan: descriptor vlan field as written by hardware (no le/be conversion) 489 * @skb: pointer to sk_buff to be indicated to stack 490 **/ 491 static void e1000_receive_skb(struct e1000_adapter *adapter, 492 struct net_device *netdev, struct sk_buff *skb, 493 u8 status, __le16 vlan) 494 { 495 u16 tag = le16_to_cpu(vlan); 496 skb->protocol = eth_type_trans(skb, netdev); 497 498 if (status & 
E1000_RXD_STAT_VP)
		__vlan_hwaccel_put_tag(skb, tag);

	napi_gro_receive(&adapter->napi, skb);
}

/**
 * e1000_rx_checksum - Receive Checksum Offload
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
 * @skb: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      u32 csum, struct sk_buff *skb)
{
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);

	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;
	/* TCP/UDP checksum error bit is set */
	if (errors & E1000_RXD_ERR_TCPE) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status & E1000_RXD_STAT_TCPCS) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		/*
		 * IP fragment with UDP payload
		 * Hardware complements the payload checksum, so we undo it
		 * and then put the value in host order for further stack use.
		 */
		__sum16 sum = (__force __sum16)htons(csum);
		skb->csum = csum_unfold(~sum);
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
	adapter->hw_csum_good++;
}

/**
 * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa()
 * @hw: pointer to the HW structure
 * @tail: address of tail descriptor register
 * @i: value to write to tail descriptor register
 *
 * When updating the tail register, the ME could be accessing Host CSR
 * registers at the same time.  Normally, this is handled in h/w by an
 * arbiter, but on some parts there is a bug that acknowledges Host accesses
 * later than it should, which can leave the descriptor register with an
 * incorrect value.  Work around this by checking the FWSM register, which
 * has bit 24 set while the ME is accessing Host CSR registers; if it is
 * set, wait and retry a number of times before writing the tail.
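 * If the bit never clears within the retry budget and the value read back
 * from the tail register does not match what was written, the callers
 * (e1000e_update_rdt_wa()/e1000e_update_tdt_wa()) disable receives/transmits
 * and schedule a full adapter reset.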
563 **/ 564 static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem * tail, 565 unsigned int i) 566 { 567 unsigned int j = 0; 568 569 while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) && 570 (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI)) 571 udelay(50); 572 573 writel(i, tail); 574 575 if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail))) 576 return E1000_ERR_SWFW_SYNC; 577 578 return 0; 579 } 580 581 static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i) 582 { 583 u8 __iomem *tail = (adapter->hw.hw_addr + adapter->rx_ring->tail); 584 struct e1000_hw *hw = &adapter->hw; 585 586 if (e1000e_update_tail_wa(hw, tail, i)) { 587 u32 rctl = er32(RCTL); 588 ew32(RCTL, rctl & ~E1000_RCTL_EN); 589 e_err("ME firmware caused invalid RDT - resetting\n"); 590 schedule_work(&adapter->reset_task); 591 } 592 } 593 594 static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i) 595 { 596 u8 __iomem *tail = (adapter->hw.hw_addr + adapter->tx_ring->tail); 597 struct e1000_hw *hw = &adapter->hw; 598 599 if (e1000e_update_tail_wa(hw, tail, i)) { 600 u32 tctl = er32(TCTL); 601 ew32(TCTL, tctl & ~E1000_TCTL_EN); 602 e_err("ME firmware caused invalid TDT - resetting\n"); 603 schedule_work(&adapter->reset_task); 604 } 605 } 606 607 /** 608 * e1000_alloc_rx_buffers - Replace used receive buffers 609 * @adapter: address of board private structure 610 **/ 611 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, 612 int cleaned_count, gfp_t gfp) 613 { 614 struct net_device *netdev = adapter->netdev; 615 struct pci_dev *pdev = adapter->pdev; 616 struct e1000_ring *rx_ring = adapter->rx_ring; 617 union e1000_rx_desc_extended *rx_desc; 618 struct e1000_buffer *buffer_info; 619 struct sk_buff *skb; 620 unsigned int i; 621 unsigned int bufsz = adapter->rx_buffer_len; 622 623 i = rx_ring->next_to_use; 624 buffer_info = &rx_ring->buffer_info[i]; 625 626 while (cleaned_count--) { 627 skb = buffer_info->skb; 628 if (skb) { 629 skb_trim(skb, 0); 630 goto map_skb; 631 } 632 633 skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp); 634 if (!skb) { 635 /* Better luck next round */ 636 adapter->alloc_rx_buff_failed++; 637 break; 638 } 639 640 buffer_info->skb = skb; 641 map_skb: 642 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, 643 adapter->rx_buffer_len, 644 DMA_FROM_DEVICE); 645 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 646 dev_err(&pdev->dev, "Rx DMA map failed\n"); 647 adapter->rx_dma_failed++; 648 break; 649 } 650 651 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); 652 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); 653 654 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { 655 /* 656 * Force memory writes to complete before letting h/w 657 * know there are new descriptors to fetch. (Only 658 * applicable for weak-ordered memory model archs, 659 * such as IA-64). 
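 * The tail write below is batched (it happens only once every
 * E1000_RX_BUFFER_WRITE descriptors), and on parts with the ME arbitration
 * erratum (FLAG2_PCIM2PCI_ARBITER_WA) it goes through e1000e_update_rdt_wa()
 * instead of a plain writel().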
660 */ 661 wmb(); 662 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) 663 e1000e_update_rdt_wa(adapter, i); 664 else 665 writel(i, adapter->hw.hw_addr + rx_ring->tail); 666 } 667 i++; 668 if (i == rx_ring->count) 669 i = 0; 670 buffer_info = &rx_ring->buffer_info[i]; 671 } 672 673 rx_ring->next_to_use = i; 674 } 675 676 /** 677 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split 678 * @adapter: address of board private structure 679 **/ 680 static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, 681 int cleaned_count, gfp_t gfp) 682 { 683 struct net_device *netdev = adapter->netdev; 684 struct pci_dev *pdev = adapter->pdev; 685 union e1000_rx_desc_packet_split *rx_desc; 686 struct e1000_ring *rx_ring = adapter->rx_ring; 687 struct e1000_buffer *buffer_info; 688 struct e1000_ps_page *ps_page; 689 struct sk_buff *skb; 690 unsigned int i, j; 691 692 i = rx_ring->next_to_use; 693 buffer_info = &rx_ring->buffer_info[i]; 694 695 while (cleaned_count--) { 696 rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 697 698 for (j = 0; j < PS_PAGE_BUFFERS; j++) { 699 ps_page = &buffer_info->ps_pages[j]; 700 if (j >= adapter->rx_ps_pages) { 701 /* all unused desc entries get hw null ptr */ 702 rx_desc->read.buffer_addr[j + 1] = 703 ~cpu_to_le64(0); 704 continue; 705 } 706 if (!ps_page->page) { 707 ps_page->page = alloc_page(gfp); 708 if (!ps_page->page) { 709 adapter->alloc_rx_buff_failed++; 710 goto no_buffers; 711 } 712 ps_page->dma = dma_map_page(&pdev->dev, 713 ps_page->page, 714 0, PAGE_SIZE, 715 DMA_FROM_DEVICE); 716 if (dma_mapping_error(&pdev->dev, 717 ps_page->dma)) { 718 dev_err(&adapter->pdev->dev, 719 "Rx DMA page map failed\n"); 720 adapter->rx_dma_failed++; 721 goto no_buffers; 722 } 723 } 724 /* 725 * Refresh the desc even if buffer_addrs 726 * didn't change because each write-back 727 * erases this info. 728 */ 729 rx_desc->read.buffer_addr[j + 1] = 730 cpu_to_le64(ps_page->dma); 731 } 732 733 skb = __netdev_alloc_skb_ip_align(netdev, 734 adapter->rx_ps_bsize0, 735 gfp); 736 737 if (!skb) { 738 adapter->alloc_rx_buff_failed++; 739 break; 740 } 741 742 buffer_info->skb = skb; 743 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, 744 adapter->rx_ps_bsize0, 745 DMA_FROM_DEVICE); 746 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 747 dev_err(&pdev->dev, "Rx DMA map failed\n"); 748 adapter->rx_dma_failed++; 749 /* cleanup skb */ 750 dev_kfree_skb_any(skb); 751 buffer_info->skb = NULL; 752 break; 753 } 754 755 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); 756 757 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { 758 /* 759 * Force memory writes to complete before letting h/w 760 * know there are new descriptors to fetch. (Only 761 * applicable for weak-ordered memory model archs, 762 * such as IA-64). 
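 * Note the tail value below is written as (i << 1), presumably because the
 * hardware counts Rx descriptors in 16-byte units while each packet-split
 * descriptor occupies 32 bytes.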
763 */ 764 wmb(); 765 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) 766 e1000e_update_rdt_wa(adapter, i << 1); 767 else 768 writel(i << 1, 769 adapter->hw.hw_addr + rx_ring->tail); 770 } 771 772 i++; 773 if (i == rx_ring->count) 774 i = 0; 775 buffer_info = &rx_ring->buffer_info[i]; 776 } 777 778 no_buffers: 779 rx_ring->next_to_use = i; 780 } 781 782 /** 783 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers 784 * @adapter: address of board private structure 785 * @cleaned_count: number of buffers to allocate this pass 786 **/ 787 788 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, 789 int cleaned_count, gfp_t gfp) 790 { 791 struct net_device *netdev = adapter->netdev; 792 struct pci_dev *pdev = adapter->pdev; 793 union e1000_rx_desc_extended *rx_desc; 794 struct e1000_ring *rx_ring = adapter->rx_ring; 795 struct e1000_buffer *buffer_info; 796 struct sk_buff *skb; 797 unsigned int i; 798 unsigned int bufsz = 256 - 16 /* for skb_reserve */; 799 800 i = rx_ring->next_to_use; 801 buffer_info = &rx_ring->buffer_info[i]; 802 803 while (cleaned_count--) { 804 skb = buffer_info->skb; 805 if (skb) { 806 skb_trim(skb, 0); 807 goto check_page; 808 } 809 810 skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp); 811 if (unlikely(!skb)) { 812 /* Better luck next round */ 813 adapter->alloc_rx_buff_failed++; 814 break; 815 } 816 817 buffer_info->skb = skb; 818 check_page: 819 /* allocate a new page if necessary */ 820 if (!buffer_info->page) { 821 buffer_info->page = alloc_page(gfp); 822 if (unlikely(!buffer_info->page)) { 823 adapter->alloc_rx_buff_failed++; 824 break; 825 } 826 } 827 828 if (!buffer_info->dma) 829 buffer_info->dma = dma_map_page(&pdev->dev, 830 buffer_info->page, 0, 831 PAGE_SIZE, 832 DMA_FROM_DEVICE); 833 834 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); 835 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); 836 837 if (unlikely(++i == rx_ring->count)) 838 i = 0; 839 buffer_info = &rx_ring->buffer_info[i]; 840 } 841 842 if (likely(rx_ring->next_to_use != i)) { 843 rx_ring->next_to_use = i; 844 if (unlikely(i-- == 0)) 845 i = (rx_ring->count - 1); 846 847 /* Force memory writes to complete before letting h/w 848 * know there are new descriptors to fetch. (Only 849 * applicable for weak-ordered memory model archs, 850 * such as IA-64). 
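 * The index written below is i after the decrement above, i.e. the last
 * descriptor actually initialized in this pass; the write is skipped
 * entirely when nothing was refilled.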
*/ 851 wmb(); 852 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) 853 e1000e_update_rdt_wa(adapter, i); 854 else 855 writel(i, adapter->hw.hw_addr + rx_ring->tail); 856 } 857 } 858 859 /** 860 * e1000_clean_rx_irq - Send received data up the network stack; legacy 861 * @adapter: board private structure 862 * 863 * the return value indicates whether actual cleaning was done, there 864 * is no guarantee that everything was cleaned 865 **/ 866 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, 867 int *work_done, int work_to_do) 868 { 869 struct net_device *netdev = adapter->netdev; 870 struct pci_dev *pdev = adapter->pdev; 871 struct e1000_hw *hw = &adapter->hw; 872 struct e1000_ring *rx_ring = adapter->rx_ring; 873 union e1000_rx_desc_extended *rx_desc, *next_rxd; 874 struct e1000_buffer *buffer_info, *next_buffer; 875 u32 length, staterr; 876 unsigned int i; 877 int cleaned_count = 0; 878 bool cleaned = 0; 879 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 880 881 i = rx_ring->next_to_clean; 882 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); 883 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 884 buffer_info = &rx_ring->buffer_info[i]; 885 886 while (staterr & E1000_RXD_STAT_DD) { 887 struct sk_buff *skb; 888 889 if (*work_done >= work_to_do) 890 break; 891 (*work_done)++; 892 rmb(); /* read descriptor and rx_buffer_info after status DD */ 893 894 skb = buffer_info->skb; 895 buffer_info->skb = NULL; 896 897 prefetch(skb->data - NET_IP_ALIGN); 898 899 i++; 900 if (i == rx_ring->count) 901 i = 0; 902 next_rxd = E1000_RX_DESC_EXT(*rx_ring, i); 903 prefetch(next_rxd); 904 905 next_buffer = &rx_ring->buffer_info[i]; 906 907 cleaned = 1; 908 cleaned_count++; 909 dma_unmap_single(&pdev->dev, 910 buffer_info->dma, 911 adapter->rx_buffer_len, 912 DMA_FROM_DEVICE); 913 buffer_info->dma = 0; 914 915 length = le16_to_cpu(rx_desc->wb.upper.length); 916 917 /* 918 * !EOP means multiple descriptors were used to store a single 919 * packet, if that's the case we need to toss it. 
In fact, we 920 * need to toss every packet with the EOP bit clear and the 921 * next frame that _does_ have the EOP bit set, as it is by 922 * definition only a frame fragment 923 */ 924 if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) 925 adapter->flags2 |= FLAG2_IS_DISCARDING; 926 927 if (adapter->flags2 & FLAG2_IS_DISCARDING) { 928 /* All receives must fit into a single buffer */ 929 e_dbg("Receive packet consumed multiple buffers\n"); 930 /* recycle */ 931 buffer_info->skb = skb; 932 if (staterr & E1000_RXD_STAT_EOP) 933 adapter->flags2 &= ~FLAG2_IS_DISCARDING; 934 goto next_desc; 935 } 936 937 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { 938 /* recycle */ 939 buffer_info->skb = skb; 940 goto next_desc; 941 } 942 943 /* adjust length to remove Ethernet CRC */ 944 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) 945 length -= 4; 946 947 total_rx_bytes += length; 948 total_rx_packets++; 949 950 /* 951 * code added for copybreak, this should improve 952 * performance for small packets with large amounts 953 * of reassembly being done in the stack 954 */ 955 if (length < copybreak) { 956 struct sk_buff *new_skb = 957 netdev_alloc_skb_ip_align(netdev, length); 958 if (new_skb) { 959 skb_copy_to_linear_data_offset(new_skb, 960 -NET_IP_ALIGN, 961 (skb->data - 962 NET_IP_ALIGN), 963 (length + 964 NET_IP_ALIGN)); 965 /* save the skb in buffer_info as good */ 966 buffer_info->skb = skb; 967 skb = new_skb; 968 } 969 /* else just continue with the old one */ 970 } 971 /* end copybreak code */ 972 skb_put(skb, length); 973 974 /* Receive Checksum Offload */ 975 e1000_rx_checksum(adapter, staterr, 976 le16_to_cpu(rx_desc->wb.lower.hi_dword. 977 csum_ip.csum), skb); 978 979 e1000_receive_skb(adapter, netdev, skb, staterr, 980 rx_desc->wb.upper.vlan); 981 982 next_desc: 983 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF); 984 985 /* return some buffers to hardware, one at a time is too slow */ 986 if (cleaned_count >= E1000_RX_BUFFER_WRITE) { 987 adapter->alloc_rx_buf(adapter, cleaned_count, 988 GFP_ATOMIC); 989 cleaned_count = 0; 990 } 991 992 /* use prefetched values */ 993 rx_desc = next_rxd; 994 buffer_info = next_buffer; 995 996 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 997 } 998 rx_ring->next_to_clean = i; 999 1000 cleaned_count = e1000_desc_unused(rx_ring); 1001 if (cleaned_count) 1002 adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC); 1003 1004 adapter->total_rx_bytes += total_rx_bytes; 1005 adapter->total_rx_packets += total_rx_packets; 1006 return cleaned; 1007 } 1008 1009 static void e1000_put_txbuf(struct e1000_adapter *adapter, 1010 struct e1000_buffer *buffer_info) 1011 { 1012 if (buffer_info->dma) { 1013 if (buffer_info->mapped_as_page) 1014 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, 1015 buffer_info->length, DMA_TO_DEVICE); 1016 else 1017 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, 1018 buffer_info->length, DMA_TO_DEVICE); 1019 buffer_info->dma = 0; 1020 } 1021 if (buffer_info->skb) { 1022 dev_kfree_skb_any(buffer_info->skb); 1023 buffer_info->skb = NULL; 1024 } 1025 buffer_info->time_stamp = 0; 1026 } 1027 1028 static void e1000_print_hw_hang(struct work_struct *work) 1029 { 1030 struct e1000_adapter *adapter = container_of(work, 1031 struct e1000_adapter, 1032 print_hang_task); 1033 struct e1000_ring *tx_ring = adapter->tx_ring; 1034 unsigned int i = tx_ring->next_to_clean; 1035 unsigned int eop = tx_ring->buffer_info[i].next_to_watch; 1036 struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop); 1037 struct e1000_hw *hw = 
&adapter->hw; 1038 u16 phy_status, phy_1000t_status, phy_ext_status; 1039 u16 pci_status; 1040 1041 if (test_bit(__E1000_DOWN, &adapter->state)) 1042 return; 1043 1044 e1e_rphy(hw, PHY_STATUS, &phy_status); 1045 e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); 1046 e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); 1047 1048 pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status); 1049 1050 /* detected Hardware unit hang */ 1051 e_err("Detected Hardware Unit Hang:\n" 1052 " TDH <%x>\n" 1053 " TDT <%x>\n" 1054 " next_to_use <%x>\n" 1055 " next_to_clean <%x>\n" 1056 "buffer_info[next_to_clean]:\n" 1057 " time_stamp <%lx>\n" 1058 " next_to_watch <%x>\n" 1059 " jiffies <%lx>\n" 1060 " next_to_watch.status <%x>\n" 1061 "MAC Status <%x>\n" 1062 "PHY Status <%x>\n" 1063 "PHY 1000BASE-T Status <%x>\n" 1064 "PHY Extended Status <%x>\n" 1065 "PCI Status <%x>\n", 1066 readl(adapter->hw.hw_addr + tx_ring->head), 1067 readl(adapter->hw.hw_addr + tx_ring->tail), 1068 tx_ring->next_to_use, 1069 tx_ring->next_to_clean, 1070 tx_ring->buffer_info[eop].time_stamp, 1071 eop, 1072 jiffies, 1073 eop_desc->upper.fields.status, 1074 er32(STATUS), 1075 phy_status, 1076 phy_1000t_status, 1077 phy_ext_status, 1078 pci_status); 1079 } 1080 1081 /** 1082 * e1000_clean_tx_irq - Reclaim resources after transmit completes 1083 * @adapter: board private structure 1084 * 1085 * the return value indicates whether actual cleaning was done, there 1086 * is no guarantee that everything was cleaned 1087 **/ 1088 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) 1089 { 1090 struct net_device *netdev = adapter->netdev; 1091 struct e1000_hw *hw = &adapter->hw; 1092 struct e1000_ring *tx_ring = adapter->tx_ring; 1093 struct e1000_tx_desc *tx_desc, *eop_desc; 1094 struct e1000_buffer *buffer_info; 1095 unsigned int i, eop; 1096 unsigned int count = 0; 1097 unsigned int total_tx_bytes = 0, total_tx_packets = 0; 1098 1099 i = tx_ring->next_to_clean; 1100 eop = tx_ring->buffer_info[i].next_to_watch; 1101 eop_desc = E1000_TX_DESC(*tx_ring, eop); 1102 1103 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && 1104 (count < tx_ring->count)) { 1105 bool cleaned = false; 1106 rmb(); /* read buffer_info after eop_desc */ 1107 for (; !cleaned; count++) { 1108 tx_desc = E1000_TX_DESC(*tx_ring, i); 1109 buffer_info = &tx_ring->buffer_info[i]; 1110 cleaned = (i == eop); 1111 1112 if (cleaned) { 1113 total_tx_packets += buffer_info->segs; 1114 total_tx_bytes += buffer_info->bytecount; 1115 } 1116 1117 e1000_put_txbuf(adapter, buffer_info); 1118 tx_desc->upper.data = 0; 1119 1120 i++; 1121 if (i == tx_ring->count) 1122 i = 0; 1123 } 1124 1125 if (i == tx_ring->next_to_use) 1126 break; 1127 eop = tx_ring->buffer_info[i].next_to_watch; 1128 eop_desc = E1000_TX_DESC(*tx_ring, eop); 1129 } 1130 1131 tx_ring->next_to_clean = i; 1132 1133 #define TX_WAKE_THRESHOLD 32 1134 if (count && netif_carrier_ok(netdev) && 1135 e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) { 1136 /* Make sure that anybody stopping the queue after this 1137 * sees the new next_to_clean. 
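 * (The stop path in the transmit routine issues a matching barrier after
 * stopping the queue and before it re-checks for free descriptors, so a
 * wake-up cannot be missed.)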
1138 */ 1139 smp_mb(); 1140 1141 if (netif_queue_stopped(netdev) && 1142 !(test_bit(__E1000_DOWN, &adapter->state))) { 1143 netif_wake_queue(netdev); 1144 ++adapter->restart_queue; 1145 } 1146 } 1147 1148 if (adapter->detect_tx_hung) { 1149 /* 1150 * Detect a transmit hang in hardware, this serializes the 1151 * check with the clearing of time_stamp and movement of i 1152 */ 1153 adapter->detect_tx_hung = 0; 1154 if (tx_ring->buffer_info[i].time_stamp && 1155 time_after(jiffies, tx_ring->buffer_info[i].time_stamp 1156 + (adapter->tx_timeout_factor * HZ)) && 1157 !(er32(STATUS) & E1000_STATUS_TXOFF)) { 1158 schedule_work(&adapter->print_hang_task); 1159 netif_stop_queue(netdev); 1160 } 1161 } 1162 adapter->total_tx_bytes += total_tx_bytes; 1163 adapter->total_tx_packets += total_tx_packets; 1164 return count < tx_ring->count; 1165 } 1166 1167 /** 1168 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split 1169 * @adapter: board private structure 1170 * 1171 * the return value indicates whether actual cleaning was done, there 1172 * is no guarantee that everything was cleaned 1173 **/ 1174 static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, 1175 int *work_done, int work_to_do) 1176 { 1177 struct e1000_hw *hw = &adapter->hw; 1178 union e1000_rx_desc_packet_split *rx_desc, *next_rxd; 1179 struct net_device *netdev = adapter->netdev; 1180 struct pci_dev *pdev = adapter->pdev; 1181 struct e1000_ring *rx_ring = adapter->rx_ring; 1182 struct e1000_buffer *buffer_info, *next_buffer; 1183 struct e1000_ps_page *ps_page; 1184 struct sk_buff *skb; 1185 unsigned int i, j; 1186 u32 length, staterr; 1187 int cleaned_count = 0; 1188 bool cleaned = 0; 1189 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 1190 1191 i = rx_ring->next_to_clean; 1192 rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 1193 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); 1194 buffer_info = &rx_ring->buffer_info[i]; 1195 1196 while (staterr & E1000_RXD_STAT_DD) { 1197 if (*work_done >= work_to_do) 1198 break; 1199 (*work_done)++; 1200 skb = buffer_info->skb; 1201 rmb(); /* read descriptor and rx_buffer_info after status DD */ 1202 1203 /* in the packet split case this is header only */ 1204 prefetch(skb->data - NET_IP_ALIGN); 1205 1206 i++; 1207 if (i == rx_ring->count) 1208 i = 0; 1209 next_rxd = E1000_RX_DESC_PS(*rx_ring, i); 1210 prefetch(next_rxd); 1211 1212 next_buffer = &rx_ring->buffer_info[i]; 1213 1214 cleaned = 1; 1215 cleaned_count++; 1216 dma_unmap_single(&pdev->dev, buffer_info->dma, 1217 adapter->rx_ps_bsize0, DMA_FROM_DEVICE); 1218 buffer_info->dma = 0; 1219 1220 /* see !EOP comment in other Rx routine */ 1221 if (!(staterr & E1000_RXD_STAT_EOP)) 1222 adapter->flags2 |= FLAG2_IS_DISCARDING; 1223 1224 if (adapter->flags2 & FLAG2_IS_DISCARDING) { 1225 e_dbg("Packet Split buffers didn't pick up the full " 1226 "packet\n"); 1227 dev_kfree_skb_irq(skb); 1228 if (staterr & E1000_RXD_STAT_EOP) 1229 adapter->flags2 &= ~FLAG2_IS_DISCARDING; 1230 goto next_desc; 1231 } 1232 1233 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { 1234 dev_kfree_skb_irq(skb); 1235 goto next_desc; 1236 } 1237 1238 length = le16_to_cpu(rx_desc->wb.middle.length0); 1239 1240 if (!length) { 1241 e_dbg("Last part of the packet spanning multiple " 1242 "descriptors\n"); 1243 dev_kfree_skb_irq(skb); 1244 goto next_desc; 1245 } 1246 1247 /* Good Receive */ 1248 skb_put(skb, length); 1249 1250 { 1251 /* 1252 * this looks ugly, but it seems compiler issues make it 1253 * more efficient than reusing j 1254 */ 1255 
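/*
 * l1 is the number of bytes the hardware placed in the first page buffer;
 * when it is small (at most copybreak) and the whole frame fits in the
 * header buffer (length + l1 <= rx_ps_bsize0), the data is copied into the
 * header skb below so the page and its DMA mapping can be reused.
 */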
			int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

			/*
			 * page alloc/put takes too long and affects small
			 * packet throughput, so unsplit small packets and
			 * save the alloc/put; only valid in softirq (napi)
			 * context to call kmap_*
			 */
			if (l1 && (l1 <= copybreak) &&
			    ((length + l1) <= adapter->rx_ps_bsize0)) {
				u8 *vaddr;

				ps_page = &buffer_info->ps_pages[0];

				/*
				 * there is no documentation about how to call
				 * kmap_atomic, so we can't hold the mapping
				 * very long
				 */
				dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
							PAGE_SIZE, DMA_FROM_DEVICE);
				vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
				memcpy(skb_tail_pointer(skb), vaddr, l1);
				kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
				dma_sync_single_for_device(&pdev->dev, ps_page->dma,
							   PAGE_SIZE, DMA_FROM_DEVICE);

				/* remove the CRC */
				if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
					l1 -= 4;

				skb_put(skb, l1);
				goto copydone;
			} /* if */
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
			if (!length)
				break;

			ps_page = &buffer_info->ps_pages[j];
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
			ps_page->page = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += PAGE_SIZE;
		}

		/*
		 * strip the ethernet CRC; the problem is we're using pages
		 * now so this whole operation can get a little CPU intensive
		 */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
			pskb_trim(skb, skb->len - 4);

copydone:
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr, le16_to_cpu(
			rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);

		if (rx_desc->wb.upper.header_status &
		    cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
			adapter->rx_hdr_split++;

		e1000_receive_skb(adapter, netdev, skb,
				  staterr, rx_desc->wb.middle.vlan);

next_desc:
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_consume_page - helper function
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += PAGE_SIZE;
}

/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned
 **/

static bool
e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, 1375 int *work_done, int work_to_do) 1376 { 1377 struct net_device *netdev = adapter->netdev; 1378 struct pci_dev *pdev = adapter->pdev; 1379 struct e1000_ring *rx_ring = adapter->rx_ring; 1380 union e1000_rx_desc_extended *rx_desc, *next_rxd; 1381 struct e1000_buffer *buffer_info, *next_buffer; 1382 u32 length, staterr; 1383 unsigned int i; 1384 int cleaned_count = 0; 1385 bool cleaned = false; 1386 unsigned int total_rx_bytes=0, total_rx_packets=0; 1387 1388 i = rx_ring->next_to_clean; 1389 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); 1390 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 1391 buffer_info = &rx_ring->buffer_info[i]; 1392 1393 while (staterr & E1000_RXD_STAT_DD) { 1394 struct sk_buff *skb; 1395 1396 if (*work_done >= work_to_do) 1397 break; 1398 (*work_done)++; 1399 rmb(); /* read descriptor and rx_buffer_info after status DD */ 1400 1401 skb = buffer_info->skb; 1402 buffer_info->skb = NULL; 1403 1404 ++i; 1405 if (i == rx_ring->count) 1406 i = 0; 1407 next_rxd = E1000_RX_DESC_EXT(*rx_ring, i); 1408 prefetch(next_rxd); 1409 1410 next_buffer = &rx_ring->buffer_info[i]; 1411 1412 cleaned = true; 1413 cleaned_count++; 1414 dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE, 1415 DMA_FROM_DEVICE); 1416 buffer_info->dma = 0; 1417 1418 length = le16_to_cpu(rx_desc->wb.upper.length); 1419 1420 /* errors is only valid for DD + EOP descriptors */ 1421 if (unlikely((staterr & E1000_RXD_STAT_EOP) && 1422 (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK))) { 1423 /* recycle both page and skb */ 1424 buffer_info->skb = skb; 1425 /* an error means any chain goes out the window too */ 1426 if (rx_ring->rx_skb_top) 1427 dev_kfree_skb_irq(rx_ring->rx_skb_top); 1428 rx_ring->rx_skb_top = NULL; 1429 goto next_desc; 1430 } 1431 1432 #define rxtop (rx_ring->rx_skb_top) 1433 if (!(staterr & E1000_RXD_STAT_EOP)) { 1434 /* this descriptor is only the beginning (or middle) */ 1435 if (!rxtop) { 1436 /* this is the beginning of a chain */ 1437 rxtop = skb; 1438 skb_fill_page_desc(rxtop, 0, buffer_info->page, 1439 0, length); 1440 } else { 1441 /* this is the middle of a chain */ 1442 skb_fill_page_desc(rxtop, 1443 skb_shinfo(rxtop)->nr_frags, 1444 buffer_info->page, 0, length); 1445 /* re-use the skb, only consumed the page */ 1446 buffer_info->skb = skb; 1447 } 1448 e1000_consume_page(buffer_info, rxtop, length); 1449 goto next_desc; 1450 } else { 1451 if (rxtop) { 1452 /* end of the chain */ 1453 skb_fill_page_desc(rxtop, 1454 skb_shinfo(rxtop)->nr_frags, 1455 buffer_info->page, 0, length); 1456 /* re-use the current skb, we only consumed the 1457 * page */ 1458 buffer_info->skb = skb; 1459 skb = rxtop; 1460 rxtop = NULL; 1461 e1000_consume_page(buffer_info, skb, length); 1462 } else { 1463 /* no chain, got EOP, this buf is the packet 1464 * copybreak to save the put_page/alloc_page */ 1465 if (length <= copybreak && 1466 skb_tailroom(skb) >= length) { 1467 u8 *vaddr; 1468 vaddr = kmap_atomic(buffer_info->page, 1469 KM_SKB_DATA_SOFTIRQ); 1470 memcpy(skb_tail_pointer(skb), vaddr, 1471 length); 1472 kunmap_atomic(vaddr, 1473 KM_SKB_DATA_SOFTIRQ); 1474 /* re-use the page, so don't erase 1475 * buffer_info->page */ 1476 skb_put(skb, length); 1477 } else { 1478 skb_fill_page_desc(skb, 0, 1479 buffer_info->page, 0, 1480 length); 1481 e1000_consume_page(buffer_info, skb, 1482 length); 1483 } 1484 } 1485 } 1486 1487 /* Receive Checksum Offload XXX recompute due to CRC strip? 
*/ 1488 e1000_rx_checksum(adapter, staterr, 1489 le16_to_cpu(rx_desc->wb.lower.hi_dword. 1490 csum_ip.csum), skb); 1491 1492 /* probably a little skewed due to removing CRC */ 1493 total_rx_bytes += skb->len; 1494 total_rx_packets++; 1495 1496 /* eth type trans needs skb->data to point to something */ 1497 if (!pskb_may_pull(skb, ETH_HLEN)) { 1498 e_err("pskb_may_pull failed.\n"); 1499 dev_kfree_skb_irq(skb); 1500 goto next_desc; 1501 } 1502 1503 e1000_receive_skb(adapter, netdev, skb, staterr, 1504 rx_desc->wb.upper.vlan); 1505 1506 next_desc: 1507 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF); 1508 1509 /* return some buffers to hardware, one at a time is too slow */ 1510 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { 1511 adapter->alloc_rx_buf(adapter, cleaned_count, 1512 GFP_ATOMIC); 1513 cleaned_count = 0; 1514 } 1515 1516 /* use prefetched values */ 1517 rx_desc = next_rxd; 1518 buffer_info = next_buffer; 1519 1520 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 1521 } 1522 rx_ring->next_to_clean = i; 1523 1524 cleaned_count = e1000_desc_unused(rx_ring); 1525 if (cleaned_count) 1526 adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC); 1527 1528 adapter->total_rx_bytes += total_rx_bytes; 1529 adapter->total_rx_packets += total_rx_packets; 1530 return cleaned; 1531 } 1532 1533 /** 1534 * e1000_clean_rx_ring - Free Rx Buffers per Queue 1535 * @adapter: board private structure 1536 **/ 1537 static void e1000_clean_rx_ring(struct e1000_adapter *adapter) 1538 { 1539 struct e1000_ring *rx_ring = adapter->rx_ring; 1540 struct e1000_buffer *buffer_info; 1541 struct e1000_ps_page *ps_page; 1542 struct pci_dev *pdev = adapter->pdev; 1543 unsigned int i, j; 1544 1545 /* Free all the Rx ring sk_buffs */ 1546 for (i = 0; i < rx_ring->count; i++) { 1547 buffer_info = &rx_ring->buffer_info[i]; 1548 if (buffer_info->dma) { 1549 if (adapter->clean_rx == e1000_clean_rx_irq) 1550 dma_unmap_single(&pdev->dev, buffer_info->dma, 1551 adapter->rx_buffer_len, 1552 DMA_FROM_DEVICE); 1553 else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) 1554 dma_unmap_page(&pdev->dev, buffer_info->dma, 1555 PAGE_SIZE, 1556 DMA_FROM_DEVICE); 1557 else if (adapter->clean_rx == e1000_clean_rx_irq_ps) 1558 dma_unmap_single(&pdev->dev, buffer_info->dma, 1559 adapter->rx_ps_bsize0, 1560 DMA_FROM_DEVICE); 1561 buffer_info->dma = 0; 1562 } 1563 1564 if (buffer_info->page) { 1565 put_page(buffer_info->page); 1566 buffer_info->page = NULL; 1567 } 1568 1569 if (buffer_info->skb) { 1570 dev_kfree_skb(buffer_info->skb); 1571 buffer_info->skb = NULL; 1572 } 1573 1574 for (j = 0; j < PS_PAGE_BUFFERS; j++) { 1575 ps_page = &buffer_info->ps_pages[j]; 1576 if (!ps_page->page) 1577 break; 1578 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, 1579 DMA_FROM_DEVICE); 1580 ps_page->dma = 0; 1581 put_page(ps_page->page); 1582 ps_page->page = NULL; 1583 } 1584 } 1585 1586 /* there also may be some cached data from a chained receive */ 1587 if (rx_ring->rx_skb_top) { 1588 dev_kfree_skb(rx_ring->rx_skb_top); 1589 rx_ring->rx_skb_top = NULL; 1590 } 1591 1592 /* Zero out the descriptor ring */ 1593 memset(rx_ring->desc, 0, rx_ring->size); 1594 1595 rx_ring->next_to_clean = 0; 1596 rx_ring->next_to_use = 0; 1597 adapter->flags2 &= ~FLAG2_IS_DISCARDING; 1598 1599 writel(0, adapter->hw.hw_addr + rx_ring->head); 1600 writel(0, adapter->hw.hw_addr + rx_ring->tail); 1601 } 1602 1603 static void e1000e_downshift_workaround(struct work_struct *work) 1604 { 1605 struct e1000_adapter *adapter = container_of(work, 1606 struct 
e1000_adapter, downshift_task); 1607 1608 if (test_bit(__E1000_DOWN, &adapter->state)) 1609 return; 1610 1611 e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); 1612 } 1613 1614 /** 1615 * e1000_intr_msi - Interrupt Handler 1616 * @irq: interrupt number 1617 * @data: pointer to a network interface device structure 1618 **/ 1619 static irqreturn_t e1000_intr_msi(int irq, void *data) 1620 { 1621 struct net_device *netdev = data; 1622 struct e1000_adapter *adapter = netdev_priv(netdev); 1623 struct e1000_hw *hw = &adapter->hw; 1624 u32 icr = er32(ICR); 1625 1626 /* 1627 * read ICR disables interrupts using IAM 1628 */ 1629 1630 if (icr & E1000_ICR_LSC) { 1631 hw->mac.get_link_status = 1; 1632 /* 1633 * ICH8 workaround-- Call gig speed drop workaround on cable 1634 * disconnect (LSC) before accessing any PHY registers 1635 */ 1636 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && 1637 (!(er32(STATUS) & E1000_STATUS_LU))) 1638 schedule_work(&adapter->downshift_task); 1639 1640 /* 1641 * 80003ES2LAN workaround-- For packet buffer work-around on 1642 * link down event; disable receives here in the ISR and reset 1643 * adapter in watchdog 1644 */ 1645 if (netif_carrier_ok(netdev) && 1646 adapter->flags & FLAG_RX_NEEDS_RESTART) { 1647 /* disable receives */ 1648 u32 rctl = er32(RCTL); 1649 ew32(RCTL, rctl & ~E1000_RCTL_EN); 1650 adapter->flags |= FLAG_RX_RESTART_NOW; 1651 } 1652 /* guard against interrupt when we're going down */ 1653 if (!test_bit(__E1000_DOWN, &adapter->state)) 1654 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1655 } 1656 1657 if (napi_schedule_prep(&adapter->napi)) { 1658 adapter->total_tx_bytes = 0; 1659 adapter->total_tx_packets = 0; 1660 adapter->total_rx_bytes = 0; 1661 adapter->total_rx_packets = 0; 1662 __napi_schedule(&adapter->napi); 1663 } 1664 1665 return IRQ_HANDLED; 1666 } 1667 1668 /** 1669 * e1000_intr - Interrupt Handler 1670 * @irq: interrupt number 1671 * @data: pointer to a network interface device structure 1672 **/ 1673 static irqreturn_t e1000_intr(int irq, void *data) 1674 { 1675 struct net_device *netdev = data; 1676 struct e1000_adapter *adapter = netdev_priv(netdev); 1677 struct e1000_hw *hw = &adapter->hw; 1678 u32 rctl, icr = er32(ICR); 1679 1680 if (!icr || test_bit(__E1000_DOWN, &adapter->state)) 1681 return IRQ_NONE; /* Not our interrupt */ 1682 1683 /* 1684 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is 1685 * not set, then the adapter didn't send an interrupt 1686 */ 1687 if (!(icr & E1000_ICR_INT_ASSERTED)) 1688 return IRQ_NONE; 1689 1690 /* 1691 * Interrupt Auto-Mask...upon reading ICR, 1692 * interrupts are masked. 
No need for the 1693 * IMC write 1694 */ 1695 1696 if (icr & E1000_ICR_LSC) { 1697 hw->mac.get_link_status = 1; 1698 /* 1699 * ICH8 workaround-- Call gig speed drop workaround on cable 1700 * disconnect (LSC) before accessing any PHY registers 1701 */ 1702 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && 1703 (!(er32(STATUS) & E1000_STATUS_LU))) 1704 schedule_work(&adapter->downshift_task); 1705 1706 /* 1707 * 80003ES2LAN workaround-- 1708 * For packet buffer work-around on link down event; 1709 * disable receives here in the ISR and 1710 * reset adapter in watchdog 1711 */ 1712 if (netif_carrier_ok(netdev) && 1713 (adapter->flags & FLAG_RX_NEEDS_RESTART)) { 1714 /* disable receives */ 1715 rctl = er32(RCTL); 1716 ew32(RCTL, rctl & ~E1000_RCTL_EN); 1717 adapter->flags |= FLAG_RX_RESTART_NOW; 1718 } 1719 /* guard against interrupt when we're going down */ 1720 if (!test_bit(__E1000_DOWN, &adapter->state)) 1721 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1722 } 1723 1724 if (napi_schedule_prep(&adapter->napi)) { 1725 adapter->total_tx_bytes = 0; 1726 adapter->total_tx_packets = 0; 1727 adapter->total_rx_bytes = 0; 1728 adapter->total_rx_packets = 0; 1729 __napi_schedule(&adapter->napi); 1730 } 1731 1732 return IRQ_HANDLED; 1733 } 1734 1735 static irqreturn_t e1000_msix_other(int irq, void *data) 1736 { 1737 struct net_device *netdev = data; 1738 struct e1000_adapter *adapter = netdev_priv(netdev); 1739 struct e1000_hw *hw = &adapter->hw; 1740 u32 icr = er32(ICR); 1741 1742 if (!(icr & E1000_ICR_INT_ASSERTED)) { 1743 if (!test_bit(__E1000_DOWN, &adapter->state)) 1744 ew32(IMS, E1000_IMS_OTHER); 1745 return IRQ_NONE; 1746 } 1747 1748 if (icr & adapter->eiac_mask) 1749 ew32(ICS, (icr & adapter->eiac_mask)); 1750 1751 if (icr & E1000_ICR_OTHER) { 1752 if (!(icr & E1000_ICR_LSC)) 1753 goto no_link_interrupt; 1754 hw->mac.get_link_status = 1; 1755 /* guard against interrupt when we're going down */ 1756 if (!test_bit(__E1000_DOWN, &adapter->state)) 1757 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1758 } 1759 1760 no_link_interrupt: 1761 if (!test_bit(__E1000_DOWN, &adapter->state)) 1762 ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER); 1763 1764 return IRQ_HANDLED; 1765 } 1766 1767 1768 static irqreturn_t e1000_intr_msix_tx(int irq, void *data) 1769 { 1770 struct net_device *netdev = data; 1771 struct e1000_adapter *adapter = netdev_priv(netdev); 1772 struct e1000_hw *hw = &adapter->hw; 1773 struct e1000_ring *tx_ring = adapter->tx_ring; 1774 1775 1776 adapter->total_tx_bytes = 0; 1777 adapter->total_tx_packets = 0; 1778 1779 if (!e1000_clean_tx_irq(adapter)) 1780 /* Ring was not completely cleaned, so fire another interrupt */ 1781 ew32(ICS, tx_ring->ims_val); 1782 1783 return IRQ_HANDLED; 1784 } 1785 1786 static irqreturn_t e1000_intr_msix_rx(int irq, void *data) 1787 { 1788 struct net_device *netdev = data; 1789 struct e1000_adapter *adapter = netdev_priv(netdev); 1790 1791 /* Write the ITR value calculated at the end of the 1792 * previous interrupt. 
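 * The value written below is 1000000000 / (itr_val * 256): itr_val is an
 * interrupt rate in interrupts/second and the EITR interval is counted in
 * 256 ns units, so this converts the rate into the hardware's
 * inter-interrupt gap.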
1793 */ 1794 if (adapter->rx_ring->set_itr) { 1795 writel(1000000000 / (adapter->rx_ring->itr_val * 256), 1796 adapter->hw.hw_addr + adapter->rx_ring->itr_register); 1797 adapter->rx_ring->set_itr = 0; 1798 } 1799 1800 if (napi_schedule_prep(&adapter->napi)) { 1801 adapter->total_rx_bytes = 0; 1802 adapter->total_rx_packets = 0; 1803 __napi_schedule(&adapter->napi); 1804 } 1805 return IRQ_HANDLED; 1806 } 1807 1808 /** 1809 * e1000_configure_msix - Configure MSI-X hardware 1810 * 1811 * e1000_configure_msix sets up the hardware to properly 1812 * generate MSI-X interrupts. 1813 **/ 1814 static void e1000_configure_msix(struct e1000_adapter *adapter) 1815 { 1816 struct e1000_hw *hw = &adapter->hw; 1817 struct e1000_ring *rx_ring = adapter->rx_ring; 1818 struct e1000_ring *tx_ring = adapter->tx_ring; 1819 int vector = 0; 1820 u32 ctrl_ext, ivar = 0; 1821 1822 adapter->eiac_mask = 0; 1823 1824 /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */ 1825 if (hw->mac.type == e1000_82574) { 1826 u32 rfctl = er32(RFCTL); 1827 rfctl |= E1000_RFCTL_ACK_DIS; 1828 ew32(RFCTL, rfctl); 1829 } 1830 1831 #define E1000_IVAR_INT_ALLOC_VALID 0x8 1832 /* Configure Rx vector */ 1833 rx_ring->ims_val = E1000_IMS_RXQ0; 1834 adapter->eiac_mask |= rx_ring->ims_val; 1835 if (rx_ring->itr_val) 1836 writel(1000000000 / (rx_ring->itr_val * 256), 1837 hw->hw_addr + rx_ring->itr_register); 1838 else 1839 writel(1, hw->hw_addr + rx_ring->itr_register); 1840 ivar = E1000_IVAR_INT_ALLOC_VALID | vector; 1841 1842 /* Configure Tx vector */ 1843 tx_ring->ims_val = E1000_IMS_TXQ0; 1844 vector++; 1845 if (tx_ring->itr_val) 1846 writel(1000000000 / (tx_ring->itr_val * 256), 1847 hw->hw_addr + tx_ring->itr_register); 1848 else 1849 writel(1, hw->hw_addr + tx_ring->itr_register); 1850 adapter->eiac_mask |= tx_ring->ims_val; 1851 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8); 1852 1853 /* set vector for Other Causes, e.g. link changes */ 1854 vector++; 1855 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16); 1856 if (rx_ring->itr_val) 1857 writel(1000000000 / (rx_ring->itr_val * 256), 1858 hw->hw_addr + E1000_EITR_82574(vector)); 1859 else 1860 writel(1, hw->hw_addr + E1000_EITR_82574(vector)); 1861 1862 /* Cause Tx interrupts on every write back */ 1863 ivar |= (1 << 31); 1864 1865 ew32(IVAR, ivar); 1866 1867 /* enable MSI-X PBA support */ 1868 ctrl_ext = er32(CTRL_EXT); 1869 ctrl_ext |= E1000_CTRL_EXT_PBA_CLR; 1870 1871 /* Auto-Mask Other interrupts upon ICR read */ 1872 #define E1000_EIAC_MASK_82574 0x01F00000 1873 ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER); 1874 ctrl_ext |= E1000_CTRL_EXT_EIAME; 1875 ew32(CTRL_EXT, ctrl_ext); 1876 e1e_flush(); 1877 } 1878 1879 void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter) 1880 { 1881 if (adapter->msix_entries) { 1882 pci_disable_msix(adapter->pdev); 1883 kfree(adapter->msix_entries); 1884 adapter->msix_entries = NULL; 1885 } else if (adapter->flags & FLAG_MSI_ENABLED) { 1886 pci_disable_msi(adapter->pdev); 1887 adapter->flags &= ~FLAG_MSI_ENABLED; 1888 } 1889 } 1890 1891 /** 1892 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported 1893 * 1894 * Attempt to configure interrupts using the best available 1895 * capabilities of the hardware and kernel. 
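 * MSI-X uses three vectors (RxQ0, TxQ0 and one for other causes) and is only
 * attempted when FLAG_HAS_MSIX is set; on failure the code falls back to MSI
 * and then to legacy interrupts, with num_vectors reset to 1 for those
 * single-vector modes.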
1896 **/ 1897 void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) 1898 { 1899 int err; 1900 int i; 1901 1902 switch (adapter->int_mode) { 1903 case E1000E_INT_MODE_MSIX: 1904 if (adapter->flags & FLAG_HAS_MSIX) { 1905 adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */ 1906 adapter->msix_entries = kcalloc(adapter->num_vectors, 1907 sizeof(struct msix_entry), 1908 GFP_KERNEL); 1909 if (adapter->msix_entries) { 1910 for (i = 0; i < adapter->num_vectors; i++) 1911 adapter->msix_entries[i].entry = i; 1912 1913 err = pci_enable_msix(adapter->pdev, 1914 adapter->msix_entries, 1915 adapter->num_vectors); 1916 if (err == 0) 1917 return; 1918 } 1919 /* MSI-X failed, so fall through and try MSI */ 1920 e_err("Failed to initialize MSI-X interrupts. " 1921 "Falling back to MSI interrupts.\n"); 1922 e1000e_reset_interrupt_capability(adapter); 1923 } 1924 adapter->int_mode = E1000E_INT_MODE_MSI; 1925 /* Fall through */ 1926 case E1000E_INT_MODE_MSI: 1927 if (!pci_enable_msi(adapter->pdev)) { 1928 adapter->flags |= FLAG_MSI_ENABLED; 1929 } else { 1930 adapter->int_mode = E1000E_INT_MODE_LEGACY; 1931 e_err("Failed to initialize MSI interrupts. Falling " 1932 "back to legacy interrupts.\n"); 1933 } 1934 /* Fall through */ 1935 case E1000E_INT_MODE_LEGACY: 1936 /* Don't do anything; this is the system default */ 1937 break; 1938 } 1939 1940 /* store the number of vectors being used */ 1941 adapter->num_vectors = 1; 1942 } 1943 1944 /** 1945 * e1000_request_msix - Initialize MSI-X interrupts 1946 * 1947 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the 1948 * kernel. 1949 **/ 1950 static int e1000_request_msix(struct e1000_adapter *adapter) 1951 { 1952 struct net_device *netdev = adapter->netdev; 1953 int err = 0, vector = 0; 1954 1955 if (strlen(netdev->name) < (IFNAMSIZ - 5)) 1956 snprintf(adapter->rx_ring->name, 1957 sizeof(adapter->rx_ring->name) - 1, 1958 "%s-rx-0", netdev->name); 1959 else 1960 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); 1961 err = request_irq(adapter->msix_entries[vector].vector, 1962 e1000_intr_msix_rx, 0, adapter->rx_ring->name, 1963 netdev); 1964 if (err) 1965 goto out; 1966 adapter->rx_ring->itr_register = E1000_EITR_82574(vector); 1967 adapter->rx_ring->itr_val = adapter->itr; 1968 vector++; 1969 1970 if (strlen(netdev->name) < (IFNAMSIZ - 5)) 1971 snprintf(adapter->tx_ring->name, 1972 sizeof(adapter->tx_ring->name) - 1, 1973 "%s-tx-0", netdev->name); 1974 else 1975 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); 1976 err = request_irq(adapter->msix_entries[vector].vector, 1977 e1000_intr_msix_tx, 0, adapter->tx_ring->name, 1978 netdev); 1979 if (err) 1980 goto out; 1981 adapter->tx_ring->itr_register = E1000_EITR_82574(vector); 1982 adapter->tx_ring->itr_val = adapter->itr; 1983 vector++; 1984 1985 err = request_irq(adapter->msix_entries[vector].vector, 1986 e1000_msix_other, 0, netdev->name, netdev); 1987 if (err) 1988 goto out; 1989 1990 e1000_configure_msix(adapter); 1991 return 0; 1992 out: 1993 return err; 1994 } 1995 1996 /** 1997 * e1000_request_irq - initialize interrupts 1998 * 1999 * Attempts to configure interrupts using the best available 2000 * capabilities of the hardware and kernel. 
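 * If the MSI-X vectors cannot be requested, the driver drops back to a
 * single MSI interrupt and, failing that, to a shared legacy (INTx)
 * interrupt.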
2001 **/ 2002 static int e1000_request_irq(struct e1000_adapter *adapter) 2003 { 2004 struct net_device *netdev = adapter->netdev; 2005 int err; 2006 2007 if (adapter->msix_entries) { 2008 err = e1000_request_msix(adapter); 2009 if (!err) 2010 return err; 2011 /* fall back to MSI */ 2012 e1000e_reset_interrupt_capability(adapter); 2013 adapter->int_mode = E1000E_INT_MODE_MSI; 2014 e1000e_set_interrupt_capability(adapter); 2015 } 2016 if (adapter->flags & FLAG_MSI_ENABLED) { 2017 err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0, 2018 netdev->name, netdev); 2019 if (!err) 2020 return err; 2021 2022 /* fall back to legacy interrupt */ 2023 e1000e_reset_interrupt_capability(adapter); 2024 adapter->int_mode = E1000E_INT_MODE_LEGACY; 2025 } 2026 2027 err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED, 2028 netdev->name, netdev); 2029 if (err) 2030 e_err("Unable to allocate interrupt, Error: %d\n", err); 2031 2032 return err; 2033 } 2034 2035 static void e1000_free_irq(struct e1000_adapter *adapter) 2036 { 2037 struct net_device *netdev = adapter->netdev; 2038 2039 if (adapter->msix_entries) { 2040 int vector = 0; 2041 2042 free_irq(adapter->msix_entries[vector].vector, netdev); 2043 vector++; 2044 2045 free_irq(adapter->msix_entries[vector].vector, netdev); 2046 vector++; 2047 2048 /* Other Causes interrupt vector */ 2049 free_irq(adapter->msix_entries[vector].vector, netdev); 2050 return; 2051 } 2052 2053 free_irq(adapter->pdev->irq, netdev); 2054 } 2055 2056 /** 2057 * e1000_irq_disable - Mask off interrupt generation on the NIC 2058 **/ 2059 static void e1000_irq_disable(struct e1000_adapter *adapter) 2060 { 2061 struct e1000_hw *hw = &adapter->hw; 2062 2063 ew32(IMC, ~0); 2064 if (adapter->msix_entries) 2065 ew32(EIAC_82574, 0); 2066 e1e_flush(); 2067 2068 if (adapter->msix_entries) { 2069 int i; 2070 for (i = 0; i < adapter->num_vectors; i++) 2071 synchronize_irq(adapter->msix_entries[i].vector); 2072 } else { 2073 synchronize_irq(adapter->pdev->irq); 2074 } 2075 } 2076 2077 /** 2078 * e1000_irq_enable - Enable default interrupt generation settings 2079 **/ 2080 static void e1000_irq_enable(struct e1000_adapter *adapter) 2081 { 2082 struct e1000_hw *hw = &adapter->hw; 2083 2084 if (adapter->msix_entries) { 2085 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); 2086 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); 2087 } else { 2088 ew32(IMS, IMS_ENABLE_MASK); 2089 } 2090 e1e_flush(); 2091 } 2092 2093 /** 2094 * e1000e_get_hw_control - get control of the h/w from f/w 2095 * @adapter: address of board private structure 2096 * 2097 * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit. 2098 * For ASF and Pass Through versions of f/w this means that 2099 * the driver is loaded. For AMT version (only with 82573) 2100 * of the f/w this means that the network i/f is open. 
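 * Control is given back to the firmware with e1000e_release_hw_control()
 * when the interface is brought down or the driver is removed.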
2101 **/ 2102 void e1000e_get_hw_control(struct e1000_adapter *adapter) 2103 { 2104 struct e1000_hw *hw = &adapter->hw; 2105 u32 ctrl_ext; 2106 u32 swsm; 2107 2108 /* Let firmware know the driver has taken over */ 2109 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { 2110 swsm = er32(SWSM); 2111 ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD); 2112 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { 2113 ctrl_ext = er32(CTRL_EXT); 2114 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 2115 } 2116 } 2117 2118 /** 2119 * e1000e_release_hw_control - release control of the h/w to f/w 2120 * @adapter: address of board private structure 2121 * 2122 * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit. 2123 * For ASF and Pass Through versions of f/w this means that the 2124 * driver is no longer loaded. For AMT version (only with 82573) i 2125 * of the f/w this means that the network i/f is closed. 2126 * 2127 **/ 2128 void e1000e_release_hw_control(struct e1000_adapter *adapter) 2129 { 2130 struct e1000_hw *hw = &adapter->hw; 2131 u32 ctrl_ext; 2132 u32 swsm; 2133 2134 /* Let firmware taken over control of h/w */ 2135 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { 2136 swsm = er32(SWSM); 2137 ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD); 2138 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { 2139 ctrl_ext = er32(CTRL_EXT); 2140 ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 2141 } 2142 } 2143 2144 /** 2145 * @e1000_alloc_ring - allocate memory for a ring structure 2146 **/ 2147 static int e1000_alloc_ring_dma(struct e1000_adapter *adapter, 2148 struct e1000_ring *ring) 2149 { 2150 struct pci_dev *pdev = adapter->pdev; 2151 2152 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, 2153 GFP_KERNEL); 2154 if (!ring->desc) 2155 return -ENOMEM; 2156 2157 return 0; 2158 } 2159 2160 /** 2161 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors) 2162 * @adapter: board private structure 2163 * 2164 * Return 0 on success, negative on failure 2165 **/ 2166 int e1000e_setup_tx_resources(struct e1000_adapter *adapter) 2167 { 2168 struct e1000_ring *tx_ring = adapter->tx_ring; 2169 int err = -ENOMEM, size; 2170 2171 size = sizeof(struct e1000_buffer) * tx_ring->count; 2172 tx_ring->buffer_info = vzalloc(size); 2173 if (!tx_ring->buffer_info) 2174 goto err; 2175 2176 /* round up to nearest 4K */ 2177 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); 2178 tx_ring->size = ALIGN(tx_ring->size, 4096); 2179 2180 err = e1000_alloc_ring_dma(adapter, tx_ring); 2181 if (err) 2182 goto err; 2183 2184 tx_ring->next_to_use = 0; 2185 tx_ring->next_to_clean = 0; 2186 2187 return 0; 2188 err: 2189 vfree(tx_ring->buffer_info); 2190 e_err("Unable to allocate memory for the transmit descriptor ring\n"); 2191 return err; 2192 } 2193 2194 /** 2195 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors) 2196 * @adapter: board private structure 2197 * 2198 * Returns 0 on success, negative on failure 2199 **/ 2200 int e1000e_setup_rx_resources(struct e1000_adapter *adapter) 2201 { 2202 struct e1000_ring *rx_ring = adapter->rx_ring; 2203 struct e1000_buffer *buffer_info; 2204 int i, size, desc_len, err = -ENOMEM; 2205 2206 size = sizeof(struct e1000_buffer) * rx_ring->count; 2207 rx_ring->buffer_info = vzalloc(size); 2208 if (!rx_ring->buffer_info) 2209 goto err; 2210 2211 for (i = 0; i < rx_ring->count; i++) { 2212 buffer_info = &rx_ring->buffer_info[i]; 2213 buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS, 2214 sizeof(struct e1000_ps_page), 2215 GFP_KERNEL); 2216 if 
(!buffer_info->ps_pages) 2217 goto err_pages; 2218 } 2219 2220 desc_len = sizeof(union e1000_rx_desc_packet_split); 2221 2222 /* Round up to nearest 4K */ 2223 rx_ring->size = rx_ring->count * desc_len; 2224 rx_ring->size = ALIGN(rx_ring->size, 4096); 2225 2226 err = e1000_alloc_ring_dma(adapter, rx_ring); 2227 if (err) 2228 goto err_pages; 2229 2230 rx_ring->next_to_clean = 0; 2231 rx_ring->next_to_use = 0; 2232 rx_ring->rx_skb_top = NULL; 2233 2234 return 0; 2235 2236 err_pages: 2237 for (i = 0; i < rx_ring->count; i++) { 2238 buffer_info = &rx_ring->buffer_info[i]; 2239 kfree(buffer_info->ps_pages); 2240 } 2241 err: 2242 vfree(rx_ring->buffer_info); 2243 e_err("Unable to allocate memory for the receive descriptor ring\n"); 2244 return err; 2245 } 2246 2247 /** 2248 * e1000_clean_tx_ring - Free Tx Buffers 2249 * @adapter: board private structure 2250 **/ 2251 static void e1000_clean_tx_ring(struct e1000_adapter *adapter) 2252 { 2253 struct e1000_ring *tx_ring = adapter->tx_ring; 2254 struct e1000_buffer *buffer_info; 2255 unsigned long size; 2256 unsigned int i; 2257 2258 for (i = 0; i < tx_ring->count; i++) { 2259 buffer_info = &tx_ring->buffer_info[i]; 2260 e1000_put_txbuf(adapter, buffer_info); 2261 } 2262 2263 size = sizeof(struct e1000_buffer) * tx_ring->count; 2264 memset(tx_ring->buffer_info, 0, size); 2265 2266 memset(tx_ring->desc, 0, tx_ring->size); 2267 2268 tx_ring->next_to_use = 0; 2269 tx_ring->next_to_clean = 0; 2270 2271 writel(0, adapter->hw.hw_addr + tx_ring->head); 2272 writel(0, adapter->hw.hw_addr + tx_ring->tail); 2273 } 2274 2275 /** 2276 * e1000e_free_tx_resources - Free Tx Resources per Queue 2277 * @adapter: board private structure 2278 * 2279 * Free all transmit software resources 2280 **/ 2281 void e1000e_free_tx_resources(struct e1000_adapter *adapter) 2282 { 2283 struct pci_dev *pdev = adapter->pdev; 2284 struct e1000_ring *tx_ring = adapter->tx_ring; 2285 2286 e1000_clean_tx_ring(adapter); 2287 2288 vfree(tx_ring->buffer_info); 2289 tx_ring->buffer_info = NULL; 2290 2291 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 2292 tx_ring->dma); 2293 tx_ring->desc = NULL; 2294 } 2295 2296 /** 2297 * e1000e_free_rx_resources - Free Rx Resources 2298 * @adapter: board private structure 2299 * 2300 * Free all receive software resources 2301 **/ 2302 2303 void e1000e_free_rx_resources(struct e1000_adapter *adapter) 2304 { 2305 struct pci_dev *pdev = adapter->pdev; 2306 struct e1000_ring *rx_ring = adapter->rx_ring; 2307 int i; 2308 2309 e1000_clean_rx_ring(adapter); 2310 2311 for (i = 0; i < rx_ring->count; i++) 2312 kfree(rx_ring->buffer_info[i].ps_pages); 2313 2314 vfree(rx_ring->buffer_info); 2315 rx_ring->buffer_info = NULL; 2316 2317 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 2318 rx_ring->dma); 2319 rx_ring->desc = NULL; 2320 } 2321 2322 /** 2323 * e1000_update_itr - update the dynamic ITR value based on statistics 2324 * @adapter: pointer to adapter 2325 * @itr_setting: current adapter->itr 2326 * @packets: the number of packets during this measurement interval 2327 * @bytes: the number of bytes during this measurement interval 2328 * 2329 * Stores a new ITR value based on packets and byte 2330 * counts during the last interrupt. The advantage of per interrupt 2331 * computation is faster updates and more accurate ITR for the current 2332 * traffic pattern. 
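 * The return value is one of lowest_latency, low_latency or bulk_latency;
 * e1000_set_itr() then maps that class to an actual interrupt rate.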
Constants in this function were computed 2333 * based on theoretical maximum wire speed and thresholds were set based 2334 * on testing data as well as attempting to minimize response time 2335 * while increasing bulk throughput. This functionality is controlled 2336 * by the InterruptThrottleRate module parameter. 2337 **/ 2338 static unsigned int e1000_update_itr(struct e1000_adapter *adapter, 2339 u16 itr_setting, int packets, 2340 int bytes) 2341 { 2342 unsigned int retval = itr_setting; 2343 2344 if (packets == 0) 2345 goto update_itr_done; 2346 2347 switch (itr_setting) { 2348 case lowest_latency: 2349 /* handle TSO and jumbo frames */ 2350 if (bytes/packets > 8000) 2351 retval = bulk_latency; 2352 else if ((packets < 5) && (bytes > 512)) 2353 retval = low_latency; 2354 break; 2355 case low_latency: /* 50 usec aka 20000 ints/s */ 2356 if (bytes > 10000) { 2357 /* this if handles the TSO accounting */ 2358 if (bytes/packets > 8000) 2359 retval = bulk_latency; 2360 else if ((packets < 10) || ((bytes/packets) > 1200)) 2361 retval = bulk_latency; 2362 else if ((packets > 35)) 2363 retval = lowest_latency; 2364 } else if (bytes/packets > 2000) { 2365 retval = bulk_latency; 2366 } else if (packets <= 2 && bytes < 512) { 2367 retval = lowest_latency; 2368 } 2369 break; 2370 case bulk_latency: /* 250 usec aka 4000 ints/s */ 2371 if (bytes > 25000) { 2372 if (packets > 35) 2373 retval = low_latency; 2374 } else if (bytes < 6000) { 2375 retval = low_latency; 2376 } 2377 break; 2378 } 2379 2380 update_itr_done: 2381 return retval; 2382 } 2383 2384 static void e1000_set_itr(struct e1000_adapter *adapter) 2385 { 2386 struct e1000_hw *hw = &adapter->hw; 2387 u16 current_itr; 2388 u32 new_itr = adapter->itr; 2389 2390 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 2391 if (adapter->link_speed != SPEED_1000) { 2392 current_itr = 0; 2393 new_itr = 4000; 2394 goto set_itr_now; 2395 } 2396 2397 if (adapter->flags2 & FLAG2_DISABLE_AIM) { 2398 new_itr = 0; 2399 goto set_itr_now; 2400 } 2401 2402 adapter->tx_itr = e1000_update_itr(adapter, 2403 adapter->tx_itr, 2404 adapter->total_tx_packets, 2405 adapter->total_tx_bytes); 2406 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2407 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) 2408 adapter->tx_itr = low_latency; 2409 2410 adapter->rx_itr = e1000_update_itr(adapter, 2411 adapter->rx_itr, 2412 adapter->total_rx_packets, 2413 adapter->total_rx_bytes); 2414 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2415 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) 2416 adapter->rx_itr = low_latency; 2417 2418 current_itr = max(adapter->rx_itr, adapter->tx_itr); 2419 2420 switch (current_itr) { 2421 /* counts and packets in update_itr are dependent on these numbers */ 2422 case lowest_latency: 2423 new_itr = 70000; 2424 break; 2425 case low_latency: 2426 new_itr = 20000; /* aka hwitr = ~200 */ 2427 break; 2428 case bulk_latency: 2429 new_itr = 4000; 2430 break; 2431 default: 2432 break; 2433 } 2434 2435 set_itr_now: 2436 if (new_itr != adapter->itr) { 2437 /* 2438 * this attempts to bias the interrupt rate towards Bulk 2439 * by adding intermediate steps when interrupt rate is 2440 * increasing 2441 */ 2442 new_itr = new_itr > adapter->itr ? 
2443 min(adapter->itr + (new_itr >> 2), new_itr) : 2444 new_itr; 2445 adapter->itr = new_itr; 2446 adapter->rx_ring->itr_val = new_itr; 2447 if (adapter->msix_entries) 2448 adapter->rx_ring->set_itr = 1; 2449 else 2450 if (new_itr) 2451 ew32(ITR, 1000000000 / (new_itr * 256)); 2452 else 2453 ew32(ITR, 0); 2454 } 2455 } 2456 2457 /** 2458 * e1000_alloc_queues - Allocate memory for all rings 2459 * @adapter: board private structure to initialize 2460 **/ 2461 static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter) 2462 { 2463 adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); 2464 if (!adapter->tx_ring) 2465 goto err; 2466 2467 adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); 2468 if (!adapter->rx_ring) 2469 goto err; 2470 2471 return 0; 2472 err: 2473 e_err("Unable to allocate memory for queues\n"); 2474 kfree(adapter->rx_ring); 2475 kfree(adapter->tx_ring); 2476 return -ENOMEM; 2477 } 2478 2479 /** 2480 * e1000_clean - NAPI Rx polling callback 2481 * @napi: struct associated with this polling callback 2482 * @budget: amount of packets driver is allowed to process this poll 2483 **/ 2484 static int e1000_clean(struct napi_struct *napi, int budget) 2485 { 2486 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); 2487 struct e1000_hw *hw = &adapter->hw; 2488 struct net_device *poll_dev = adapter->netdev; 2489 int tx_cleaned = 1, work_done = 0; 2490 2491 adapter = netdev_priv(poll_dev); 2492 2493 if (adapter->msix_entries && 2494 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) 2495 goto clean_rx; 2496 2497 tx_cleaned = e1000_clean_tx_irq(adapter); 2498 2499 clean_rx: 2500 adapter->clean_rx(adapter, &work_done, budget); 2501 2502 if (!tx_cleaned) 2503 work_done = budget; 2504 2505 /* If budget not fully consumed, exit the polling mode */ 2506 if (work_done < budget) { 2507 if (adapter->itr_setting & 3) 2508 e1000_set_itr(adapter); 2509 napi_complete(napi); 2510 if (!test_bit(__E1000_DOWN, &adapter->state)) { 2511 if (adapter->msix_entries) 2512 ew32(IMS, adapter->rx_ring->ims_val); 2513 else 2514 e1000_irq_enable(adapter); 2515 } 2516 } 2517 2518 return work_done; 2519 } 2520 2521 static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 2522 { 2523 struct e1000_adapter *adapter = netdev_priv(netdev); 2524 struct e1000_hw *hw = &adapter->hw; 2525 u32 vfta, index; 2526 2527 /* don't update vlan cookie if already programmed */ 2528 if ((adapter->hw.mng_cookie.status & 2529 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 2530 (vid == adapter->mng_vlan_id)) 2531 return; 2532 2533 /* add VID to filter table */ 2534 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { 2535 index = (vid >> 5) & 0x7F; 2536 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); 2537 vfta |= (1 << (vid & 0x1F)); 2538 hw->mac.ops.write_vfta(hw, index, vfta); 2539 } 2540 2541 set_bit(vid, adapter->active_vlans); 2542 } 2543 2544 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 2545 { 2546 struct e1000_adapter *adapter = netdev_priv(netdev); 2547 struct e1000_hw *hw = &adapter->hw; 2548 u32 vfta, index; 2549 2550 if ((adapter->hw.mng_cookie.status & 2551 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 2552 (vid == adapter->mng_vlan_id)) { 2553 /* release control to f/w */ 2554 e1000e_release_hw_control(adapter); 2555 return; 2556 } 2557 2558 /* remove VID from filter table */ 2559 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { 2560 index = (vid >> 5) & 0x7F; 2561 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); 2562 vfta &= ~(1 
<< (vid & 0x1F)); 2563 hw->mac.ops.write_vfta(hw, index, vfta); 2564 } 2565 2566 clear_bit(vid, adapter->active_vlans); 2567 } 2568 2569 /** 2570 * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering 2571 * @adapter: board private structure to initialize 2572 **/ 2573 static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter) 2574 { 2575 struct net_device *netdev = adapter->netdev; 2576 struct e1000_hw *hw = &adapter->hw; 2577 u32 rctl; 2578 2579 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { 2580 /* disable VLAN receive filtering */ 2581 rctl = er32(RCTL); 2582 rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN); 2583 ew32(RCTL, rctl); 2584 2585 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) { 2586 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 2587 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 2588 } 2589 } 2590 } 2591 2592 /** 2593 * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering 2594 * @adapter: board private structure to initialize 2595 **/ 2596 static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter) 2597 { 2598 struct e1000_hw *hw = &adapter->hw; 2599 u32 rctl; 2600 2601 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { 2602 /* enable VLAN receive filtering */ 2603 rctl = er32(RCTL); 2604 rctl |= E1000_RCTL_VFE; 2605 rctl &= ~E1000_RCTL_CFIEN; 2606 ew32(RCTL, rctl); 2607 } 2608 } 2609 2610 /** 2611 * e1000e_vlan_strip_enable - helper to disable HW VLAN stripping 2612 * @adapter: board private structure to initialize 2613 **/ 2614 static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter) 2615 { 2616 struct e1000_hw *hw = &adapter->hw; 2617 u32 ctrl; 2618 2619 /* disable VLAN tag insert/strip */ 2620 ctrl = er32(CTRL); 2621 ctrl &= ~E1000_CTRL_VME; 2622 ew32(CTRL, ctrl); 2623 } 2624 2625 /** 2626 * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping 2627 * @adapter: board private structure to initialize 2628 **/ 2629 static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter) 2630 { 2631 struct e1000_hw *hw = &adapter->hw; 2632 u32 ctrl; 2633 2634 /* enable VLAN tag insert/strip */ 2635 ctrl = er32(CTRL); 2636 ctrl |= E1000_CTRL_VME; 2637 ew32(CTRL, ctrl); 2638 } 2639 2640 static void e1000_update_mng_vlan(struct e1000_adapter *adapter) 2641 { 2642 struct net_device *netdev = adapter->netdev; 2643 u16 vid = adapter->hw.mng_cookie.vlan_id; 2644 u16 old_vid = adapter->mng_vlan_id; 2645 2646 if (adapter->hw.mng_cookie.status & 2647 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { 2648 e1000_vlan_rx_add_vid(netdev, vid); 2649 adapter->mng_vlan_id = vid; 2650 } 2651 2652 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid)) 2653 e1000_vlan_rx_kill_vid(netdev, old_vid); 2654 } 2655 2656 static void e1000_restore_vlan(struct e1000_adapter *adapter) 2657 { 2658 u16 vid; 2659 2660 e1000_vlan_rx_add_vid(adapter->netdev, 0); 2661 2662 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 2663 e1000_vlan_rx_add_vid(adapter->netdev, vid); 2664 } 2665 2666 static void e1000_init_manageability_pt(struct e1000_adapter *adapter) 2667 { 2668 struct e1000_hw *hw = &adapter->hw; 2669 u32 manc, manc2h, mdef, i, j; 2670 2671 if (!(adapter->flags & FLAG_MNG_PT_ENABLED)) 2672 return; 2673 2674 manc = er32(MANC); 2675 2676 /* 2677 * enable receiving management packets to the host. 
this will probably 2678 * generate destination unreachable messages from the host OS, but 2679 * the packets will be handled on SMBUS 2680 */ 2681 manc |= E1000_MANC_EN_MNG2HOST; 2682 manc2h = er32(MANC2H); 2683 2684 switch (hw->mac.type) { 2685 default: 2686 manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664); 2687 break; 2688 case e1000_82574: 2689 case e1000_82583: 2690 /* 2691 * Check if IPMI pass-through decision filter already exists; 2692 * if so, enable it. 2693 */ 2694 for (i = 0, j = 0; i < 8; i++) { 2695 mdef = er32(MDEF(i)); 2696 2697 /* Ignore filters with anything other than IPMI ports */ 2698 if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) 2699 continue; 2700 2701 /* Enable this decision filter in MANC2H */ 2702 if (mdef) 2703 manc2h |= (1 << i); 2704 2705 j |= mdef; 2706 } 2707 2708 if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) 2709 break; 2710 2711 /* Create new decision filter in an empty filter */ 2712 for (i = 0, j = 0; i < 8; i++) 2713 if (er32(MDEF(i)) == 0) { 2714 ew32(MDEF(i), (E1000_MDEF_PORT_623 | 2715 E1000_MDEF_PORT_664)); 2716 manc2h |= (1 << 1); 2717 j++; 2718 break; 2719 } 2720 2721 if (!j) 2722 e_warn("Unable to create IPMI pass-through filter\n"); 2723 break; 2724 } 2725 2726 ew32(MANC2H, manc2h); 2727 ew32(MANC, manc); 2728 } 2729 2730 /** 2731 * e1000_configure_tx - Configure Transmit Unit after Reset 2732 * @adapter: board private structure 2733 * 2734 * Configure the Tx unit of the MAC after a reset. 2735 **/ 2736 static void e1000_configure_tx(struct e1000_adapter *adapter) 2737 { 2738 struct e1000_hw *hw = &adapter->hw; 2739 struct e1000_ring *tx_ring = adapter->tx_ring; 2740 u64 tdba; 2741 u32 tdlen, tctl, tipg, tarc; 2742 u32 ipgr1, ipgr2; 2743 2744 /* Setup the HW Tx Head and Tail descriptor pointers */ 2745 tdba = tx_ring->dma; 2746 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); 2747 ew32(TDBAL, (tdba & DMA_BIT_MASK(32))); 2748 ew32(TDBAH, (tdba >> 32)); 2749 ew32(TDLEN, tdlen); 2750 ew32(TDH, 0); 2751 ew32(TDT, 0); 2752 tx_ring->head = E1000_TDH; 2753 tx_ring->tail = E1000_TDT; 2754 2755 /* Set the default values for the Tx Inter Packet Gap timer */ 2756 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */ 2757 ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */ 2758 ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */ 2759 2760 if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN) 2761 ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */ 2762 2763 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; 2764 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; 2765 ew32(TIPG, tipg); 2766 2767 /* Set the Tx Interrupt Delay register */ 2768 ew32(TIDV, adapter->tx_int_delay); 2769 /* Tx irq moderation */ 2770 ew32(TADV, adapter->tx_abs_int_delay); 2771 2772 if (adapter->flags2 & FLAG2_DMA_BURST) { 2773 u32 txdctl = er32(TXDCTL(0)); 2774 txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH | 2775 E1000_TXDCTL_WTHRESH); 2776 /* 2777 * set up some performance related parameters to encourage the 2778 * hardware to use the bus more efficiently in bursts, depends 2779 * on the tx_int_delay to be enabled, 2780 * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time 2781 * hthresh = 1 ==> prefetch when one or more available 2782 * pthresh = 0x1f ==> prefetch if internal cache 31 or less 2783 * BEWARE: this seems to work but should be considered first if 2784 * there are Tx hangs or other Tx related bugs 2785 */ 2786 txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE; 2787 ew32(TXDCTL(0), txdctl); 2788 /* erratum work around: set txdctl the same for both queues */ 2789 ew32(TXDCTL(1), 
txdctl); 2790 } 2791 2792 /* Program the Transmit Control Register */ 2793 tctl = er32(TCTL); 2794 tctl &= ~E1000_TCTL_CT; 2795 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | 2796 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 2797 2798 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { 2799 tarc = er32(TARC(0)); 2800 /* 2801 * set the speed mode bit, we'll clear it if we're not at 2802 * gigabit link later 2803 */ 2804 #define SPEED_MODE_BIT (1 << 21) 2805 tarc |= SPEED_MODE_BIT; 2806 ew32(TARC(0), tarc); 2807 } 2808 2809 /* errata: program both queues to unweighted RR */ 2810 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) { 2811 tarc = er32(TARC(0)); 2812 tarc |= 1; 2813 ew32(TARC(0), tarc); 2814 tarc = er32(TARC(1)); 2815 tarc |= 1; 2816 ew32(TARC(1), tarc); 2817 } 2818 2819 /* Setup Transmit Descriptor Settings for eop descriptor */ 2820 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; 2821 2822 /* only set IDE if we are delaying interrupts using the timers */ 2823 if (adapter->tx_int_delay) 2824 adapter->txd_cmd |= E1000_TXD_CMD_IDE; 2825 2826 /* enable Report Status bit */ 2827 adapter->txd_cmd |= E1000_TXD_CMD_RS; 2828 2829 ew32(TCTL, tctl); 2830 2831 e1000e_config_collision_dist(hw); 2832 } 2833 2834 /** 2835 * e1000_setup_rctl - configure the receive control registers 2836 * @adapter: Board private structure 2837 **/ 2838 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ 2839 (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) 2840 static void e1000_setup_rctl(struct e1000_adapter *adapter) 2841 { 2842 struct e1000_hw *hw = &adapter->hw; 2843 u32 rctl, rfctl; 2844 u32 pages = 0; 2845 2846 /* Workaround Si errata on 82579 - configure jumbo frame flow */ 2847 if (hw->mac.type == e1000_pch2lan) { 2848 s32 ret_val; 2849 2850 if (adapter->netdev->mtu > ETH_DATA_LEN) 2851 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); 2852 else 2853 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); 2854 2855 if (ret_val) 2856 e_dbg("failed to enable jumbo frame workaround mode\n"); 2857 } 2858 2859 /* Program MC offset vector base */ 2860 rctl = er32(RCTL); 2861 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 2862 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | 2863 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | 2864 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 2865 2866 /* Do not Store bad packets */ 2867 rctl &= ~E1000_RCTL_SBP; 2868 2869 /* Enable Long Packet receive */ 2870 if (adapter->netdev->mtu <= ETH_DATA_LEN) 2871 rctl &= ~E1000_RCTL_LPE; 2872 else 2873 rctl |= E1000_RCTL_LPE; 2874 2875 /* Some systems expect that the CRC is included in SMBUS traffic. 
The 2876 * hardware strips the CRC before sending to both SMBUS (BMC) and to 2877 * host memory when this is enabled 2878 */ 2879 if (adapter->flags2 & FLAG2_CRC_STRIPPING) 2880 rctl |= E1000_RCTL_SECRC; 2881 2882 /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */ 2883 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) { 2884 u16 phy_data; 2885 2886 e1e_rphy(hw, PHY_REG(770, 26), &phy_data); 2887 phy_data &= 0xfff8; 2888 phy_data |= (1 << 2); 2889 e1e_wphy(hw, PHY_REG(770, 26), phy_data); 2890 2891 e1e_rphy(hw, 22, &phy_data); 2892 phy_data &= 0x0fff; 2893 phy_data |= (1 << 14); 2894 e1e_wphy(hw, 0x10, 0x2823); 2895 e1e_wphy(hw, 0x11, 0x0003); 2896 e1e_wphy(hw, 22, phy_data); 2897 } 2898 2899 /* Setup buffer sizes */ 2900 rctl &= ~E1000_RCTL_SZ_4096; 2901 rctl |= E1000_RCTL_BSEX; 2902 switch (adapter->rx_buffer_len) { 2903 case 2048: 2904 default: 2905 rctl |= E1000_RCTL_SZ_2048; 2906 rctl &= ~E1000_RCTL_BSEX; 2907 break; 2908 case 4096: 2909 rctl |= E1000_RCTL_SZ_4096; 2910 break; 2911 case 8192: 2912 rctl |= E1000_RCTL_SZ_8192; 2913 break; 2914 case 16384: 2915 rctl |= E1000_RCTL_SZ_16384; 2916 break; 2917 } 2918 2919 /* Enable Extended Status in all Receive Descriptors */ 2920 rfctl = er32(RFCTL); 2921 rfctl |= E1000_RFCTL_EXTEN; 2922 2923 /* 2924 * 82571 and greater support packet-split where the protocol 2925 * header is placed in skb->data and the packet data is 2926 * placed in pages hanging off of skb_shinfo(skb)->nr_frags. 2927 * In the case of a non-split, skb->data is linearly filled, 2928 * followed by the page buffers. Therefore, skb->data is 2929 * sized to hold the largest protocol header. 2930 * 2931 * allocations using alloc_page take too long for regular MTU 2932 * so only enable packet split for jumbo frames 2933 * 2934 * Using pages when the page size is greater than 16k wastes 2935 * a lot of memory, since we allocate 3 pages at all times 2936 * per packet. 2937 */ 2938 pages = PAGE_USE_COUNT(adapter->netdev->mtu); 2939 if (!(adapter->flags & FLAG_HAS_ERT) && (pages <= 3) && 2940 (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) 2941 adapter->rx_ps_pages = pages; 2942 else 2943 adapter->rx_ps_pages = 0; 2944 2945 if (adapter->rx_ps_pages) { 2946 u32 psrctl = 0; 2947 2948 /* 2949 * disable packet split support for IPv6 extension headers, 2950 * because some malformed IPv6 headers can hang the Rx 2951 */ 2952 rfctl |= (E1000_RFCTL_IPV6_EX_DIS | 2953 E1000_RFCTL_NEW_IPV6_EXT_DIS); 2954 2955 /* Enable Packet split descriptors */ 2956 rctl |= E1000_RCTL_DTYP_PS; 2957 2958 psrctl |= adapter->rx_ps_bsize0 >> 2959 E1000_PSRCTL_BSIZE0_SHIFT; 2960 2961 switch (adapter->rx_ps_pages) { 2962 case 3: 2963 psrctl |= PAGE_SIZE << 2964 E1000_PSRCTL_BSIZE3_SHIFT; 2965 case 2: 2966 psrctl |= PAGE_SIZE << 2967 E1000_PSRCTL_BSIZE2_SHIFT; 2968 case 1: 2969 psrctl |= PAGE_SIZE >> 2970 E1000_PSRCTL_BSIZE1_SHIFT; 2971 break; 2972 } 2973 2974 ew32(PSRCTL, psrctl); 2975 } 2976 2977 ew32(RFCTL, rfctl); 2978 ew32(RCTL, rctl); 2979 /* just started the receive unit, no need to restart */ 2980 adapter->flags &= ~FLAG_RX_RESTART_NOW; 2981 } 2982 2983 /** 2984 * e1000_configure_rx - Configure Receive Unit after Reset 2985 * @adapter: board private structure 2986 * 2987 * Configure the Rx unit of the MAC after a reset. 
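 * This also selects the Rx handlers: the packet-split paths when
 * rx_ps_pages is non-zero, the jumbo paths for large MTUs, and the
 * default clean/alloc routines otherwise.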
2988 **/ 2989 static void e1000_configure_rx(struct e1000_adapter *adapter) 2990 { 2991 struct e1000_hw *hw = &adapter->hw; 2992 struct e1000_ring *rx_ring = adapter->rx_ring; 2993 u64 rdba; 2994 u32 rdlen, rctl, rxcsum, ctrl_ext; 2995 2996 if (adapter->rx_ps_pages) { 2997 /* this is a 32 byte descriptor */ 2998 rdlen = rx_ring->count * 2999 sizeof(union e1000_rx_desc_packet_split); 3000 adapter->clean_rx = e1000_clean_rx_irq_ps; 3001 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; 3002 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { 3003 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); 3004 adapter->clean_rx = e1000_clean_jumbo_rx_irq; 3005 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; 3006 } else { 3007 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); 3008 adapter->clean_rx = e1000_clean_rx_irq; 3009 adapter->alloc_rx_buf = e1000_alloc_rx_buffers; 3010 } 3011 3012 /* disable receives while setting up the descriptors */ 3013 rctl = er32(RCTL); 3014 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) 3015 ew32(RCTL, rctl & ~E1000_RCTL_EN); 3016 e1e_flush(); 3017 usleep_range(10000, 20000); 3018 3019 if (adapter->flags2 & FLAG2_DMA_BURST) { 3020 /* 3021 * set the writeback threshold (only takes effect if the RDTR 3022 * is set). set GRAN=1 and write back up to 0x4 worth, and 3023 * enable prefetching of 0x20 Rx descriptors 3024 * granularity = 01 3025 * wthresh = 04, 3026 * hthresh = 04, 3027 * pthresh = 0x20 3028 */ 3029 ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE); 3030 ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE); 3031 3032 /* 3033 * override the delay timers for enabling bursting, only if 3034 * the value was not set by the user via module options 3035 */ 3036 if (adapter->rx_int_delay == DEFAULT_RDTR) 3037 adapter->rx_int_delay = BURST_RDTR; 3038 if (adapter->rx_abs_int_delay == DEFAULT_RADV) 3039 adapter->rx_abs_int_delay = BURST_RADV; 3040 } 3041 3042 /* set the Receive Delay Timer Register */ 3043 ew32(RDTR, adapter->rx_int_delay); 3044 3045 /* irq moderation */ 3046 ew32(RADV, adapter->rx_abs_int_delay); 3047 if ((adapter->itr_setting != 0) && (adapter->itr != 0)) 3048 ew32(ITR, 1000000000 / (adapter->itr * 256)); 3049 3050 ctrl_ext = er32(CTRL_EXT); 3051 /* Auto-Mask interrupts upon ICR access */ 3052 ctrl_ext |= E1000_CTRL_EXT_IAME; 3053 ew32(IAM, 0xffffffff); 3054 ew32(CTRL_EXT, ctrl_ext); 3055 e1e_flush(); 3056 3057 /* 3058 * Setup the HW Rx Head and Tail Descriptor Pointers and 3059 * the Base and Length of the Rx Descriptor Ring 3060 */ 3061 rdba = rx_ring->dma; 3062 ew32(RDBAL, (rdba & DMA_BIT_MASK(32))); 3063 ew32(RDBAH, (rdba >> 32)); 3064 ew32(RDLEN, rdlen); 3065 ew32(RDH, 0); 3066 ew32(RDT, 0); 3067 rx_ring->head = E1000_RDH; 3068 rx_ring->tail = E1000_RDT; 3069 3070 /* Enable Receive Checksum Offload for TCP and UDP */ 3071 rxcsum = er32(RXCSUM); 3072 if (adapter->netdev->features & NETIF_F_RXCSUM) { 3073 rxcsum |= E1000_RXCSUM_TUOFL; 3074 3075 /* 3076 * IPv4 payload checksum for UDP fragments must be 3077 * used in conjunction with packet-split. 3078 */ 3079 if (adapter->rx_ps_pages) 3080 rxcsum |= E1000_RXCSUM_IPPCSE; 3081 } else { 3082 rxcsum &= ~E1000_RXCSUM_TUOFL; 3083 /* no need to clear IPPCSE as it defaults to 0 */ 3084 } 3085 ew32(RXCSUM, rxcsum); 3086 3087 /* 3088 * Enable early receives on supported devices, only takes effect when 3089 * packet size is equal or larger than the specified value (in 8 byte 3090 * units), e.g. 
using jumbo frames when setting to E1000_ERT_2048 3091 */ 3092 if ((adapter->flags & FLAG_HAS_ERT) || 3093 (adapter->hw.mac.type == e1000_pch2lan)) { 3094 if (adapter->netdev->mtu > ETH_DATA_LEN) { 3095 u32 rxdctl = er32(RXDCTL(0)); 3096 ew32(RXDCTL(0), rxdctl | 0x3); 3097 if (adapter->flags & FLAG_HAS_ERT) 3098 ew32(ERT, E1000_ERT_2048 | (1 << 13)); 3099 /* 3100 * With jumbo frames and early-receive enabled, 3101 * excessive C-state transition latencies result in 3102 * dropped transactions. 3103 */ 3104 pm_qos_update_request(&adapter->netdev->pm_qos_req, 55); 3105 } else { 3106 pm_qos_update_request(&adapter->netdev->pm_qos_req, 3107 PM_QOS_DEFAULT_VALUE); 3108 } 3109 } 3110 3111 /* Enable Receives */ 3112 ew32(RCTL, rctl); 3113 } 3114 3115 /** 3116 * e1000_update_mc_addr_list - Update Multicast addresses 3117 * @hw: pointer to the HW structure 3118 * @mc_addr_list: array of multicast addresses to program 3119 * @mc_addr_count: number of multicast addresses to program 3120 * 3121 * Updates the Multicast Table Array. 3122 * The caller must have a packed mc_addr_list of multicast addresses. 3123 **/ 3124 static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, 3125 u32 mc_addr_count) 3126 { 3127 hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count); 3128 } 3129 3130 /** 3131 * e1000_set_multi - Multicast and Promiscuous mode set 3132 * @netdev: network interface device structure 3133 * 3134 * The set_multi entry point is called whenever the multicast address 3135 * list or the network interface flags are updated. This routine is 3136 * responsible for configuring the hardware for proper multicast, 3137 * promiscuous mode, and all-multi behavior. 3138 **/ 3139 static void e1000_set_multi(struct net_device *netdev) 3140 { 3141 struct e1000_adapter *adapter = netdev_priv(netdev); 3142 struct e1000_hw *hw = &adapter->hw; 3143 struct netdev_hw_addr *ha; 3144 u8 *mta_list; 3145 u32 rctl; 3146 3147 /* Check for Promiscuous and All Multicast modes */ 3148 3149 rctl = er32(RCTL); 3150 3151 if (netdev->flags & IFF_PROMISC) { 3152 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 3153 rctl &= ~E1000_RCTL_VFE; 3154 /* Do not hardware filter VLANs in promisc mode */ 3155 e1000e_vlan_filter_disable(adapter); 3156 } else { 3157 if (netdev->flags & IFF_ALLMULTI) { 3158 rctl |= E1000_RCTL_MPE; 3159 rctl &= ~E1000_RCTL_UPE; 3160 } else { 3161 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); 3162 } 3163 e1000e_vlan_filter_enable(adapter); 3164 } 3165 3166 ew32(RCTL, rctl); 3167 3168 if (!netdev_mc_empty(netdev)) { 3169 int i = 0; 3170 3171 mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); 3172 if (!mta_list) 3173 return; 3174 3175 /* prepare a packed array of only addresses. 
*/ 3176 netdev_for_each_mc_addr(ha, netdev) 3177 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); 3178 3179 e1000_update_mc_addr_list(hw, mta_list, i); 3180 kfree(mta_list); 3181 } else { 3182 /* 3183 * if we're called from probe, we might not have 3184 * anything to do here, so clear out the list 3185 */ 3186 e1000_update_mc_addr_list(hw, NULL, 0); 3187 } 3188 3189 if (netdev->features & NETIF_F_HW_VLAN_RX) 3190 e1000e_vlan_strip_enable(adapter); 3191 else 3192 e1000e_vlan_strip_disable(adapter); 3193 } 3194 3195 /** 3196 * e1000_configure - configure the hardware for Rx and Tx 3197 * @adapter: private board structure 3198 **/ 3199 static void e1000_configure(struct e1000_adapter *adapter) 3200 { 3201 e1000_set_multi(adapter->netdev); 3202 3203 e1000_restore_vlan(adapter); 3204 e1000_init_manageability_pt(adapter); 3205 3206 e1000_configure_tx(adapter); 3207 e1000_setup_rctl(adapter); 3208 e1000_configure_rx(adapter); 3209 adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring), 3210 GFP_KERNEL); 3211 } 3212 3213 /** 3214 * e1000e_power_up_phy - restore link in case the phy was powered down 3215 * @adapter: address of board private structure 3216 * 3217 * The phy may be powered down to save power and turn off link when the 3218 * driver is unloaded and wake on lan is not enabled (among others) 3219 * *** this routine MUST be followed by a call to e1000e_reset *** 3220 **/ 3221 void e1000e_power_up_phy(struct e1000_adapter *adapter) 3222 { 3223 if (adapter->hw.phy.ops.power_up) 3224 adapter->hw.phy.ops.power_up(&adapter->hw); 3225 3226 adapter->hw.mac.ops.setup_link(&adapter->hw); 3227 } 3228 3229 /** 3230 * e1000_power_down_phy - Power down the PHY 3231 * 3232 * Power down the PHY so no link is implied when interface is down. 3233 * The PHY cannot be powered down if management or WoL is active. 3234 */ 3235 static void e1000_power_down_phy(struct e1000_adapter *adapter) 3236 { 3237 /* WoL is enabled */ 3238 if (adapter->wol) 3239 return; 3240 3241 if (adapter->hw.phy.ops.power_down) 3242 adapter->hw.phy.ops.power_down(&adapter->hw); 3243 } 3244 3245 /** 3246 * e1000e_reset - bring the hardware into a known good state 3247 * 3248 * This function boots the hardware and enables some settings that 3249 * require a configuration cycle of the hardware - those cannot be 3250 * set/changed during runtime. After reset the device needs to be 3251 * properly configured for Rx, Tx etc. 3252 */ 3253 void e1000e_reset(struct e1000_adapter *adapter) 3254 { 3255 struct e1000_mac_info *mac = &adapter->hw.mac; 3256 struct e1000_fc_info *fc = &adapter->hw.fc; 3257 struct e1000_hw *hw = &adapter->hw; 3258 u32 tx_space, min_tx_space, min_rx_space; 3259 u32 pba = adapter->pba; 3260 u16 hwm; 3261 3262 /* reset Packet Buffer Allocation to default */ 3263 ew32(PBA, pba); 3264 3265 if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { 3266 /* 3267 * To maintain wire speed transmits, the Tx FIFO should be 3268 * large enough to accommodate two full transmit packets, 3269 * rounded up to the next 1KB and expressed in KB. Likewise, 3270 * the Rx FIFO should be large enough to accommodate at least 3271 * one full receive packet and is similarly rounded up and 3272 * expressed in KB. 
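 * The PBA register carries the Tx allocation in its upper 16 bits and
 * the Rx allocation in its lower 16 bits, both in KB. If the Tx share
 * is too small, space is taken from the Rx share below, but not below
 * min_rx_space unless early receive (ERT) can compensate.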
3273 */ 3274 pba = er32(PBA); 3275 /* upper 16 bits has Tx packet buffer allocation size in KB */ 3276 tx_space = pba >> 16; 3277 /* lower 16 bits has Rx packet buffer allocation size in KB */ 3278 pba &= 0xffff; 3279 /* 3280 * the Tx fifo also stores 16 bytes of information about the Tx 3281 * but don't include ethernet FCS because hardware appends it 3282 */ 3283 min_tx_space = (adapter->max_frame_size + 3284 sizeof(struct e1000_tx_desc) - 3285 ETH_FCS_LEN) * 2; 3286 min_tx_space = ALIGN(min_tx_space, 1024); 3287 min_tx_space >>= 10; 3288 /* software strips receive CRC, so leave room for it */ 3289 min_rx_space = adapter->max_frame_size; 3290 min_rx_space = ALIGN(min_rx_space, 1024); 3291 min_rx_space >>= 10; 3292 3293 /* 3294 * If current Tx allocation is less than the min Tx FIFO size, 3295 * and the min Tx FIFO size is less than the current Rx FIFO 3296 * allocation, take space away from current Rx allocation 3297 */ 3298 if ((tx_space < min_tx_space) && 3299 ((min_tx_space - tx_space) < pba)) { 3300 pba -= min_tx_space - tx_space; 3301 3302 /* 3303 * if short on Rx space, Rx wins and must trump Tx 3304 * adjustment or use Early Receive if available 3305 */ 3306 if ((pba < min_rx_space) && 3307 (!(adapter->flags & FLAG_HAS_ERT))) 3308 /* ERT enabled in e1000_configure_rx */ 3309 pba = min_rx_space; 3310 } 3311 3312 ew32(PBA, pba); 3313 } 3314 3315 /* 3316 * flow control settings 3317 * 3318 * The high water mark must be low enough to fit one full frame 3319 * (or the size used for early receive) above it in the Rx FIFO. 3320 * Set it to the lower of: 3321 * - 90% of the Rx FIFO size, and 3322 * - the full Rx FIFO size minus the early receive size (for parts 3323 * with ERT support assuming ERT set to E1000_ERT_2048), or 3324 * - the full Rx FIFO size minus one full frame 3325 */ 3326 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) 3327 fc->pause_time = 0xFFFF; 3328 else 3329 fc->pause_time = E1000_FC_PAUSE_TIME; 3330 fc->send_xon = 1; 3331 fc->current_mode = fc->requested_mode; 3332 3333 switch (hw->mac.type) { 3334 default: 3335 if ((adapter->flags & FLAG_HAS_ERT) && 3336 (adapter->netdev->mtu > ETH_DATA_LEN)) 3337 hwm = min(((pba << 10) * 9 / 10), 3338 ((pba << 10) - (E1000_ERT_2048 << 3))); 3339 else 3340 hwm = min(((pba << 10) * 9 / 10), 3341 ((pba << 10) - adapter->max_frame_size)); 3342 3343 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ 3344 fc->low_water = fc->high_water - 8; 3345 break; 3346 case e1000_pchlan: 3347 /* 3348 * Workaround PCH LOM adapter hangs with certain network 3349 * loads. If hangs persist, try disabling Tx flow control. 3350 */ 3351 if (adapter->netdev->mtu > ETH_DATA_LEN) { 3352 fc->high_water = 0x3500; 3353 fc->low_water = 0x1500; 3354 } else { 3355 fc->high_water = 0x5000; 3356 fc->low_water = 0x3000; 3357 } 3358 fc->refresh_time = 0x1000; 3359 break; 3360 case e1000_pch2lan: 3361 fc->high_water = 0x05C20; 3362 fc->low_water = 0x05048; 3363 fc->pause_time = 0x0650; 3364 fc->refresh_time = 0x0400; 3365 if (adapter->netdev->mtu > ETH_DATA_LEN) { 3366 pba = 14; 3367 ew32(PBA, pba); 3368 } 3369 break; 3370 } 3371 3372 /* 3373 * Disable Adaptive Interrupt Moderation if 2 full packets cannot 3374 * fit in receive buffer and early-receive not supported. 
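 * When that happens, interrupt throttling is switched off entirely
 * (ITR = 0) so the small Rx buffer is drained as fast as possible; the
 * rate is restored to 20000 interrupts/sec once the condition clears.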
3375 */ 3376 if (adapter->itr_setting & 0x3) { 3377 if (((adapter->max_frame_size * 2) > (pba << 10)) && 3378 !(adapter->flags & FLAG_HAS_ERT)) { 3379 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) { 3380 dev_info(&adapter->pdev->dev, 3381 "Interrupt Throttle Rate turned off\n"); 3382 adapter->flags2 |= FLAG2_DISABLE_AIM; 3383 ew32(ITR, 0); 3384 } 3385 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) { 3386 dev_info(&adapter->pdev->dev, 3387 "Interrupt Throttle Rate turned on\n"); 3388 adapter->flags2 &= ~FLAG2_DISABLE_AIM; 3389 adapter->itr = 20000; 3390 ew32(ITR, 1000000000 / (adapter->itr * 256)); 3391 } 3392 } 3393 3394 /* Allow time for pending master requests to run */ 3395 mac->ops.reset_hw(hw); 3396 3397 /* 3398 * For parts with AMT enabled, let the firmware know 3399 * that the network interface is in control 3400 */ 3401 if (adapter->flags & FLAG_HAS_AMT) 3402 e1000e_get_hw_control(adapter); 3403 3404 ew32(WUC, 0); 3405 3406 if (mac->ops.init_hw(hw)) 3407 e_err("Hardware Error\n"); 3408 3409 e1000_update_mng_vlan(adapter); 3410 3411 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 3412 ew32(VET, ETH_P_8021Q); 3413 3414 e1000e_reset_adaptive(hw); 3415 3416 if (!netif_running(adapter->netdev) && 3417 !test_bit(__E1000_TESTING, &adapter->state)) { 3418 e1000_power_down_phy(adapter); 3419 return; 3420 } 3421 3422 e1000_get_phy_info(hw); 3423 3424 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && 3425 !(adapter->flags & FLAG_SMART_POWER_DOWN)) { 3426 u16 phy_data = 0; 3427 /* 3428 * speed up time to link by disabling smart power down, ignore 3429 * the return value of this function because there is nothing 3430 * different we would do if it failed 3431 */ 3432 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); 3433 phy_data &= ~IGP02E1000_PM_SPD; 3434 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); 3435 } 3436 } 3437 3438 int e1000e_up(struct e1000_adapter *adapter) 3439 { 3440 struct e1000_hw *hw = &adapter->hw; 3441 3442 /* hardware has been reset, we need to reload some things */ 3443 e1000_configure(adapter); 3444 3445 clear_bit(__E1000_DOWN, &adapter->state); 3446 3447 napi_enable(&adapter->napi); 3448 if (adapter->msix_entries) 3449 e1000_configure_msix(adapter); 3450 e1000_irq_enable(adapter); 3451 3452 netif_start_queue(adapter->netdev); 3453 3454 /* fire a link change interrupt to start the watchdog */ 3455 if (adapter->msix_entries) 3456 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); 3457 else 3458 ew32(ICS, E1000_ICS_LSC); 3459 3460 return 0; 3461 } 3462 3463 static void e1000e_flush_descriptors(struct e1000_adapter *adapter) 3464 { 3465 struct e1000_hw *hw = &adapter->hw; 3466 3467 if (!(adapter->flags2 & FLAG2_DMA_BURST)) 3468 return; 3469 3470 /* flush pending descriptor writebacks to memory */ 3471 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); 3472 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); 3473 3474 /* execute the writes immediately */ 3475 e1e_flush(); 3476 } 3477 3478 static void e1000e_update_stats(struct e1000_adapter *adapter); 3479 3480 void e1000e_down(struct e1000_adapter *adapter) 3481 { 3482 struct net_device *netdev = adapter->netdev; 3483 struct e1000_hw *hw = &adapter->hw; 3484 u32 tctl, rctl; 3485 3486 /* 3487 * signal that we're down so the interrupt handler does not 3488 * reschedule our watchdog timer 3489 */ 3490 set_bit(__E1000_DOWN, &adapter->state); 3491 3492 /* disable receives in the hardware */ 3493 rctl = er32(RCTL); 3494 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) 3495 ew32(RCTL, rctl & ~E1000_RCTL_EN); 3496 /* 
flush and sleep below */ 3497 3498 netif_stop_queue(netdev); 3499 3500 /* disable transmits in the hardware */ 3501 tctl = er32(TCTL); 3502 tctl &= ~E1000_TCTL_EN; 3503 ew32(TCTL, tctl); 3504 3505 /* flush both disables and wait for them to finish */ 3506 e1e_flush(); 3507 usleep_range(10000, 20000); 3508 3509 napi_disable(&adapter->napi); 3510 e1000_irq_disable(adapter); 3511 3512 del_timer_sync(&adapter->watchdog_timer); 3513 del_timer_sync(&adapter->phy_info_timer); 3514 3515 netif_carrier_off(netdev); 3516 3517 spin_lock(&adapter->stats64_lock); 3518 e1000e_update_stats(adapter); 3519 spin_unlock(&adapter->stats64_lock); 3520 3521 e1000e_flush_descriptors(adapter); 3522 e1000_clean_tx_ring(adapter); 3523 e1000_clean_rx_ring(adapter); 3524 3525 adapter->link_speed = 0; 3526 adapter->link_duplex = 0; 3527 3528 if (!pci_channel_offline(adapter->pdev)) 3529 e1000e_reset(adapter); 3530 3531 /* 3532 * TODO: for power management, we could drop the link and 3533 * pci_disable_device here. 3534 */ 3535 } 3536 3537 void e1000e_reinit_locked(struct e1000_adapter *adapter) 3538 { 3539 might_sleep(); 3540 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 3541 usleep_range(1000, 2000); 3542 e1000e_down(adapter); 3543 e1000e_up(adapter); 3544 clear_bit(__E1000_RESETTING, &adapter->state); 3545 } 3546 3547 /** 3548 * e1000_sw_init - Initialize general software structures (struct e1000_adapter) 3549 * @adapter: board private structure to initialize 3550 * 3551 * e1000_sw_init initializes the Adapter private data structure. 3552 * Fields are initialized based on PCI device information and 3553 * OS network device settings (MTU size). 3554 **/ 3555 static int __devinit e1000_sw_init(struct e1000_adapter *adapter) 3556 { 3557 struct net_device *netdev = adapter->netdev; 3558 3559 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; 3560 adapter->rx_ps_bsize0 = 128; 3561 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 3562 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 3563 3564 spin_lock_init(&adapter->stats64_lock); 3565 3566 e1000e_set_interrupt_capability(adapter); 3567 3568 if (e1000_alloc_queues(adapter)) 3569 return -ENOMEM; 3570 3571 /* Explicitly disable IRQ since the NIC can be in any state. 
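 * At this stage no interrupt handler has been requested yet (that
 * happens later in e1000_open()); the write to IMC simply quiesces
 * interrupt generation in the hardware.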
*/ 3572 e1000_irq_disable(adapter); 3573 3574 set_bit(__E1000_DOWN, &adapter->state); 3575 return 0; 3576 } 3577 3578 /** 3579 * e1000_intr_msi_test - Interrupt Handler 3580 * @irq: interrupt number 3581 * @data: pointer to a network interface device structure 3582 **/ 3583 static irqreturn_t e1000_intr_msi_test(int irq, void *data) 3584 { 3585 struct net_device *netdev = data; 3586 struct e1000_adapter *adapter = netdev_priv(netdev); 3587 struct e1000_hw *hw = &adapter->hw; 3588 u32 icr = er32(ICR); 3589 3590 e_dbg("icr is %08X\n", icr); 3591 if (icr & E1000_ICR_RXSEQ) { 3592 adapter->flags &= ~FLAG_MSI_TEST_FAILED; 3593 wmb(); 3594 } 3595 3596 return IRQ_HANDLED; 3597 } 3598 3599 /** 3600 * e1000_test_msi_interrupt - Returns 0 for successful test 3601 * @adapter: board private struct 3602 * 3603 * code flow taken from tg3.c 3604 **/ 3605 static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) 3606 { 3607 struct net_device *netdev = adapter->netdev; 3608 struct e1000_hw *hw = &adapter->hw; 3609 int err; 3610 3611 /* poll_enable hasn't been called yet, so don't need disable */ 3612 /* clear any pending events */ 3613 er32(ICR); 3614 3615 /* free the real vector and request a test handler */ 3616 e1000_free_irq(adapter); 3617 e1000e_reset_interrupt_capability(adapter); 3618 3619 /* Assume that the test fails, if it succeeds then the test 3620 * MSI irq handler will unset this flag */ 3621 adapter->flags |= FLAG_MSI_TEST_FAILED; 3622 3623 err = pci_enable_msi(adapter->pdev); 3624 if (err) 3625 goto msi_test_failed; 3626 3627 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0, 3628 netdev->name, netdev); 3629 if (err) { 3630 pci_disable_msi(adapter->pdev); 3631 goto msi_test_failed; 3632 } 3633 3634 wmb(); 3635 3636 e1000_irq_enable(adapter); 3637 3638 /* fire an unusual interrupt on the test handler */ 3639 ew32(ICS, E1000_ICS_RXSEQ); 3640 e1e_flush(); 3641 msleep(50); 3642 3643 e1000_irq_disable(adapter); 3644 3645 rmb(); 3646 3647 if (adapter->flags & FLAG_MSI_TEST_FAILED) { 3648 adapter->int_mode = E1000E_INT_MODE_LEGACY; 3649 e_info("MSI interrupt test failed, using legacy interrupt.\n"); 3650 } else 3651 e_dbg("MSI interrupt test succeeded!\n"); 3652 3653 free_irq(adapter->pdev->irq, netdev); 3654 pci_disable_msi(adapter->pdev); 3655 3656 msi_test_failed: 3657 e1000e_set_interrupt_capability(adapter); 3658 return e1000_request_irq(adapter); 3659 } 3660 3661 /** 3662 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored 3663 * @adapter: board private struct 3664 * 3665 * code flow taken from tg3.c, called with e1000 interrupts disabled. 
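 * SERR is temporarily masked in PCI_COMMAND because a failed MSI write
 * can cause a master abort; e1000_test_msi_interrupt() then fires
 * E1000_ICS_RXSEQ at a dedicated test handler and checks that the
 * interrupt was actually delivered.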
3666 **/ 3667 static int e1000_test_msi(struct e1000_adapter *adapter) 3668 { 3669 int err; 3670 u16 pci_cmd; 3671 3672 if (!(adapter->flags & FLAG_MSI_ENABLED)) 3673 return 0; 3674 3675 /* disable SERR in case the MSI write causes a master abort */ 3676 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); 3677 if (pci_cmd & PCI_COMMAND_SERR) 3678 pci_write_config_word(adapter->pdev, PCI_COMMAND, 3679 pci_cmd & ~PCI_COMMAND_SERR); 3680 3681 err = e1000_test_msi_interrupt(adapter); 3682 3683 /* re-enable SERR */ 3684 if (pci_cmd & PCI_COMMAND_SERR) { 3685 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); 3686 pci_cmd |= PCI_COMMAND_SERR; 3687 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); 3688 } 3689 3690 return err; 3691 } 3692 3693 /** 3694 * e1000_open - Called when a network interface is made active 3695 * @netdev: network interface device structure 3696 * 3697 * Returns 0 on success, negative value on failure 3698 * 3699 * The open entry point is called when a network interface is made 3700 * active by the system (IFF_UP). At this point all resources needed 3701 * for transmit and receive operations are allocated, the interrupt 3702 * handler is registered with the OS, the watchdog timer is started, 3703 * and the stack is notified that the interface is ready. 3704 **/ 3705 static int e1000_open(struct net_device *netdev) 3706 { 3707 struct e1000_adapter *adapter = netdev_priv(netdev); 3708 struct e1000_hw *hw = &adapter->hw; 3709 struct pci_dev *pdev = adapter->pdev; 3710 int err; 3711 3712 /* disallow open during test */ 3713 if (test_bit(__E1000_TESTING, &adapter->state)) 3714 return -EBUSY; 3715 3716 pm_runtime_get_sync(&pdev->dev); 3717 3718 netif_carrier_off(netdev); 3719 3720 /* allocate transmit descriptors */ 3721 err = e1000e_setup_tx_resources(adapter); 3722 if (err) 3723 goto err_setup_tx; 3724 3725 /* allocate receive descriptors */ 3726 err = e1000e_setup_rx_resources(adapter); 3727 if (err) 3728 goto err_setup_rx; 3729 3730 /* 3731 * If AMT is enabled, let the firmware know that the network 3732 * interface is now open and reset the part to a known state. 3733 */ 3734 if (adapter->flags & FLAG_HAS_AMT) { 3735 e1000e_get_hw_control(adapter); 3736 e1000e_reset(adapter); 3737 } 3738 3739 e1000e_power_up_phy(adapter); 3740 3741 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 3742 if ((adapter->hw.mng_cookie.status & 3743 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) 3744 e1000_update_mng_vlan(adapter); 3745 3746 /* DMA latency requirement to workaround early-receive/jumbo issue */ 3747 if ((adapter->flags & FLAG_HAS_ERT) || 3748 (adapter->hw.mac.type == e1000_pch2lan)) 3749 pm_qos_add_request(&adapter->netdev->pm_qos_req, 3750 PM_QOS_CPU_DMA_LATENCY, 3751 PM_QOS_DEFAULT_VALUE); 3752 3753 /* 3754 * before we allocate an interrupt, we must be ready to handle it. 3755 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 3756 * as soon as we call pci_request_irq, so we have to setup our 3757 * clean_rx handler before we do so. 
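 * e1000_configure() below programs the Tx/Rx rings and installs the
 * clean_rx/alloc_rx_buf handlers before the interrupt is requested.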
3758 */ 3759 e1000_configure(adapter); 3760 3761 err = e1000_request_irq(adapter); 3762 if (err) 3763 goto err_req_irq; 3764 3765 /* 3766 * Work around PCIe errata with MSI interrupts causing some chipsets to 3767 * ignore e1000e MSI messages, which means we need to test our MSI 3768 * interrupt now 3769 */ 3770 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) { 3771 err = e1000_test_msi(adapter); 3772 if (err) { 3773 e_err("Interrupt allocation failed\n"); 3774 goto err_req_irq; 3775 } 3776 } 3777 3778 /* From here on the code is the same as e1000e_up() */ 3779 clear_bit(__E1000_DOWN, &adapter->state); 3780 3781 napi_enable(&adapter->napi); 3782 3783 e1000_irq_enable(adapter); 3784 3785 netif_start_queue(netdev); 3786 3787 adapter->idle_check = true; 3788 pm_runtime_put(&pdev->dev); 3789 3790 /* fire a link status change interrupt to start the watchdog */ 3791 if (adapter->msix_entries) 3792 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); 3793 else 3794 ew32(ICS, E1000_ICS_LSC); 3795 3796 return 0; 3797 3798 err_req_irq: 3799 e1000e_release_hw_control(adapter); 3800 e1000_power_down_phy(adapter); 3801 e1000e_free_rx_resources(adapter); 3802 err_setup_rx: 3803 e1000e_free_tx_resources(adapter); 3804 err_setup_tx: 3805 e1000e_reset(adapter); 3806 pm_runtime_put_sync(&pdev->dev); 3807 3808 return err; 3809 } 3810 3811 /** 3812 * e1000_close - Disables a network interface 3813 * @netdev: network interface device structure 3814 * 3815 * Returns 0, this is not allowed to fail 3816 * 3817 * The close entry point is called when an interface is de-activated 3818 * by the OS. The hardware is still under the drivers control, but 3819 * needs to be disabled. A global MAC reset is issued to stop the 3820 * hardware, and all transmit and receive resources are freed. 
3821 **/ 3822 static int e1000_close(struct net_device *netdev) 3823 { 3824 struct e1000_adapter *adapter = netdev_priv(netdev); 3825 struct pci_dev *pdev = adapter->pdev; 3826 3827 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); 3828 3829 pm_runtime_get_sync(&pdev->dev); 3830 3831 if (!test_bit(__E1000_DOWN, &adapter->state)) { 3832 e1000e_down(adapter); 3833 e1000_free_irq(adapter); 3834 } 3835 e1000_power_down_phy(adapter); 3836 3837 e1000e_free_tx_resources(adapter); 3838 e1000e_free_rx_resources(adapter); 3839 3840 /* 3841 * kill manageability vlan ID if supported, but not if a vlan with 3842 * the same ID is registered on the host OS (let 8021q kill it) 3843 */ 3844 if (adapter->hw.mng_cookie.status & 3845 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) 3846 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 3847 3848 /* 3849 * If AMT is enabled, let the firmware know that the network 3850 * interface is now closed 3851 */ 3852 if ((adapter->flags & FLAG_HAS_AMT) && 3853 !test_bit(__E1000_TESTING, &adapter->state)) 3854 e1000e_release_hw_control(adapter); 3855 3856 if ((adapter->flags & FLAG_HAS_ERT) || 3857 (adapter->hw.mac.type == e1000_pch2lan)) 3858 pm_qos_remove_request(&adapter->netdev->pm_qos_req); 3859 3860 pm_runtime_put_sync(&pdev->dev); 3861 3862 return 0; 3863 } 3864 /** 3865 * e1000_set_mac - Change the Ethernet Address of the NIC 3866 * @netdev: network interface device structure 3867 * @p: pointer to an address structure 3868 * 3869 * Returns 0 on success, negative on failure 3870 **/ 3871 static int e1000_set_mac(struct net_device *netdev, void *p) 3872 { 3873 struct e1000_adapter *adapter = netdev_priv(netdev); 3874 struct sockaddr *addr = p; 3875 3876 if (!is_valid_ether_addr(addr->sa_data)) 3877 return -EADDRNOTAVAIL; 3878 3879 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 3880 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); 3881 3882 e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); 3883 3884 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { 3885 /* activate the work around */ 3886 e1000e_set_laa_state_82571(&adapter->hw, 1); 3887 3888 /* 3889 * Hold a copy of the LAA in RAR[14] This is done so that 3890 * between the time RAR[0] gets clobbered and the time it 3891 * gets fixed (in e1000_watchdog), the actual LAA is in one 3892 * of the RARs and no incoming packets directed to this port 3893 * are dropped. Eventually the LAA will be in RAR[0] and 3894 * RAR[14] 3895 */ 3896 e1000e_rar_set(&adapter->hw, 3897 adapter->hw.mac.addr, 3898 adapter->hw.mac.rar_entry_count - 1); 3899 } 3900 3901 return 0; 3902 } 3903 3904 /** 3905 * e1000e_update_phy_task - work thread to update phy 3906 * @work: pointer to our work struct 3907 * 3908 * this worker thread exists because we must acquire a 3909 * semaphore to read the phy, which we could msleep while 3910 * waiting for it, and we can't msleep in a timer. 
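* (The timer callback e1000_update_phy_info() below therefore only schedules this work item, so the actual PHY access happens in process context where sleeping is allowed.)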
3911 **/ 3912 static void e1000e_update_phy_task(struct work_struct *work) 3913 { 3914 struct e1000_adapter *adapter = container_of(work, 3915 struct e1000_adapter, update_phy_task); 3916 3917 if (test_bit(__E1000_DOWN, &adapter->state)) 3918 return; 3919 3920 e1000_get_phy_info(&adapter->hw); 3921 } 3922 3923 /* 3924 * Need to wait a few seconds after link up to get diagnostic information from 3925 * the phy 3926 */ 3927 static void e1000_update_phy_info(unsigned long data) 3928 { 3929 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 3930 3931 if (test_bit(__E1000_DOWN, &adapter->state)) 3932 return; 3933 3934 schedule_work(&adapter->update_phy_task); 3935 } 3936 3937 /** 3938 * e1000e_update_phy_stats - Update the PHY statistics counters 3939 * @adapter: board private structure 3940 * 3941 * Read/clear the upper 16-bit PHY registers and read/accumulate lower 3942 **/ 3943 static void e1000e_update_phy_stats(struct e1000_adapter *adapter) 3944 { 3945 struct e1000_hw *hw = &adapter->hw; 3946 s32 ret_val; 3947 u16 phy_data; 3948 3949 ret_val = hw->phy.ops.acquire(hw); 3950 if (ret_val) 3951 return; 3952 3953 /* 3954 * A page set is expensive so check if already on desired page. 3955 * If not, set to the page with the PHY status registers. 3956 */ 3957 hw->phy.addr = 1; 3958 ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 3959 &phy_data); 3960 if (ret_val) 3961 goto release; 3962 if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) { 3963 ret_val = hw->phy.ops.set_page(hw, 3964 HV_STATS_PAGE << IGP_PAGE_SHIFT); 3965 if (ret_val) 3966 goto release; 3967 } 3968 3969 /* Single Collision Count */ 3970 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); 3971 ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); 3972 if (!ret_val) 3973 adapter->stats.scc += phy_data; 3974 3975 /* Excessive Collision Count */ 3976 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); 3977 ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); 3978 if (!ret_val) 3979 adapter->stats.ecol += phy_data; 3980 3981 /* Multiple Collision Count */ 3982 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); 3983 ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); 3984 if (!ret_val) 3985 adapter->stats.mcc += phy_data; 3986 3987 /* Late Collision Count */ 3988 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); 3989 ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); 3990 if (!ret_val) 3991 adapter->stats.latecol += phy_data; 3992 3993 /* Collision Count - also used for adaptive IFS */ 3994 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); 3995 ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); 3996 if (!ret_val) 3997 hw->mac.collision_delta = phy_data; 3998 3999 /* Defer Count */ 4000 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); 4001 ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); 4002 if (!ret_val) 4003 adapter->stats.dc += phy_data; 4004 4005 /* Transmit with no CRS */ 4006 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); 4007 ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); 4008 if (!ret_val) 4009 adapter->stats.tncrs += phy_data; 4010 4011 release: 4012 hw->phy.ops.release(hw); 4013 } 4014 4015 /** 4016 * e1000e_update_stats - Update the board statistics counters 4017 * @adapter: board private structure 4018 **/ 4019 static void e1000e_update_stats(struct e1000_adapter *adapter) 4020 { 4021 struct net_device *netdev = adapter->netdev; 4022 
struct e1000_hw *hw = &adapter->hw; 4023 struct pci_dev *pdev = adapter->pdev; 4024 4025 /* 4026 * Prevent stats update while adapter is being reset, or if the pci 4027 * connection is down. 4028 */ 4029 if (adapter->link_speed == 0) 4030 return; 4031 if (pci_channel_offline(pdev)) 4032 return; 4033 4034 adapter->stats.crcerrs += er32(CRCERRS); 4035 adapter->stats.gprc += er32(GPRC); 4036 adapter->stats.gorc += er32(GORCL); 4037 er32(GORCH); /* Clear gorc */ 4038 adapter->stats.bprc += er32(BPRC); 4039 adapter->stats.mprc += er32(MPRC); 4040 adapter->stats.roc += er32(ROC); 4041 4042 adapter->stats.mpc += er32(MPC); 4043 4044 /* Half-duplex statistics */ 4045 if (adapter->link_duplex == HALF_DUPLEX) { 4046 if (adapter->flags2 & FLAG2_HAS_PHY_STATS) { 4047 e1000e_update_phy_stats(adapter); 4048 } else { 4049 adapter->stats.scc += er32(SCC); 4050 adapter->stats.ecol += er32(ECOL); 4051 adapter->stats.mcc += er32(MCC); 4052 adapter->stats.latecol += er32(LATECOL); 4053 adapter->stats.dc += er32(DC); 4054 4055 hw->mac.collision_delta = er32(COLC); 4056 4057 if ((hw->mac.type != e1000_82574) && 4058 (hw->mac.type != e1000_82583)) 4059 adapter->stats.tncrs += er32(TNCRS); 4060 } 4061 adapter->stats.colc += hw->mac.collision_delta; 4062 } 4063 4064 adapter->stats.xonrxc += er32(XONRXC); 4065 adapter->stats.xontxc += er32(XONTXC); 4066 adapter->stats.xoffrxc += er32(XOFFRXC); 4067 adapter->stats.xofftxc += er32(XOFFTXC); 4068 adapter->stats.gptc += er32(GPTC); 4069 adapter->stats.gotc += er32(GOTCL); 4070 er32(GOTCH); /* Clear gotc */ 4071 adapter->stats.rnbc += er32(RNBC); 4072 adapter->stats.ruc += er32(RUC); 4073 4074 adapter->stats.mptc += er32(MPTC); 4075 adapter->stats.bptc += er32(BPTC); 4076 4077 /* used for adaptive IFS */ 4078 4079 hw->mac.tx_packet_delta = er32(TPT); 4080 adapter->stats.tpt += hw->mac.tx_packet_delta; 4081 4082 adapter->stats.algnerrc += er32(ALGNERRC); 4083 adapter->stats.rxerrc += er32(RXERRC); 4084 adapter->stats.cexterr += er32(CEXTERR); 4085 adapter->stats.tsctc += er32(TSCTC); 4086 adapter->stats.tsctfc += er32(TSCTFC); 4087 4088 /* Fill out the OS statistics structure */ 4089 netdev->stats.multicast = adapter->stats.mprc; 4090 netdev->stats.collisions = adapter->stats.colc; 4091 4092 /* Rx Errors */ 4093 4094 /* 4095 * RLEC on some newer hardware can be incorrect so build 4096 * our own version based on RUC and ROC 4097 */ 4098 netdev->stats.rx_errors = adapter->stats.rxerrc + 4099 adapter->stats.crcerrs + adapter->stats.algnerrc + 4100 adapter->stats.ruc + adapter->stats.roc + 4101 adapter->stats.cexterr; 4102 netdev->stats.rx_length_errors = adapter->stats.ruc + 4103 adapter->stats.roc; 4104 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; 4105 netdev->stats.rx_frame_errors = adapter->stats.algnerrc; 4106 netdev->stats.rx_missed_errors = adapter->stats.mpc; 4107 4108 /* Tx Errors */ 4109 netdev->stats.tx_errors = adapter->stats.ecol + 4110 adapter->stats.latecol; 4111 netdev->stats.tx_aborted_errors = adapter->stats.ecol; 4112 netdev->stats.tx_window_errors = adapter->stats.latecol; 4113 netdev->stats.tx_carrier_errors = adapter->stats.tncrs; 4114 4115 /* Tx Dropped needs to be maintained elsewhere */ 4116 4117 /* Management Stats */ 4118 adapter->stats.mgptc += er32(MGTPTC); 4119 adapter->stats.mgprc += er32(MGTPRC); 4120 adapter->stats.mgpdc += er32(MGTPDC); 4121 } 4122 4123 /** 4124 * e1000_phy_read_status - Update the PHY register status snapshot 4125 * @adapter: board private structure 4126 **/ 4127 static void e1000_phy_read_status(struct 
e1000_adapter *adapter) 4128 { 4129 struct e1000_hw *hw = &adapter->hw; 4130 struct e1000_phy_regs *phy = &adapter->phy_regs; 4131 4132 if ((er32(STATUS) & E1000_STATUS_LU) && 4133 (adapter->hw.phy.media_type == e1000_media_type_copper)) { 4134 int ret_val; 4135 4136 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr); 4137 ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr); 4138 ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise); 4139 ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa); 4140 ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion); 4141 ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000); 4142 ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000); 4143 ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus); 4144 if (ret_val) 4145 e_warn("Error reading PHY register\n"); 4146 } else { 4147 /* 4148 * Do not read PHY registers if link is not up 4149 * Set values to typical power-on defaults 4150 */ 4151 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX); 4152 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | 4153 BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE | 4154 BMSR_ERCAP); 4155 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP | 4156 ADVERTISE_ALL | ADVERTISE_CSMA); 4157 phy->lpa = 0; 4158 phy->expansion = EXPANSION_ENABLENPAGE; 4159 phy->ctrl1000 = ADVERTISE_1000FULL; 4160 phy->stat1000 = 0; 4161 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF); 4162 } 4163 } 4164 4165 static void e1000_print_link_info(struct e1000_adapter *adapter) 4166 { 4167 struct e1000_hw *hw = &adapter->hw; 4168 u32 ctrl = er32(CTRL); 4169 4170 /* Link status message must follow this format for user tools */ 4171 printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, " 4172 "Flow Control: %s\n", 4173 adapter->netdev->name, 4174 adapter->link_speed, 4175 (adapter->link_duplex == FULL_DUPLEX) ? 4176 "Full Duplex" : "Half Duplex", 4177 ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? 4178 "Rx/Tx" : 4179 ((ctrl & E1000_CTRL_RFCE) ? "Rx" : 4180 ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None"))); 4181 } 4182 4183 static bool e1000e_has_link(struct e1000_adapter *adapter) 4184 { 4185 struct e1000_hw *hw = &adapter->hw; 4186 bool link_active = 0; 4187 s32 ret_val = 0; 4188 4189 /* 4190 * get_link_status is set on LSC (link status) interrupt or 4191 * Rx sequence error interrupt. 
get_link_status will stay 4192 * false until the check_for_link establishes link 4193 * for copper adapters ONLY 4194 */ 4195 switch (hw->phy.media_type) { 4196 case e1000_media_type_copper: 4197 if (hw->mac.get_link_status) { 4198 ret_val = hw->mac.ops.check_for_link(hw); 4199 link_active = !hw->mac.get_link_status; 4200 } else { 4201 link_active = 1; 4202 } 4203 break; 4204 case e1000_media_type_fiber: 4205 ret_val = hw->mac.ops.check_for_link(hw); 4206 link_active = !!(er32(STATUS) & E1000_STATUS_LU); 4207 break; 4208 case e1000_media_type_internal_serdes: 4209 ret_val = hw->mac.ops.check_for_link(hw); 4210 link_active = adapter->hw.mac.serdes_has_link; 4211 break; 4212 default: 4213 case e1000_media_type_unknown: 4214 break; 4215 } 4216 4217 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && 4218 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { 4219 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ 4220 e_info("Gigabit has been disabled, downgrading speed\n"); 4221 } 4222 4223 return link_active; 4224 } 4225 4226 static void e1000e_enable_receives(struct e1000_adapter *adapter) 4227 { 4228 /* make sure the receive unit is started */ 4229 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) && 4230 (adapter->flags & FLAG_RX_RESTART_NOW)) { 4231 struct e1000_hw *hw = &adapter->hw; 4232 u32 rctl = er32(RCTL); 4233 ew32(RCTL, rctl | E1000_RCTL_EN); 4234 adapter->flags &= ~FLAG_RX_RESTART_NOW; 4235 } 4236 } 4237 4238 static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) 4239 { 4240 struct e1000_hw *hw = &adapter->hw; 4241 4242 /* 4243 * With 82574 controllers, PHY needs to be checked periodically 4244 * for hung state and reset, if two calls return true 4245 */ 4246 if (e1000_check_phy_82574(hw)) 4247 adapter->phy_hang_count++; 4248 else 4249 adapter->phy_hang_count = 0; 4250 4251 if (adapter->phy_hang_count > 1) { 4252 adapter->phy_hang_count = 0; 4253 schedule_work(&adapter->reset_task); 4254 } 4255 } 4256 4257 /** 4258 * e1000_watchdog - Timer Call-back 4259 * @data: pointer to adapter cast into an unsigned long 4260 **/ 4261 static void e1000_watchdog(unsigned long data) 4262 { 4263 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 4264 4265 /* Do the rest outside of interrupt context */ 4266 schedule_work(&adapter->watchdog_task); 4267 4268 /* TODO: make this use queue_delayed_work() */ 4269 } 4270 4271 static void e1000_watchdog_task(struct work_struct *work) 4272 { 4273 struct e1000_adapter *adapter = container_of(work, 4274 struct e1000_adapter, watchdog_task); 4275 struct net_device *netdev = adapter->netdev; 4276 struct e1000_mac_info *mac = &adapter->hw.mac; 4277 struct e1000_phy_info *phy = &adapter->hw.phy; 4278 struct e1000_ring *tx_ring = adapter->tx_ring; 4279 struct e1000_hw *hw = &adapter->hw; 4280 u32 link, tctl; 4281 4282 if (test_bit(__E1000_DOWN, &adapter->state)) 4283 return; 4284 4285 link = e1000e_has_link(adapter); 4286 if ((netif_carrier_ok(netdev)) && link) { 4287 /* Cancel scheduled suspend requests. */ 4288 pm_runtime_resume(netdev->dev.parent); 4289 4290 e1000e_enable_receives(adapter); 4291 goto link_up; 4292 } 4293 4294 if ((e1000e_enable_tx_pkt_filtering(hw)) && 4295 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)) 4296 e1000_update_mng_vlan(adapter); 4297 4298 if (link) { 4299 if (!netif_carrier_ok(netdev)) { 4300 bool txb2b = 1; 4301 4302 /* Cancel scheduled suspend requests. 
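(A runtime suspend may have been scheduled while the link was down, so resume the parent PCI device before doing the link-up work below.)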
*/ 4303 pm_runtime_resume(netdev->dev.parent); 4304 4305 /* update snapshot of PHY registers on LSC */ 4306 e1000_phy_read_status(adapter); 4307 mac->ops.get_link_up_info(&adapter->hw, 4308 &adapter->link_speed, 4309 &adapter->link_duplex); 4310 e1000_print_link_info(adapter); 4311 /* 4312 * On supported PHYs, check for duplex mismatch only 4313 * if link has autonegotiated at 10/100 half 4314 */ 4315 if ((hw->phy.type == e1000_phy_igp_3 || 4316 hw->phy.type == e1000_phy_bm) && 4317 (hw->mac.autoneg == true) && 4318 (adapter->link_speed == SPEED_10 || 4319 adapter->link_speed == SPEED_100) && 4320 (adapter->link_duplex == HALF_DUPLEX)) { 4321 u16 autoneg_exp; 4322 4323 e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp); 4324 4325 if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS)) 4326 e_info("Autonegotiated half duplex but" 4327 " link partner cannot autoneg. " 4328 " Try forcing full duplex if " 4329 "link gets many collisions.\n"); 4330 } 4331 4332 /* adjust timeout factor according to speed/duplex */ 4333 adapter->tx_timeout_factor = 1; 4334 switch (adapter->link_speed) { 4335 case SPEED_10: 4336 txb2b = 0; 4337 adapter->tx_timeout_factor = 16; 4338 break; 4339 case SPEED_100: 4340 txb2b = 0; 4341 adapter->tx_timeout_factor = 10; 4342 break; 4343 } 4344 4345 /* 4346 * workaround: re-program speed mode bit after 4347 * link-up event 4348 */ 4349 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && 4350 !txb2b) { 4351 u32 tarc0; 4352 tarc0 = er32(TARC(0)); 4353 tarc0 &= ~SPEED_MODE_BIT; 4354 ew32(TARC(0), tarc0); 4355 } 4356 4357 /* 4358 * disable TSO for pcie and 10/100 speeds, to avoid 4359 * some hardware issues 4360 */ 4361 if (!(adapter->flags & FLAG_TSO_FORCE)) { 4362 switch (adapter->link_speed) { 4363 case SPEED_10: 4364 case SPEED_100: 4365 e_info("10/100 speed: disabling TSO\n"); 4366 netdev->features &= ~NETIF_F_TSO; 4367 netdev->features &= ~NETIF_F_TSO6; 4368 break; 4369 case SPEED_1000: 4370 netdev->features |= NETIF_F_TSO; 4371 netdev->features |= NETIF_F_TSO6; 4372 break; 4373 default: 4374 /* oops */ 4375 break; 4376 } 4377 } 4378 4379 /* 4380 * enable transmits in the hardware, need to do this 4381 * after setting TARC(0) 4382 */ 4383 tctl = er32(TCTL); 4384 tctl |= E1000_TCTL_EN; 4385 ew32(TCTL, tctl); 4386 4387 /* 4388 * Perform any post-link-up configuration before 4389 * reporting link up. 
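* (Only PHYs that need extra work after link-up populate the cfg_on_link_up hook; when it is NULL the call below is simply skipped.)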
4390 */ 4391 if (phy->ops.cfg_on_link_up) 4392 phy->ops.cfg_on_link_up(hw); 4393 4394 netif_carrier_on(netdev); 4395 4396 if (!test_bit(__E1000_DOWN, &adapter->state)) 4397 mod_timer(&adapter->phy_info_timer, 4398 round_jiffies(jiffies + 2 * HZ)); 4399 } 4400 } else { 4401 if (netif_carrier_ok(netdev)) { 4402 adapter->link_speed = 0; 4403 adapter->link_duplex = 0; 4404 /* Link status message must follow this format */ 4405 printk(KERN_INFO "e1000e: %s NIC Link is Down\n", 4406 adapter->netdev->name); 4407 netif_carrier_off(netdev); 4408 if (!test_bit(__E1000_DOWN, &adapter->state)) 4409 mod_timer(&adapter->phy_info_timer, 4410 round_jiffies(jiffies + 2 * HZ)); 4411 4412 if (adapter->flags & FLAG_RX_NEEDS_RESTART) 4413 schedule_work(&adapter->reset_task); 4414 else 4415 pm_schedule_suspend(netdev->dev.parent, 4416 LINK_TIMEOUT); 4417 } 4418 } 4419 4420 link_up: 4421 spin_lock(&adapter->stats64_lock); 4422 e1000e_update_stats(adapter); 4423 4424 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 4425 adapter->tpt_old = adapter->stats.tpt; 4426 mac->collision_delta = adapter->stats.colc - adapter->colc_old; 4427 adapter->colc_old = adapter->stats.colc; 4428 4429 adapter->gorc = adapter->stats.gorc - adapter->gorc_old; 4430 adapter->gorc_old = adapter->stats.gorc; 4431 adapter->gotc = adapter->stats.gotc - adapter->gotc_old; 4432 adapter->gotc_old = adapter->stats.gotc; 4433 spin_unlock(&adapter->stats64_lock); 4434 4435 e1000e_update_adaptive(&adapter->hw); 4436 4437 if (!netif_carrier_ok(netdev) && 4438 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) { 4439 /* 4440 * We've lost link, so the controller stops DMA, 4441 * but we've got queued Tx work that's never going 4442 * to get done, so reset controller to flush Tx. 4443 * (Do the reset outside of interrupt context). 4444 */ 4445 schedule_work(&adapter->reset_task); 4446 /* return immediately since reset is imminent */ 4447 return; 4448 } 4449 4450 /* Simple mode for Interrupt Throttle Rate (ITR) */ 4451 if (adapter->itr_setting == 4) { 4452 /* 4453 * Symmetric Tx/Rx gets a reduced ITR=2000; 4454 * Total asymmetrical Tx or Rx gets ITR=8000; 4455 * everyone else is between 2000-8000. 4456 */ 4457 u32 goc = (adapter->gotc + adapter->gorc) / 10000; 4458 u32 dif = (adapter->gotc > adapter->gorc ? 4459 adapter->gotc - adapter->gorc : 4460 adapter->gorc - adapter->gotc) / 10000; 4461 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; 4462 4463 ew32(ITR, 1000000000 / (itr * 256)); 4464 } 4465 4466 /* Cause software interrupt to ensure Rx ring is cleaned */ 4467 if (adapter->msix_entries) 4468 ew32(ICS, adapter->rx_ring->ims_val); 4469 else 4470 ew32(ICS, E1000_ICS_RXDMT0); 4471 4472 /* flush pending descriptors to memory before detecting Tx hang */ 4473 e1000e_flush_descriptors(adapter); 4474 4475 /* Force detection of hung controller every watchdog period */ 4476 adapter->detect_tx_hung = 1; 4477 4478 /* 4479 * With 82571 controllers, LAA may be overwritten due to controller 4480 * reset from the other port. 
Set the appropriate LAA in RAR[0] 4481 */ 4482 if (e1000e_get_laa_state_82571(hw)) 4483 e1000e_rar_set(hw, adapter->hw.mac.addr, 0); 4484 4485 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) 4486 e1000e_check_82574_phy_workaround(adapter); 4487 4488 /* Reset the timer */ 4489 if (!test_bit(__E1000_DOWN, &adapter->state)) 4490 mod_timer(&adapter->watchdog_timer, 4491 round_jiffies(jiffies + 2 * HZ)); 4492 } 4493 4494 #define E1000_TX_FLAGS_CSUM 0x00000001 4495 #define E1000_TX_FLAGS_VLAN 0x00000002 4496 #define E1000_TX_FLAGS_TSO 0x00000004 4497 #define E1000_TX_FLAGS_IPV4 0x00000008 4498 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 4499 #define E1000_TX_FLAGS_VLAN_SHIFT 16 4500 4501 static int e1000_tso(struct e1000_adapter *adapter, 4502 struct sk_buff *skb) 4503 { 4504 struct e1000_ring *tx_ring = adapter->tx_ring; 4505 struct e1000_context_desc *context_desc; 4506 struct e1000_buffer *buffer_info; 4507 unsigned int i; 4508 u32 cmd_length = 0; 4509 u16 ipcse = 0, tucse, mss; 4510 u8 ipcss, ipcso, tucss, tucso, hdr_len; 4511 4512 if (!skb_is_gso(skb)) 4513 return 0; 4514 4515 if (skb_header_cloned(skb)) { 4516 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 4517 4518 if (err) 4519 return err; 4520 } 4521 4522 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 4523 mss = skb_shinfo(skb)->gso_size; 4524 if (skb->protocol == htons(ETH_P_IP)) { 4525 struct iphdr *iph = ip_hdr(skb); 4526 iph->tot_len = 0; 4527 iph->check = 0; 4528 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 4529 0, IPPROTO_TCP, 0); 4530 cmd_length = E1000_TXD_CMD_IP; 4531 ipcse = skb_transport_offset(skb) - 1; 4532 } else if (skb_is_gso_v6(skb)) { 4533 ipv6_hdr(skb)->payload_len = 0; 4534 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 4535 &ipv6_hdr(skb)->daddr, 4536 0, IPPROTO_TCP, 0); 4537 ipcse = 0; 4538 } 4539 ipcss = skb_network_offset(skb); 4540 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; 4541 tucss = skb_transport_offset(skb); 4542 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; 4543 tucse = 0; 4544 4545 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 4546 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); 4547 4548 i = tx_ring->next_to_use; 4549 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 4550 buffer_info = &tx_ring->buffer_info[i]; 4551 4552 context_desc->lower_setup.ip_fields.ipcss = ipcss; 4553 context_desc->lower_setup.ip_fields.ipcso = ipcso; 4554 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); 4555 context_desc->upper_setup.tcp_fields.tucss = tucss; 4556 context_desc->upper_setup.tcp_fields.tucso = tucso; 4557 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); 4558 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); 4559 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; 4560 context_desc->cmd_and_length = cpu_to_le32(cmd_length); 4561 4562 buffer_info->time_stamp = jiffies; 4563 buffer_info->next_to_watch = i; 4564 4565 i++; 4566 if (i == tx_ring->count) 4567 i = 0; 4568 tx_ring->next_to_use = i; 4569 4570 return 1; 4571 } 4572 4573 static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) 4574 { 4575 struct e1000_ring *tx_ring = adapter->tx_ring; 4576 struct e1000_context_desc *context_desc; 4577 struct e1000_buffer *buffer_info; 4578 unsigned int i; 4579 u8 css; 4580 u32 cmd_len = E1000_TXD_CMD_DEXT; 4581 __be16 protocol; 4582 4583 if (skb->ip_summed != CHECKSUM_PARTIAL) 4584 return 0; 4585 4586 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) 4587 protocol = 
vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; 4588 else 4589 protocol = skb->protocol; 4590 4591 switch (protocol) { 4592 case cpu_to_be16(ETH_P_IP): 4593 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 4594 cmd_len |= E1000_TXD_CMD_TCP; 4595 break; 4596 case cpu_to_be16(ETH_P_IPV6): 4597 /* XXX not handling all IPV6 headers */ 4598 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 4599 cmd_len |= E1000_TXD_CMD_TCP; 4600 break; 4601 default: 4602 if (unlikely(net_ratelimit())) 4603 e_warn("checksum_partial proto=%x!\n", 4604 be16_to_cpu(protocol)); 4605 break; 4606 } 4607 4608 css = skb_checksum_start_offset(skb); 4609 4610 i = tx_ring->next_to_use; 4611 buffer_info = &tx_ring->buffer_info[i]; 4612 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 4613 4614 context_desc->lower_setup.ip_config = 0; 4615 context_desc->upper_setup.tcp_fields.tucss = css; 4616 context_desc->upper_setup.tcp_fields.tucso = 4617 css + skb->csum_offset; 4618 context_desc->upper_setup.tcp_fields.tucse = 0; 4619 context_desc->tcp_seg_setup.data = 0; 4620 context_desc->cmd_and_length = cpu_to_le32(cmd_len); 4621 4622 buffer_info->time_stamp = jiffies; 4623 buffer_info->next_to_watch = i; 4624 4625 i++; 4626 if (i == tx_ring->count) 4627 i = 0; 4628 tx_ring->next_to_use = i; 4629 4630 return 1; 4631 } 4632 4633 #define E1000_MAX_PER_TXD 8192 4634 #define E1000_MAX_TXD_PWR 12 4635 4636 static int e1000_tx_map(struct e1000_adapter *adapter, 4637 struct sk_buff *skb, unsigned int first, 4638 unsigned int max_per_txd, unsigned int nr_frags, 4639 unsigned int mss) 4640 { 4641 struct e1000_ring *tx_ring = adapter->tx_ring; 4642 struct pci_dev *pdev = adapter->pdev; 4643 struct e1000_buffer *buffer_info; 4644 unsigned int len = skb_headlen(skb); 4645 unsigned int offset = 0, size, count = 0, i; 4646 unsigned int f, bytecount, segs; 4647 4648 i = tx_ring->next_to_use; 4649 4650 while (len) { 4651 buffer_info = &tx_ring->buffer_info[i]; 4652 size = min(len, max_per_txd); 4653 4654 buffer_info->length = size; 4655 buffer_info->time_stamp = jiffies; 4656 buffer_info->next_to_watch = i; 4657 buffer_info->dma = dma_map_single(&pdev->dev, 4658 skb->data + offset, 4659 size, DMA_TO_DEVICE); 4660 buffer_info->mapped_as_page = false; 4661 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 4662 goto dma_error; 4663 4664 len -= size; 4665 offset += size; 4666 count++; 4667 4668 if (len) { 4669 i++; 4670 if (i == tx_ring->count) 4671 i = 0; 4672 } 4673 } 4674 4675 for (f = 0; f < nr_frags; f++) { 4676 const struct skb_frag_struct *frag; 4677 4678 frag = &skb_shinfo(skb)->frags[f]; 4679 len = skb_frag_size(frag); 4680 offset = 0; 4681 4682 while (len) { 4683 i++; 4684 if (i == tx_ring->count) 4685 i = 0; 4686 4687 buffer_info = &tx_ring->buffer_info[i]; 4688 size = min(len, max_per_txd); 4689 4690 buffer_info->length = size; 4691 buffer_info->time_stamp = jiffies; 4692 buffer_info->next_to_watch = i; 4693 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 4694 offset, size, DMA_TO_DEVICE); 4695 buffer_info->mapped_as_page = true; 4696 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 4697 goto dma_error; 4698 4699 len -= size; 4700 offset += size; 4701 count++; 4702 } 4703 } 4704 4705 segs = skb_shinfo(skb)->gso_segs ? 
: 1; 4706 /* multiply data chunks by size of headers */ 4707 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; 4708 4709 tx_ring->buffer_info[i].skb = skb; 4710 tx_ring->buffer_info[i].segs = segs; 4711 tx_ring->buffer_info[i].bytecount = bytecount; 4712 tx_ring->buffer_info[first].next_to_watch = i; 4713 4714 return count; 4715 4716 dma_error: 4717 dev_err(&pdev->dev, "Tx DMA map failed\n"); 4718 buffer_info->dma = 0; 4719 if (count) 4720 count--; 4721 4722 while (count--) { 4723 if (i == 0) 4724 i += tx_ring->count; 4725 i--; 4726 buffer_info = &tx_ring->buffer_info[i]; 4727 e1000_put_txbuf(adapter, buffer_info); 4728 } 4729 4730 return 0; 4731 } 4732 4733 static void e1000_tx_queue(struct e1000_adapter *adapter, 4734 int tx_flags, int count) 4735 { 4736 struct e1000_ring *tx_ring = adapter->tx_ring; 4737 struct e1000_tx_desc *tx_desc = NULL; 4738 struct e1000_buffer *buffer_info; 4739 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 4740 unsigned int i; 4741 4742 if (tx_flags & E1000_TX_FLAGS_TSO) { 4743 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 4744 E1000_TXD_CMD_TSE; 4745 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 4746 4747 if (tx_flags & E1000_TX_FLAGS_IPV4) 4748 txd_upper |= E1000_TXD_POPTS_IXSM << 8; 4749 } 4750 4751 if (tx_flags & E1000_TX_FLAGS_CSUM) { 4752 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 4753 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 4754 } 4755 4756 if (tx_flags & E1000_TX_FLAGS_VLAN) { 4757 txd_lower |= E1000_TXD_CMD_VLE; 4758 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); 4759 } 4760 4761 i = tx_ring->next_to_use; 4762 4763 do { 4764 buffer_info = &tx_ring->buffer_info[i]; 4765 tx_desc = E1000_TX_DESC(*tx_ring, i); 4766 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4767 tx_desc->lower.data = 4768 cpu_to_le32(txd_lower | buffer_info->length); 4769 tx_desc->upper.data = cpu_to_le32(txd_upper); 4770 4771 i++; 4772 if (i == tx_ring->count) 4773 i = 0; 4774 } while (--count > 0); 4775 4776 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); 4777 4778 /* 4779 * Force memory writes to complete before letting h/w 4780 * know there are new descriptors to fetch. (Only 4781 * applicable for weak-ordered memory model archs, 4782 * such as IA-64). 
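* The barrier pairs with the tail-register update that follows (a writel() to tx_ring->tail, or e1000e_update_tdt_wa() when the PCIm2PCI arbiter workaround flag is set), so the descriptor writes above are visible to the device before it is told to fetch them.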
4783 */ 4784 wmb(); 4785 4786 tx_ring->next_to_use = i; 4787 4788 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) 4789 e1000e_update_tdt_wa(adapter, i); 4790 else 4791 writel(i, adapter->hw.hw_addr + tx_ring->tail); 4792 4793 /* 4794 * we need this if more than one processor can write to our tail 4795 * at a time, it synchronizes IO on IA64/Altix systems 4796 */ 4797 mmiowb(); 4798 } 4799 4800 #define MINIMUM_DHCP_PACKET_SIZE 282 4801 static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, 4802 struct sk_buff *skb) 4803 { 4804 struct e1000_hw *hw = &adapter->hw; 4805 u16 length, offset; 4806 4807 if (vlan_tx_tag_present(skb)) { 4808 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && 4809 (adapter->hw.mng_cookie.status & 4810 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) 4811 return 0; 4812 } 4813 4814 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) 4815 return 0; 4816 4817 if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP)) 4818 return 0; 4819 4820 { 4821 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14); 4822 struct udphdr *udp; 4823 4824 if (ip->protocol != IPPROTO_UDP) 4825 return 0; 4826 4827 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); 4828 if (ntohs(udp->dest) != 67) 4829 return 0; 4830 4831 offset = (u8 *)udp + 8 - skb->data; 4832 length = skb->len - offset; 4833 return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length); 4834 } 4835 4836 return 0; 4837 } 4838 4839 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) 4840 { 4841 struct e1000_adapter *adapter = netdev_priv(netdev); 4842 4843 netif_stop_queue(netdev); 4844 /* 4845 * Herbert's original patch had: 4846 * smp_mb__after_netif_stop_queue(); 4847 * but since that doesn't exist yet, just open code it. 4848 */ 4849 smp_mb(); 4850 4851 /* 4852 * We need to check again in a case another CPU has just 4853 * made room available. 4854 */ 4855 if (e1000_desc_unused(adapter->tx_ring) < size) 4856 return -EBUSY; 4857 4858 /* A reprieve! */ 4859 netif_start_queue(netdev); 4860 ++adapter->restart_queue; 4861 return 0; 4862 } 4863 4864 static int e1000_maybe_stop_tx(struct net_device *netdev, int size) 4865 { 4866 struct e1000_adapter *adapter = netdev_priv(netdev); 4867 4868 if (e1000_desc_unused(adapter->tx_ring) >= size) 4869 return 0; 4870 return __e1000_maybe_stop_tx(netdev, size); 4871 } 4872 4873 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) 4874 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, 4875 struct net_device *netdev) 4876 { 4877 struct e1000_adapter *adapter = netdev_priv(netdev); 4878 struct e1000_ring *tx_ring = adapter->tx_ring; 4879 unsigned int first; 4880 unsigned int max_per_txd = E1000_MAX_PER_TXD; 4881 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; 4882 unsigned int tx_flags = 0; 4883 unsigned int len = skb_headlen(skb); 4884 unsigned int nr_frags; 4885 unsigned int mss; 4886 int count = 0; 4887 int tso; 4888 unsigned int f; 4889 4890 if (test_bit(__E1000_DOWN, &adapter->state)) { 4891 dev_kfree_skb_any(skb); 4892 return NETDEV_TX_OK; 4893 } 4894 4895 if (skb->len <= 0) { 4896 dev_kfree_skb_any(skb); 4897 return NETDEV_TX_OK; 4898 } 4899 4900 mss = skb_shinfo(skb)->gso_size; 4901 /* 4902 * The controller does a simple calculation to 4903 * make sure there is enough room in the FIFO before 4904 * initiating the DMA for each buffer. The calc is: 4905 * 4 = ceil(buffer len/mss). To make sure we don't 4906 * overrun the FIFO, adjust the max buffer len if mss 4907 * drops. 
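* Worked example (illustrative, not from the original comment): with a typical TCP mss of 1460, max_per_txd below becomes min(1460 << 2, 8192) = 5840 bytes, i.e. at most four mss-sized chunks per data descriptor.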
4908 */ 4909 if (mss) { 4910 u8 hdr_len; 4911 max_per_txd = min(mss << 2, max_per_txd); 4912 max_txd_pwr = fls(max_per_txd) - 1; 4913 4914 /* 4915 * TSO Workaround for 82571/2/3 Controllers -- if skb->data 4916 * points to just header, pull a few bytes of payload from 4917 * frags into skb->data 4918 */ 4919 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 4920 /* 4921 * we do this workaround for ES2LAN, but it is un-necessary, 4922 * avoiding it could save a lot of cycles 4923 */ 4924 if (skb->data_len && (hdr_len == len)) { 4925 unsigned int pull_size; 4926 4927 pull_size = min((unsigned int)4, skb->data_len); 4928 if (!__pskb_pull_tail(skb, pull_size)) { 4929 e_err("__pskb_pull_tail failed.\n"); 4930 dev_kfree_skb_any(skb); 4931 return NETDEV_TX_OK; 4932 } 4933 len = skb_headlen(skb); 4934 } 4935 } 4936 4937 /* reserve a descriptor for the offload context */ 4938 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) 4939 count++; 4940 count++; 4941 4942 count += TXD_USE_COUNT(len, max_txd_pwr); 4943 4944 nr_frags = skb_shinfo(skb)->nr_frags; 4945 for (f = 0; f < nr_frags; f++) 4946 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), 4947 max_txd_pwr); 4948 4949 if (adapter->hw.mac.tx_pkt_filtering) 4950 e1000_transfer_dhcp_info(adapter, skb); 4951 4952 /* 4953 * need: count + 2 desc gap to keep tail from touching 4954 * head, otherwise try next time 4955 */ 4956 if (e1000_maybe_stop_tx(netdev, count + 2)) 4957 return NETDEV_TX_BUSY; 4958 4959 if (vlan_tx_tag_present(skb)) { 4960 tx_flags |= E1000_TX_FLAGS_VLAN; 4961 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); 4962 } 4963 4964 first = tx_ring->next_to_use; 4965 4966 tso = e1000_tso(adapter, skb); 4967 if (tso < 0) { 4968 dev_kfree_skb_any(skb); 4969 return NETDEV_TX_OK; 4970 } 4971 4972 if (tso) 4973 tx_flags |= E1000_TX_FLAGS_TSO; 4974 else if (e1000_tx_csum(adapter, skb)) 4975 tx_flags |= E1000_TX_FLAGS_CSUM; 4976 4977 /* 4978 * Old method was to assume IPv4 packet by default if TSO was enabled. 4979 * 82571 hardware supports TSO capabilities for IPv6 as well... 4980 * no longer assume, we must. 4981 */ 4982 if (skb->protocol == htons(ETH_P_IP)) 4983 tx_flags |= E1000_TX_FLAGS_IPV4; 4984 4985 /* if count is 0 then mapping error has occurred */ 4986 count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss); 4987 if (count) { 4988 e1000_tx_queue(adapter, tx_flags, count); 4989 /* Make sure there is space in the ring for the next send. 
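(MAX_SKB_FRAGS + 2 is presumably the headroom for a further worst-case frame: one descriptor per possible fragment plus a context descriptor and the gap that keeps tail from touching head, mirroring the count + 2 check made before queuing.)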
*/ 4990 e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2); 4991 4992 } else { 4993 dev_kfree_skb_any(skb); 4994 tx_ring->buffer_info[first].time_stamp = 0; 4995 tx_ring->next_to_use = first; 4996 } 4997 4998 return NETDEV_TX_OK; 4999 } 5000 5001 /** 5002 * e1000_tx_timeout - Respond to a Tx Hang 5003 * @netdev: network interface device structure 5004 **/ 5005 static void e1000_tx_timeout(struct net_device *netdev) 5006 { 5007 struct e1000_adapter *adapter = netdev_priv(netdev); 5008 5009 /* Do the reset outside of interrupt context */ 5010 adapter->tx_timeout_count++; 5011 schedule_work(&adapter->reset_task); 5012 } 5013 5014 static void e1000_reset_task(struct work_struct *work) 5015 { 5016 struct e1000_adapter *adapter; 5017 adapter = container_of(work, struct e1000_adapter, reset_task); 5018 5019 /* don't run the task if already down */ 5020 if (test_bit(__E1000_DOWN, &adapter->state)) 5021 return; 5022 5023 if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && 5024 (adapter->flags & FLAG_RX_RESTART_NOW))) { 5025 e1000e_dump(adapter); 5026 e_err("Reset adapter\n"); 5027 } 5028 e1000e_reinit_locked(adapter); 5029 } 5030 5031 /** 5032 * e1000_get_stats64 - Get System Network Statistics 5033 * @netdev: network interface device structure 5034 * @stats: rtnl_link_stats64 pointer 5035 * 5036 * Returns the address of the device statistics structure. 5037 **/ 5038 struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, 5039 struct rtnl_link_stats64 *stats) 5040 { 5041 struct e1000_adapter *adapter = netdev_priv(netdev); 5042 5043 memset(stats, 0, sizeof(struct rtnl_link_stats64)); 5044 spin_lock(&adapter->stats64_lock); 5045 e1000e_update_stats(adapter); 5046 /* Fill out the OS statistics structure */ 5047 stats->rx_bytes = adapter->stats.gorc; 5048 stats->rx_packets = adapter->stats.gprc; 5049 stats->tx_bytes = adapter->stats.gotc; 5050 stats->tx_packets = adapter->stats.gptc; 5051 stats->multicast = adapter->stats.mprc; 5052 stats->collisions = adapter->stats.colc; 5053 5054 /* Rx Errors */ 5055 5056 /* 5057 * RLEC on some newer hardware can be incorrect so build 5058 * our own version based on RUC and ROC 5059 */ 5060 stats->rx_errors = adapter->stats.rxerrc + 5061 adapter->stats.crcerrs + adapter->stats.algnerrc + 5062 adapter->stats.ruc + adapter->stats.roc + 5063 adapter->stats.cexterr; 5064 stats->rx_length_errors = adapter->stats.ruc + 5065 adapter->stats.roc; 5066 stats->rx_crc_errors = adapter->stats.crcerrs; 5067 stats->rx_frame_errors = adapter->stats.algnerrc; 5068 stats->rx_missed_errors = adapter->stats.mpc; 5069 5070 /* Tx Errors */ 5071 stats->tx_errors = adapter->stats.ecol + 5072 adapter->stats.latecol; 5073 stats->tx_aborted_errors = adapter->stats.ecol; 5074 stats->tx_window_errors = adapter->stats.latecol; 5075 stats->tx_carrier_errors = adapter->stats.tncrs; 5076 5077 /* Tx Dropped needs to be maintained elsewhere */ 5078 5079 spin_unlock(&adapter->stats64_lock); 5080 return stats; 5081 } 5082 5083 /** 5084 * e1000_change_mtu - Change the Maximum Transfer Unit 5085 * @netdev: network interface device structure 5086 * @new_mtu: new value for maximum frame size 5087 * 5088 * Returns 0 on success, negative on failure 5089 **/ 5090 static int e1000_change_mtu(struct net_device *netdev, int new_mtu) 5091 { 5092 struct e1000_adapter *adapter = netdev_priv(netdev); 5093 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 5094 5095 /* Jumbo frame support */ 5096 if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) && 5097 !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { 5098 
e_err("Jumbo Frames not supported.\n"); 5099 return -EINVAL; 5100 } 5101 5102 /* Supported frame sizes */ 5103 if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) || 5104 (max_frame > adapter->max_hw_frame_size)) { 5105 e_err("Unsupported MTU setting\n"); 5106 return -EINVAL; 5107 } 5108 5109 /* Jumbo frame workaround on 82579 requires CRC be stripped */ 5110 if ((adapter->hw.mac.type == e1000_pch2lan) && 5111 !(adapter->flags2 & FLAG2_CRC_STRIPPING) && 5112 (new_mtu > ETH_DATA_LEN)) { 5113 e_err("Jumbo Frames not supported on 82579 when CRC " 5114 "stripping is disabled.\n"); 5115 return -EINVAL; 5116 } 5117 5118 /* 82573 Errata 17 */ 5119 if (((adapter->hw.mac.type == e1000_82573) || 5120 (adapter->hw.mac.type == e1000_82574)) && 5121 (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) { 5122 adapter->flags2 |= FLAG2_DISABLE_ASPM_L1; 5123 e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1); 5124 } 5125 5126 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 5127 usleep_range(1000, 2000); 5128 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ 5129 adapter->max_frame_size = max_frame; 5130 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); 5131 netdev->mtu = new_mtu; 5132 if (netif_running(netdev)) 5133 e1000e_down(adapter); 5134 5135 /* 5136 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 5137 * means we reserve 2 more, this pushes us to allocate from the next 5138 * larger slab size. 5139 * i.e. RXBUFFER_2048 --> size-4096 slab 5140 * However with the new *_jumbo_rx* routines, jumbo receives will use 5141 * fragmented skbs 5142 */ 5143 5144 if (max_frame <= 2048) 5145 adapter->rx_buffer_len = 2048; 5146 else 5147 adapter->rx_buffer_len = 4096; 5148 5149 /* adjust allocation if LPE protects us, and we aren't using SBP */ 5150 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || 5151 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) 5152 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN 5153 + ETH_FCS_LEN; 5154 5155 if (netif_running(netdev)) 5156 e1000e_up(adapter); 5157 else 5158 e1000e_reset(adapter); 5159 5160 clear_bit(__E1000_RESETTING, &adapter->state); 5161 5162 return 0; 5163 } 5164 5165 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 5166 int cmd) 5167 { 5168 struct e1000_adapter *adapter = netdev_priv(netdev); 5169 struct mii_ioctl_data *data = if_mii(ifr); 5170 5171 if (adapter->hw.phy.media_type != e1000_media_type_copper) 5172 return -EOPNOTSUPP; 5173 5174 switch (cmd) { 5175 case SIOCGMIIPHY: 5176 data->phy_id = adapter->hw.phy.addr; 5177 break; 5178 case SIOCGMIIREG: 5179 e1000_phy_read_status(adapter); 5180 5181 switch (data->reg_num & 0x1F) { 5182 case MII_BMCR: 5183 data->val_out = adapter->phy_regs.bmcr; 5184 break; 5185 case MII_BMSR: 5186 data->val_out = adapter->phy_regs.bmsr; 5187 break; 5188 case MII_PHYSID1: 5189 data->val_out = (adapter->hw.phy.id >> 16); 5190 break; 5191 case MII_PHYSID2: 5192 data->val_out = (adapter->hw.phy.id & 0xFFFF); 5193 break; 5194 case MII_ADVERTISE: 5195 data->val_out = adapter->phy_regs.advertise; 5196 break; 5197 case MII_LPA: 5198 data->val_out = adapter->phy_regs.lpa; 5199 break; 5200 case MII_EXPANSION: 5201 data->val_out = adapter->phy_regs.expansion; 5202 break; 5203 case MII_CTRL1000: 5204 data->val_out = adapter->phy_regs.ctrl1000; 5205 break; 5206 case MII_STAT1000: 5207 data->val_out = adapter->phy_regs.stat1000; 5208 break; 5209 case MII_ESTATUS: 5210 data->val_out = adapter->phy_regs.estatus; 5211 break; 5212 default: 5213 return -EIO; 5214 
} 5215 break; 5216 case SIOCSMIIREG: 5217 default: 5218 return -EOPNOTSUPP; 5219 } 5220 return 0; 5221 } 5222 5223 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 5224 { 5225 switch (cmd) { 5226 case SIOCGMIIPHY: 5227 case SIOCGMIIREG: 5228 case SIOCSMIIREG: 5229 return e1000_mii_ioctl(netdev, ifr, cmd); 5230 default: 5231 return -EOPNOTSUPP; 5232 } 5233 } 5234 5235 static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) 5236 { 5237 struct e1000_hw *hw = &adapter->hw; 5238 u32 i, mac_reg; 5239 u16 phy_reg, wuc_enable; 5240 int retval = 0; 5241 5242 /* copy MAC RARs to PHY RARs */ 5243 e1000_copy_rx_addrs_to_phy_ich8lan(hw); 5244 5245 retval = hw->phy.ops.acquire(hw); 5246 if (retval) { 5247 e_err("Could not acquire PHY\n"); 5248 return retval; 5249 } 5250 5251 /* Enable access to wakeup registers on and set page to BM_WUC_PAGE */ 5252 retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable); 5253 if (retval) 5254 goto out; 5255 5256 /* copy MAC MTA to PHY MTA - only needed for pchlan */ 5257 for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) { 5258 mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i); 5259 hw->phy.ops.write_reg_page(hw, BM_MTA(i), 5260 (u16)(mac_reg & 0xFFFF)); 5261 hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1, 5262 (u16)((mac_reg >> 16) & 0xFFFF)); 5263 } 5264 5265 /* configure PHY Rx Control register */ 5266 hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg); 5267 mac_reg = er32(RCTL); 5268 if (mac_reg & E1000_RCTL_UPE) 5269 phy_reg |= BM_RCTL_UPE; 5270 if (mac_reg & E1000_RCTL_MPE) 5271 phy_reg |= BM_RCTL_MPE; 5272 phy_reg &= ~(BM_RCTL_MO_MASK); 5273 if (mac_reg & E1000_RCTL_MO_3) 5274 phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) 5275 << BM_RCTL_MO_SHIFT); 5276 if (mac_reg & E1000_RCTL_BAM) 5277 phy_reg |= BM_RCTL_BAM; 5278 if (mac_reg & E1000_RCTL_PMCF) 5279 phy_reg |= BM_RCTL_PMCF; 5280 mac_reg = er32(CTRL); 5281 if (mac_reg & E1000_CTRL_RFCE) 5282 phy_reg |= BM_RCTL_RFCE; 5283 hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg); 5284 5285 /* enable PHY wakeup in MAC register */ 5286 ew32(WUFC, wufc); 5287 ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN); 5288 5289 /* configure and enable PHY wakeup in PHY registers */ 5290 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc); 5291 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN); 5292 5293 /* activate PHY wakeup */ 5294 wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; 5295 retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable); 5296 if (retval) 5297 e_err("Could not set PHY Host Wakeup bit\n"); 5298 out: 5299 hw->phy.ops.release(hw); 5300 5301 return retval; 5302 } 5303 5304 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, 5305 bool runtime) 5306 { 5307 struct net_device *netdev = pci_get_drvdata(pdev); 5308 struct e1000_adapter *adapter = netdev_priv(netdev); 5309 struct e1000_hw *hw = &adapter->hw; 5310 u32 ctrl, ctrl_ext, rctl, status; 5311 /* Runtime suspend should only enable wakeup for link changes */ 5312 u32 wufc = runtime ? 
E1000_WUFC_LNKC : adapter->wol; 5313 int retval = 0; 5314 5315 netif_device_detach(netdev); 5316 5317 if (netif_running(netdev)) { 5318 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); 5319 e1000e_down(adapter); 5320 e1000_free_irq(adapter); 5321 } 5322 e1000e_reset_interrupt_capability(adapter); 5323 5324 retval = pci_save_state(pdev); 5325 if (retval) 5326 return retval; 5327 5328 status = er32(STATUS); 5329 if (status & E1000_STATUS_LU) 5330 wufc &= ~E1000_WUFC_LNKC; 5331 5332 if (wufc) { 5333 e1000_setup_rctl(adapter); 5334 e1000_set_multi(netdev); 5335 5336 /* turn on all-multi mode if wake on multicast is enabled */ 5337 if (wufc & E1000_WUFC_MC) { 5338 rctl = er32(RCTL); 5339 rctl |= E1000_RCTL_MPE; 5340 ew32(RCTL, rctl); 5341 } 5342 5343 ctrl = er32(CTRL); 5344 /* advertise wake from D3Cold */ 5345 #define E1000_CTRL_ADVD3WUC 0x00100000 5346 /* phy power management enable */ 5347 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 5348 ctrl |= E1000_CTRL_ADVD3WUC; 5349 if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)) 5350 ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT; 5351 ew32(CTRL, ctrl); 5352 5353 if (adapter->hw.phy.media_type == e1000_media_type_fiber || 5354 adapter->hw.phy.media_type == 5355 e1000_media_type_internal_serdes) { 5356 /* keep the laser running in D3 */ 5357 ctrl_ext = er32(CTRL_EXT); 5358 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA; 5359 ew32(CTRL_EXT, ctrl_ext); 5360 } 5361 5362 if (adapter->flags & FLAG_IS_ICH) 5363 e1000_suspend_workarounds_ich8lan(&adapter->hw); 5364 5365 /* Allow time for pending master requests to run */ 5366 e1000e_disable_pcie_master(&adapter->hw); 5367 5368 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { 5369 /* enable wakeup by the PHY */ 5370 retval = e1000_init_phy_wakeup(adapter, wufc); 5371 if (retval) 5372 return retval; 5373 } else { 5374 /* enable wakeup by the MAC */ 5375 ew32(WUFC, wufc); 5376 ew32(WUC, E1000_WUC_PME_EN); 5377 } 5378 } else { 5379 ew32(WUC, 0); 5380 ew32(WUFC, 0); 5381 } 5382 5383 *enable_wake = !!wufc; 5384 5385 /* make sure adapter isn't asleep if manageability is enabled */ 5386 if ((adapter->flags & FLAG_MNG_PT_ENABLED) || 5387 (hw->mac.ops.check_mng_mode(hw))) 5388 *enable_wake = true; 5389 5390 if (adapter->hw.phy.type == e1000_phy_igp_3) 5391 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); 5392 5393 /* 5394 * Release control of h/w to f/w. If f/w is AMT enabled, this 5395 * would have already happened in close and is redundant. 5396 */ 5397 e1000e_release_hw_control(adapter); 5398 5399 pci_disable_device(pdev); 5400 5401 return 0; 5402 } 5403 5404 static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake) 5405 { 5406 if (sleep && wake) { 5407 pci_prepare_to_sleep(pdev); 5408 return; 5409 } 5410 5411 pci_wake_from_d3(pdev, wake); 5412 pci_set_power_state(pdev, PCI_D3hot); 5413 } 5414 5415 static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, 5416 bool wake) 5417 { 5418 struct net_device *netdev = pci_get_drvdata(pdev); 5419 struct e1000_adapter *adapter = netdev_priv(netdev); 5420 5421 /* 5422 * The pci-e switch on some quad port adapters will report a 5423 * correctable error when the MAC transitions from D0 to D3. To 5424 * prevent this we need to mask off the correctable errors on the 5425 * downstream port of the pci-e switch. 
5426 */ 5427 if (adapter->flags & FLAG_IS_QUAD_PORT) { 5428 struct pci_dev *us_dev = pdev->bus->self; 5429 int pos = pci_pcie_cap(us_dev); 5430 u16 devctl; 5431 5432 pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl); 5433 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, 5434 (devctl & ~PCI_EXP_DEVCTL_CERE)); 5435 5436 e1000_power_off(pdev, sleep, wake); 5437 5438 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl); 5439 } else { 5440 e1000_power_off(pdev, sleep, wake); 5441 } 5442 } 5443 5444 #ifdef CONFIG_PCIEASPM 5445 static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) 5446 { 5447 pci_disable_link_state_locked(pdev, state); 5448 } 5449 #else 5450 static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) 5451 { 5452 int pos; 5453 u16 reg16; 5454 5455 /* 5456 * Both device and parent should have the same ASPM setting. 5457 * Disable ASPM in downstream component first and then upstream. 5458 */ 5459 pos = pci_pcie_cap(pdev); 5460 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); 5461 reg16 &= ~state; 5462 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); 5463 5464 if (!pdev->bus->self) 5465 return; 5466 5467 pos = pci_pcie_cap(pdev->bus->self); 5468 pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, ®16); 5469 reg16 &= ~state; 5470 pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16); 5471 } 5472 #endif 5473 static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) 5474 { 5475 dev_info(&pdev->dev, "Disabling ASPM %s %s\n", 5476 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "", 5477 (state & PCIE_LINK_STATE_L1) ? "L1" : ""); 5478 5479 __e1000e_disable_aspm(pdev, state); 5480 } 5481 5482 #ifdef CONFIG_PM 5483 static bool e1000e_pm_ready(struct e1000_adapter *adapter) 5484 { 5485 return !!adapter->tx_ring->buffer_info; 5486 } 5487 5488 static int __e1000_resume(struct pci_dev *pdev) 5489 { 5490 struct net_device *netdev = pci_get_drvdata(pdev); 5491 struct e1000_adapter *adapter = netdev_priv(netdev); 5492 struct e1000_hw *hw = &adapter->hw; 5493 u16 aspm_disable_flag = 0; 5494 u32 err; 5495 5496 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) 5497 aspm_disable_flag = PCIE_LINK_STATE_L0S; 5498 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) 5499 aspm_disable_flag |= PCIE_LINK_STATE_L1; 5500 if (aspm_disable_flag) 5501 e1000e_disable_aspm(pdev, aspm_disable_flag); 5502 5503 pci_set_power_state(pdev, PCI_D0); 5504 pci_restore_state(pdev); 5505 pci_save_state(pdev); 5506 5507 e1000e_set_interrupt_capability(adapter); 5508 if (netif_running(netdev)) { 5509 err = e1000_request_irq(adapter); 5510 if (err) 5511 return err; 5512 } 5513 5514 if (hw->mac.type == e1000_pch2lan) 5515 e1000_resume_workarounds_pchlan(&adapter->hw); 5516 5517 e1000e_power_up_phy(adapter); 5518 5519 /* report the system wakeup cause from S3/S4 */ 5520 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { 5521 u16 phy_data; 5522 5523 e1e_rphy(&adapter->hw, BM_WUS, &phy_data); 5524 if (phy_data) { 5525 e_info("PHY Wakeup cause - %s\n", 5526 phy_data & E1000_WUS_EX ? "Unicast Packet" : 5527 phy_data & E1000_WUS_MC ? "Multicast Packet" : 5528 phy_data & E1000_WUS_BC ? "Broadcast Packet" : 5529 phy_data & E1000_WUS_MAG ? "Magic Packet" : 5530 phy_data & E1000_WUS_LNKC ? "Link Status " 5531 " Change" : "other"); 5532 } 5533 e1e_wphy(&adapter->hw, BM_WUS, ~0); 5534 } else { 5535 u32 wus = er32(WUS); 5536 if (wus) { 5537 e_info("MAC Wakeup cause - %s\n", 5538 wus & E1000_WUS_EX ? "Unicast Packet" : 5539 wus & E1000_WUS_MC ? 
"Multicast Packet" : 5540 wus & E1000_WUS_BC ? "Broadcast Packet" : 5541 wus & E1000_WUS_MAG ? "Magic Packet" : 5542 wus & E1000_WUS_LNKC ? "Link Status Change" : 5543 "other"); 5544 } 5545 ew32(WUS, ~0); 5546 } 5547 5548 e1000e_reset(adapter); 5549 5550 e1000_init_manageability_pt(adapter); 5551 5552 if (netif_running(netdev)) 5553 e1000e_up(adapter); 5554 5555 netif_device_attach(netdev); 5556 5557 /* 5558 * If the controller has AMT, do not set DRV_LOAD until the interface 5559 * is up. For all other cases, let the f/w know that the h/w is now 5560 * under the control of the driver. 5561 */ 5562 if (!(adapter->flags & FLAG_HAS_AMT)) 5563 e1000e_get_hw_control(adapter); 5564 5565 return 0; 5566 } 5567 5568 #ifdef CONFIG_PM_SLEEP 5569 static int e1000_suspend(struct device *dev) 5570 { 5571 struct pci_dev *pdev = to_pci_dev(dev); 5572 int retval; 5573 bool wake; 5574 5575 retval = __e1000_shutdown(pdev, &wake, false); 5576 if (!retval) 5577 e1000_complete_shutdown(pdev, true, wake); 5578 5579 return retval; 5580 } 5581 5582 static int e1000_resume(struct device *dev) 5583 { 5584 struct pci_dev *pdev = to_pci_dev(dev); 5585 struct net_device *netdev = pci_get_drvdata(pdev); 5586 struct e1000_adapter *adapter = netdev_priv(netdev); 5587 5588 if (e1000e_pm_ready(adapter)) 5589 adapter->idle_check = true; 5590 5591 return __e1000_resume(pdev); 5592 } 5593 #endif /* CONFIG_PM_SLEEP */ 5594 5595 #ifdef CONFIG_PM_RUNTIME 5596 static int e1000_runtime_suspend(struct device *dev) 5597 { 5598 struct pci_dev *pdev = to_pci_dev(dev); 5599 struct net_device *netdev = pci_get_drvdata(pdev); 5600 struct e1000_adapter *adapter = netdev_priv(netdev); 5601 5602 if (e1000e_pm_ready(adapter)) { 5603 bool wake; 5604 5605 __e1000_shutdown(pdev, &wake, true); 5606 } 5607 5608 return 0; 5609 } 5610 5611 static int e1000_idle(struct device *dev) 5612 { 5613 struct pci_dev *pdev = to_pci_dev(dev); 5614 struct net_device *netdev = pci_get_drvdata(pdev); 5615 struct e1000_adapter *adapter = netdev_priv(netdev); 5616 5617 if (!e1000e_pm_ready(adapter)) 5618 return 0; 5619 5620 if (adapter->idle_check) { 5621 adapter->idle_check = false; 5622 if (!e1000e_has_link(adapter)) 5623 pm_schedule_suspend(dev, MSEC_PER_SEC); 5624 } 5625 5626 return -EBUSY; 5627 } 5628 5629 static int e1000_runtime_resume(struct device *dev) 5630 { 5631 struct pci_dev *pdev = to_pci_dev(dev); 5632 struct net_device *netdev = pci_get_drvdata(pdev); 5633 struct e1000_adapter *adapter = netdev_priv(netdev); 5634 5635 if (!e1000e_pm_ready(adapter)) 5636 return 0; 5637 5638 adapter->idle_check = !dev->power.runtime_auto; 5639 return __e1000_resume(pdev); 5640 } 5641 #endif /* CONFIG_PM_RUNTIME */ 5642 #endif /* CONFIG_PM */ 5643 5644 static void e1000_shutdown(struct pci_dev *pdev) 5645 { 5646 bool wake = false; 5647 5648 __e1000_shutdown(pdev, &wake, false); 5649 5650 if (system_state == SYSTEM_POWER_OFF) 5651 e1000_complete_shutdown(pdev, false, wake); 5652 } 5653 5654 #ifdef CONFIG_NET_POLL_CONTROLLER 5655 5656 static irqreturn_t e1000_intr_msix(int irq, void *data) 5657 { 5658 struct net_device *netdev = data; 5659 struct e1000_adapter *adapter = netdev_priv(netdev); 5660 5661 if (adapter->msix_entries) { 5662 int vector, msix_irq; 5663 5664 vector = 0; 5665 msix_irq = adapter->msix_entries[vector].vector; 5666 disable_irq(msix_irq); 5667 e1000_intr_msix_rx(msix_irq, netdev); 5668 enable_irq(msix_irq); 5669 5670 vector++; 5671 msix_irq = adapter->msix_entries[vector].vector; 5672 disable_irq(msix_irq); 5673 e1000_intr_msix_tx(msix_irq, netdev); 
5674 enable_irq(msix_irq); 5675 5676 vector++; 5677 msix_irq = adapter->msix_entries[vector].vector; 5678 disable_irq(msix_irq); 5679 e1000_msix_other(msix_irq, netdev); 5680 enable_irq(msix_irq); 5681 } 5682 5683 return IRQ_HANDLED; 5684 } 5685 5686 /* 5687 * Polling 'interrupt' - used by things like netconsole to send skbs 5688 * without having to re-enable interrupts. It's not called while 5689 * the interrupt routine is executing. 5690 */ 5691 static void e1000_netpoll(struct net_device *netdev) 5692 { 5693 struct e1000_adapter *adapter = netdev_priv(netdev); 5694 5695 switch (adapter->int_mode) { 5696 case E1000E_INT_MODE_MSIX: 5697 e1000_intr_msix(adapter->pdev->irq, netdev); 5698 break; 5699 case E1000E_INT_MODE_MSI: 5700 disable_irq(adapter->pdev->irq); 5701 e1000_intr_msi(adapter->pdev->irq, netdev); 5702 enable_irq(adapter->pdev->irq); 5703 break; 5704 default: /* E1000E_INT_MODE_LEGACY */ 5705 disable_irq(adapter->pdev->irq); 5706 e1000_intr(adapter->pdev->irq, netdev); 5707 enable_irq(adapter->pdev->irq); 5708 break; 5709 } 5710 } 5711 #endif 5712 5713 /** 5714 * e1000_io_error_detected - called when PCI error is detected 5715 * @pdev: Pointer to PCI device 5716 * @state: The current pci connection state 5717 * 5718 * This function is called after a PCI bus error affecting 5719 * this device has been detected. 5720 */ 5721 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, 5722 pci_channel_state_t state) 5723 { 5724 struct net_device *netdev = pci_get_drvdata(pdev); 5725 struct e1000_adapter *adapter = netdev_priv(netdev); 5726 5727 netif_device_detach(netdev); 5728 5729 if (state == pci_channel_io_perm_failure) 5730 return PCI_ERS_RESULT_DISCONNECT; 5731 5732 if (netif_running(netdev)) 5733 e1000e_down(adapter); 5734 pci_disable_device(pdev); 5735 5736 /* Request a slot slot reset. */ 5737 return PCI_ERS_RESULT_NEED_RESET; 5738 } 5739 5740 /** 5741 * e1000_io_slot_reset - called after the pci bus has been reset. 5742 * @pdev: Pointer to PCI device 5743 * 5744 * Restart the card from scratch, as if from a cold-boot. Implementation 5745 * resembles the first-half of the e1000_resume routine. 5746 */ 5747 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) 5748 { 5749 struct net_device *netdev = pci_get_drvdata(pdev); 5750 struct e1000_adapter *adapter = netdev_priv(netdev); 5751 struct e1000_hw *hw = &adapter->hw; 5752 u16 aspm_disable_flag = 0; 5753 int err; 5754 pci_ers_result_t result; 5755 5756 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) 5757 aspm_disable_flag = PCIE_LINK_STATE_L0S; 5758 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) 5759 aspm_disable_flag |= PCIE_LINK_STATE_L1; 5760 if (aspm_disable_flag) 5761 e1000e_disable_aspm(pdev, aspm_disable_flag); 5762 5763 err = pci_enable_device_mem(pdev); 5764 if (err) { 5765 dev_err(&pdev->dev, 5766 "Cannot re-enable PCI device after reset.\n"); 5767 result = PCI_ERS_RESULT_DISCONNECT; 5768 } else { 5769 pci_set_master(pdev); 5770 pdev->state_saved = true; 5771 pci_restore_state(pdev); 5772 5773 pci_enable_wake(pdev, PCI_D3hot, 0); 5774 pci_enable_wake(pdev, PCI_D3cold, 0); 5775 5776 e1000e_reset(adapter); 5777 ew32(WUS, ~0); 5778 result = PCI_ERS_RESULT_RECOVERED; 5779 } 5780 5781 pci_cleanup_aer_uncorrect_error_status(pdev); 5782 5783 return result; 5784 } 5785 5786 /** 5787 * e1000_io_resume - called when traffic can start flowing again. 
5788  * @pdev: Pointer to PCI device
5789  *
5790  * This callback is called when the error recovery driver tells us that
5791  * it's OK to resume normal operation. Implementation resembles the
5792  * second half of the e1000_resume routine.
5793  */
5794 static void e1000_io_resume(struct pci_dev *pdev)
5795 {
5796 	struct net_device *netdev = pci_get_drvdata(pdev);
5797 	struct e1000_adapter *adapter = netdev_priv(netdev);
5798 
5799 	e1000_init_manageability_pt(adapter);
5800 
5801 	if (netif_running(netdev)) {
5802 		if (e1000e_up(adapter)) {
5803 			dev_err(&pdev->dev,
5804 				"can't bring device back up after reset\n");
5805 			return;
5806 		}
5807 	}
5808 
5809 	netif_device_attach(netdev);
5810 
5811 	/*
5812 	 * If the controller has AMT, do not set DRV_LOAD until the interface
5813 	 * is up. For all other cases, let the f/w know that the h/w is now
5814 	 * under the control of the driver.
5815 	 */
5816 	if (!(adapter->flags & FLAG_HAS_AMT))
5817 		e1000e_get_hw_control(adapter);
5818 
5819 }
5820 
5821 static void e1000_print_device_info(struct e1000_adapter *adapter)
5822 {
5823 	struct e1000_hw *hw = &adapter->hw;
5824 	struct net_device *netdev = adapter->netdev;
5825 	u32 ret_val;
5826 	u8 pba_str[E1000_PBANUM_LENGTH];
5827 
5828 	/* print bus type/speed/width info */
5829 	e_info("(PCI Express:2.5GT/s:%s) %pM\n",
5830 	       /* bus width */
5831 	       ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
5832 		"Width x1"),
5833 	       /* MAC address */
5834 	       netdev->dev_addr);
5835 	e_info("Intel(R) PRO/%s Network Connection\n",
5836 	       (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
5837 	ret_val = e1000_read_pba_string_generic(hw, pba_str,
5838 						E1000_PBANUM_LENGTH);
5839 	if (ret_val)
5840 		strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1);
5841 	e_info("MAC: %d, PHY: %d, PBA No: %s\n",
5842 	       hw->mac.type, hw->phy.type, pba_str);
5843 }
5844 
5845 static void e1000_eeprom_checks(struct e1000_adapter *adapter)
5846 {
5847 	struct e1000_hw *hw = &adapter->hw;
5848 	int ret_val;
5849 	u16 buf = 0;
5850 
5851 	if (hw->mac.type != e1000_82573)
5852 		return;
5853 
5854 	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
5855 	if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) {
5856 		/* Deep Smart Power Down (DSPD) */
5857 		dev_warn(&adapter->pdev->dev,
5858 			 "Warning: detected DSPD enabled in EEPROM\n");
5859 	}
5860 }
5861 
5862 static int e1000_set_features(struct net_device *netdev, u32 features)
5863 {
5864 	struct e1000_adapter *adapter = netdev_priv(netdev);
5865 	u32 changed = features ^ netdev->features;
5866 
5867 	if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
5868 		adapter->flags |= FLAG_TSO_FORCE;
5869 
5870 	if (!(changed & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX |
5871 			 NETIF_F_RXCSUM)))
5872 		return 0;
5873 
5874 	if (netif_running(netdev))
5875 		e1000e_reinit_locked(adapter);
5876 	else
5877 		e1000e_reset(adapter);
5878 
5879 	return 0;
5880 }
5881 
5882 static const struct net_device_ops e1000e_netdev_ops = {
5883 	.ndo_open = e1000_open,
5884 	.ndo_stop = e1000_close,
5885 	.ndo_start_xmit = e1000_xmit_frame,
5886 	.ndo_get_stats64 = e1000e_get_stats64,
5887 	.ndo_set_rx_mode = e1000_set_multi,
5888 	.ndo_set_mac_address = e1000_set_mac,
5889 	.ndo_change_mtu = e1000_change_mtu,
5890 	.ndo_do_ioctl = e1000_ioctl,
5891 	.ndo_tx_timeout = e1000_tx_timeout,
5892 	.ndo_validate_addr = eth_validate_addr,
5893 
5894 	.ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
5895 	.ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
5896 #ifdef CONFIG_NET_POLL_CONTROLLER
5897 	.ndo_poll_controller = e1000_netpoll,
5898 #endif
5899 	.ndo_set_features = e1000_set_features,
5900 };
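/*
 * Illustrative sketch, not part of the driver and excluded from the build:
 * e1000_set_features() above relies on the usual feature-diff idiom -- XOR
 * the requested feature mask against the currently active one, then act only
 * on the bits that actually changed.  The stand-alone program below
 * demonstrates that idiom with made-up FEAT_* flags and a hypothetical
 * demo_set_features() helper; none of these names exist in the kernel.
 */
#if 0
#include <stdio.h>

#define FEAT_RXCSUM	(1u << 0)	/* stand-ins for NETIF_F_* bits */
#define FEAT_TSO	(1u << 1)
#define FEAT_VLAN_RX	(1u << 2)

static void demo_set_features(unsigned int *active, unsigned int requested)
{
	unsigned int changed = requested ^ *active;	/* bits that differ */

	/* only a reset-worthy subset of bits triggers a reconfiguration */
	if (changed & (FEAT_RXCSUM | FEAT_VLAN_RX))
		printf("offload change 0x%x -> reinit the device\n", changed);
	else
		printf("change 0x%x needs no reinit\n", changed);

	*active = requested;
}

int main(void)
{
	unsigned int active = FEAT_RXCSUM | FEAT_TSO;

	demo_set_features(&active, FEAT_TSO);			/* drop RXCSUM */
	demo_set_features(&active, FEAT_TSO | FEAT_VLAN_RX);	/* add VLAN RX */
	return 0;
}
#endif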
5901 5902 /** 5903 * e1000_probe - Device Initialization Routine 5904 * @pdev: PCI device information struct 5905 * @ent: entry in e1000_pci_tbl 5906 * 5907 * Returns 0 on success, negative on failure 5908 * 5909 * e1000_probe initializes an adapter identified by a pci_dev structure. 5910 * The OS initialization, configuring of the adapter private structure, 5911 * and a hardware reset occur. 5912 **/ 5913 static int __devinit e1000_probe(struct pci_dev *pdev, 5914 const struct pci_device_id *ent) 5915 { 5916 struct net_device *netdev; 5917 struct e1000_adapter *adapter; 5918 struct e1000_hw *hw; 5919 const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; 5920 resource_size_t mmio_start, mmio_len; 5921 resource_size_t flash_start, flash_len; 5922 5923 static int cards_found; 5924 u16 aspm_disable_flag = 0; 5925 int i, err, pci_using_dac; 5926 u16 eeprom_data = 0; 5927 u16 eeprom_apme_mask = E1000_EEPROM_APME; 5928 5929 if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S) 5930 aspm_disable_flag = PCIE_LINK_STATE_L0S; 5931 if (ei->flags2 & FLAG2_DISABLE_ASPM_L1) 5932 aspm_disable_flag |= PCIE_LINK_STATE_L1; 5933 if (aspm_disable_flag) 5934 e1000e_disable_aspm(pdev, aspm_disable_flag); 5935 5936 err = pci_enable_device_mem(pdev); 5937 if (err) 5938 return err; 5939 5940 pci_using_dac = 0; 5941 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 5942 if (!err) { 5943 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 5944 if (!err) 5945 pci_using_dac = 1; 5946 } else { 5947 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 5948 if (err) { 5949 err = dma_set_coherent_mask(&pdev->dev, 5950 DMA_BIT_MASK(32)); 5951 if (err) { 5952 dev_err(&pdev->dev, "No usable DMA " 5953 "configuration, aborting\n"); 5954 goto err_dma; 5955 } 5956 } 5957 } 5958 5959 err = pci_request_selected_regions_exclusive(pdev, 5960 pci_select_bars(pdev, IORESOURCE_MEM), 5961 e1000e_driver_name); 5962 if (err) 5963 goto err_pci_reg; 5964 5965 /* AER (Advanced Error Reporting) hooks */ 5966 pci_enable_pcie_error_reporting(pdev); 5967 5968 pci_set_master(pdev); 5969 /* PCI config space info */ 5970 err = pci_save_state(pdev); 5971 if (err) 5972 goto err_alloc_etherdev; 5973 5974 err = -ENOMEM; 5975 netdev = alloc_etherdev(sizeof(struct e1000_adapter)); 5976 if (!netdev) 5977 goto err_alloc_etherdev; 5978 5979 SET_NETDEV_DEV(netdev, &pdev->dev); 5980 5981 netdev->irq = pdev->irq; 5982 5983 pci_set_drvdata(pdev, netdev); 5984 adapter = netdev_priv(netdev); 5985 hw = &adapter->hw; 5986 adapter->netdev = netdev; 5987 adapter->pdev = pdev; 5988 adapter->ei = ei; 5989 adapter->pba = ei->pba; 5990 adapter->flags = ei->flags; 5991 adapter->flags2 = ei->flags2; 5992 adapter->hw.adapter = adapter; 5993 adapter->hw.mac.type = ei->mac; 5994 adapter->max_hw_frame_size = ei->max_hw_frame_size; 5995 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; 5996 5997 mmio_start = pci_resource_start(pdev, 0); 5998 mmio_len = pci_resource_len(pdev, 0); 5999 6000 err = -EIO; 6001 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); 6002 if (!adapter->hw.hw_addr) 6003 goto err_ioremap; 6004 6005 if ((adapter->flags & FLAG_HAS_FLASH) && 6006 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { 6007 flash_start = pci_resource_start(pdev, 1); 6008 flash_len = pci_resource_len(pdev, 1); 6009 adapter->hw.flash_address = ioremap(flash_start, flash_len); 6010 if (!adapter->hw.flash_address) 6011 goto err_flashmap; 6012 } 6013 6014 /* construct the net_device struct */ 6015 netdev->netdev_ops = &e1000e_netdev_ops; 6016 
e1000e_set_ethtool_ops(netdev); 6017 netdev->watchdog_timeo = 5 * HZ; 6018 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); 6019 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); 6020 6021 netdev->mem_start = mmio_start; 6022 netdev->mem_end = mmio_start + mmio_len; 6023 6024 adapter->bd_number = cards_found++; 6025 6026 e1000e_check_options(adapter); 6027 6028 /* setup adapter struct */ 6029 err = e1000_sw_init(adapter); 6030 if (err) 6031 goto err_sw_init; 6032 6033 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); 6034 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); 6035 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); 6036 6037 err = ei->get_variants(adapter); 6038 if (err) 6039 goto err_hw_init; 6040 6041 if ((adapter->flags & FLAG_IS_ICH) && 6042 (adapter->flags & FLAG_READ_ONLY_NVM)) 6043 e1000e_write_protect_nvm_ich8lan(&adapter->hw); 6044 6045 hw->mac.ops.get_bus_info(&adapter->hw); 6046 6047 adapter->hw.phy.autoneg_wait_to_complete = 0; 6048 6049 /* Copper options */ 6050 if (adapter->hw.phy.media_type == e1000_media_type_copper) { 6051 adapter->hw.phy.mdix = AUTO_ALL_MODES; 6052 adapter->hw.phy.disable_polarity_correction = 0; 6053 adapter->hw.phy.ms_type = e1000_ms_hw_default; 6054 } 6055 6056 if (e1000_check_reset_block(&adapter->hw)) 6057 e_info("PHY reset is blocked due to SOL/IDER session.\n"); 6058 6059 /* Set initial default active device features */ 6060 netdev->features = (NETIF_F_SG | 6061 NETIF_F_HW_VLAN_RX | 6062 NETIF_F_HW_VLAN_TX | 6063 NETIF_F_TSO | 6064 NETIF_F_TSO6 | 6065 NETIF_F_RXCSUM | 6066 NETIF_F_HW_CSUM); 6067 6068 /* Set user-changeable features (subset of all device features) */ 6069 netdev->hw_features = netdev->features; 6070 6071 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) 6072 netdev->features |= NETIF_F_HW_VLAN_FILTER; 6073 6074 netdev->vlan_features |= (NETIF_F_SG | 6075 NETIF_F_TSO | 6076 NETIF_F_TSO6 | 6077 NETIF_F_HW_CSUM); 6078 6079 if (pci_using_dac) { 6080 netdev->features |= NETIF_F_HIGHDMA; 6081 netdev->vlan_features |= NETIF_F_HIGHDMA; 6082 } 6083 6084 if (e1000e_enable_mng_pass_thru(&adapter->hw)) 6085 adapter->flags |= FLAG_MNG_PT_ENABLED; 6086 6087 /* 6088 * before reading the NVM, reset the controller to 6089 * put the device in a known good starting state 6090 */ 6091 adapter->hw.mac.ops.reset_hw(&adapter->hw); 6092 6093 /* 6094 * systems with ASPM and others may see the checksum fail on the first 6095 * attempt. 
Let's give it a few tries 6096 */ 6097 for (i = 0;; i++) { 6098 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) 6099 break; 6100 if (i == 2) { 6101 e_err("The NVM Checksum Is Not Valid\n"); 6102 err = -EIO; 6103 goto err_eeprom; 6104 } 6105 } 6106 6107 e1000_eeprom_checks(adapter); 6108 6109 /* copy the MAC address */ 6110 if (e1000e_read_mac_addr(&adapter->hw)) 6111 e_err("NVM Read Error while reading MAC address\n"); 6112 6113 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); 6114 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); 6115 6116 if (!is_valid_ether_addr(netdev->perm_addr)) { 6117 e_err("Invalid MAC Address: %pM\n", netdev->perm_addr); 6118 err = -EIO; 6119 goto err_eeprom; 6120 } 6121 6122 init_timer(&adapter->watchdog_timer); 6123 adapter->watchdog_timer.function = e1000_watchdog; 6124 adapter->watchdog_timer.data = (unsigned long) adapter; 6125 6126 init_timer(&adapter->phy_info_timer); 6127 adapter->phy_info_timer.function = e1000_update_phy_info; 6128 adapter->phy_info_timer.data = (unsigned long) adapter; 6129 6130 INIT_WORK(&adapter->reset_task, e1000_reset_task); 6131 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); 6132 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); 6133 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); 6134 INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang); 6135 6136 /* Initialize link parameters. User can change them with ethtool */ 6137 adapter->hw.mac.autoneg = 1; 6138 adapter->fc_autoneg = 1; 6139 adapter->hw.fc.requested_mode = e1000_fc_default; 6140 adapter->hw.fc.current_mode = e1000_fc_default; 6141 adapter->hw.phy.autoneg_advertised = 0x2f; 6142 6143 /* ring size defaults */ 6144 adapter->rx_ring->count = 256; 6145 adapter->tx_ring->count = 256; 6146 6147 /* 6148 * Initial Wake on LAN setting - If APM wake is enabled in 6149 * the EEPROM, enable the ACPI Magic Packet filter 6150 */ 6151 if (adapter->flags & FLAG_APME_IN_WUC) { 6152 /* APME bit in EEPROM is mapped to WUC.APME */ 6153 eeprom_data = er32(WUC); 6154 eeprom_apme_mask = E1000_WUC_APME; 6155 if ((hw->mac.type > e1000_ich10lan) && 6156 (eeprom_data & E1000_WUC_PHY_WAKE)) 6157 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; 6158 } else if (adapter->flags & FLAG_APME_IN_CTRL3) { 6159 if (adapter->flags & FLAG_APME_CHECK_PORT_B && 6160 (adapter->hw.bus.func == 1)) 6161 e1000_read_nvm(&adapter->hw, 6162 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 6163 else 6164 e1000_read_nvm(&adapter->hw, 6165 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); 6166 } 6167 6168 /* fetch WoL from EEPROM */ 6169 if (eeprom_data & eeprom_apme_mask) 6170 adapter->eeprom_wol |= E1000_WUFC_MAG; 6171 6172 /* 6173 * now that we have the eeprom settings, apply the special cases 6174 * where the eeprom may be wrong or the board simply won't support 6175 * wake on lan on a particular port 6176 */ 6177 if (!(adapter->flags & FLAG_HAS_WOL)) 6178 adapter->eeprom_wol = 0; 6179 6180 /* initialize the wol settings based on the eeprom settings */ 6181 adapter->wol = adapter->eeprom_wol; 6182 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 6183 6184 /* save off EEPROM version number */ 6185 e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); 6186 6187 /* reset the hardware with the new settings */ 6188 e1000e_reset(adapter); 6189 6190 /* 6191 * If the controller has AMT, do not set DRV_LOAD until the interface 6192 * is up. For all other cases, let the f/w know that the h/w is now 6193 * under the control of the driver. 
6194 	 */
6195 	if (!(adapter->flags & FLAG_HAS_AMT))
6196 		e1000e_get_hw_control(adapter);
6197 
6198 	strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1);
6199 	err = register_netdev(netdev);
6200 	if (err)
6201 		goto err_register;
6202 
6203 	/* carrier off reporting is important to ethtool even BEFORE open */
6204 	netif_carrier_off(netdev);
6205 
6206 	e1000_print_device_info(adapter);
6207 
6208 	if (pci_dev_run_wake(pdev))
6209 		pm_runtime_put_noidle(&pdev->dev);
6210 
6211 	return 0;
6212 
6213 err_register:
6214 	if (!(adapter->flags & FLAG_HAS_AMT))
6215 		e1000e_release_hw_control(adapter);
6216 err_eeprom:
6217 	if (!e1000_check_reset_block(&adapter->hw))
6218 		e1000_phy_hw_reset(&adapter->hw);
6219 err_hw_init:
6220 	kfree(adapter->tx_ring);
6221 	kfree(adapter->rx_ring);
6222 err_sw_init:
6223 	if (adapter->hw.flash_address)
6224 		iounmap(adapter->hw.flash_address);
6225 	e1000e_reset_interrupt_capability(adapter);
6226 err_flashmap:
6227 	iounmap(adapter->hw.hw_addr);
6228 err_ioremap:
6229 	free_netdev(netdev);
6230 err_alloc_etherdev:
6231 	pci_release_selected_regions(pdev,
6232 				     pci_select_bars(pdev, IORESOURCE_MEM));
6233 err_pci_reg:
6234 err_dma:
6235 	pci_disable_device(pdev);
6236 	return err;
6237 }
6238 
6239 /**
6240  * e1000_remove - Device Removal Routine
6241  * @pdev: PCI device information struct
6242  *
6243  * e1000_remove is called by the PCI subsystem to alert the driver
6244  * that it should release a PCI device. This could be caused by a
6245  * Hot-Plug event, or because the driver is going to be removed from
6246  * memory.
6247  **/
6248 static void __devexit e1000_remove(struct pci_dev *pdev)
6249 {
6250 	struct net_device *netdev = pci_get_drvdata(pdev);
6251 	struct e1000_adapter *adapter = netdev_priv(netdev);
6252 	bool down = test_bit(__E1000_DOWN, &adapter->state);
6253 
6254 	/*
6255 	 * The timers may be rescheduled, so explicitly disable them
6256 	 * from being rescheduled.
6257 	 */
6258 	if (!down)
6259 		set_bit(__E1000_DOWN, &adapter->state);
6260 	del_timer_sync(&adapter->watchdog_timer);
6261 	del_timer_sync(&adapter->phy_info_timer);
6262 
6263 	cancel_work_sync(&adapter->reset_task);
6264 	cancel_work_sync(&adapter->watchdog_task);
6265 	cancel_work_sync(&adapter->downshift_task);
6266 	cancel_work_sync(&adapter->update_phy_task);
6267 	cancel_work_sync(&adapter->print_hang_task);
6268 
6269 	if (!(netdev->flags & IFF_UP))
6270 		e1000_power_down_phy(adapter);
6271 
6272 	/* Don't lie to e1000_close() down the road. */
6273 	if (!down)
6274 		clear_bit(__E1000_DOWN, &adapter->state);
6275 	unregister_netdev(netdev);
6276 
6277 	if (pci_dev_run_wake(pdev))
6278 		pm_runtime_get_noresume(&pdev->dev);
6279 
6280 	/*
6281 	 * Release control of h/w to f/w. If f/w is AMT enabled, this
6282 	 * would have already happened in close and is redundant.
6283 */ 6284 e1000e_release_hw_control(adapter); 6285 6286 e1000e_reset_interrupt_capability(adapter); 6287 kfree(adapter->tx_ring); 6288 kfree(adapter->rx_ring); 6289 6290 iounmap(adapter->hw.hw_addr); 6291 if (adapter->hw.flash_address) 6292 iounmap(adapter->hw.flash_address); 6293 pci_release_selected_regions(pdev, 6294 pci_select_bars(pdev, IORESOURCE_MEM)); 6295 6296 free_netdev(netdev); 6297 6298 /* AER disable */ 6299 pci_disable_pcie_error_reporting(pdev); 6300 6301 pci_disable_device(pdev); 6302 } 6303 6304 /* PCI Error Recovery (ERS) */ 6305 static struct pci_error_handlers e1000_err_handler = { 6306 .error_detected = e1000_io_error_detected, 6307 .slot_reset = e1000_io_slot_reset, 6308 .resume = e1000_io_resume, 6309 }; 6310 6311 static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { 6312 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, 6313 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, 6314 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, 6315 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 }, 6316 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 }, 6317 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 }, 6318 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 }, 6319 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 }, 6320 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 }, 6321 6322 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 }, 6323 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 }, 6324 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 }, 6325 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 }, 6326 6327 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 }, 6328 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 }, 6329 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 }, 6330 6331 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 }, 6332 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 }, 6333 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 }, 6334 6335 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT), 6336 board_80003es2lan }, 6337 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT), 6338 board_80003es2lan }, 6339 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT), 6340 board_80003es2lan }, 6341 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT), 6342 board_80003es2lan }, 6343 6344 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan }, 6345 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan }, 6346 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan }, 6347 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan }, 6348 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan }, 6349 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan }, 6350 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan }, 6351 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan }, 6352 6353 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan }, 6354 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan }, 6355 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, 6356 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, 6357 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, 6358 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan }, 6359 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), 
board_ich9lan }, 6360 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan }, 6361 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan }, 6362 6363 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan }, 6364 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan }, 6365 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan }, 6366 6367 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan }, 6368 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan }, 6369 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan }, 6370 6371 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan }, 6372 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan }, 6373 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan }, 6374 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan }, 6375 6376 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan }, 6377 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan }, 6378 6379 { } /* terminate list */ 6380 }; 6381 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); 6382 6383 #ifdef CONFIG_PM 6384 static const struct dev_pm_ops e1000_pm_ops = { 6385 SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume) 6386 SET_RUNTIME_PM_OPS(e1000_runtime_suspend, 6387 e1000_runtime_resume, e1000_idle) 6388 }; 6389 #endif 6390 6391 /* PCI Device API Driver */ 6392 static struct pci_driver e1000_driver = { 6393 .name = e1000e_driver_name, 6394 .id_table = e1000_pci_tbl, 6395 .probe = e1000_probe, 6396 .remove = __devexit_p(e1000_remove), 6397 #ifdef CONFIG_PM 6398 .driver.pm = &e1000_pm_ops, 6399 #endif 6400 .shutdown = e1000_shutdown, 6401 .err_handler = &e1000_err_handler 6402 }; 6403 6404 /** 6405 * e1000_init_module - Driver Registration Routine 6406 * 6407 * e1000_init_module is the first routine called when the driver is 6408 * loaded. All it does is register with the PCI subsystem. 6409 **/ 6410 static int __init e1000_init_module(void) 6411 { 6412 int ret; 6413 pr_info("Intel(R) PRO/1000 Network Driver - %s\n", 6414 e1000e_driver_version); 6415 pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n"); 6416 ret = pci_register_driver(&e1000_driver); 6417 6418 return ret; 6419 } 6420 module_init(e1000_init_module); 6421 6422 /** 6423 * e1000_exit_module - Driver Exit Cleanup Routine 6424 * 6425 * e1000_exit_module is called just before the driver is removed 6426 * from memory. 6427 **/ 6428 static void __exit e1000_exit_module(void) 6429 { 6430 pci_unregister_driver(&e1000_driver); 6431 } 6432 module_exit(e1000_exit_module); 6433 6434 6435 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 6436 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); 6437 MODULE_LICENSE("GPL"); 6438 MODULE_VERSION(DRV_VERSION); 6439 6440 /* e1000_main.c */ 6441
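/*
 * Minimal sketch of the registration pattern used by e1000_driver above,
 * not part of this file and excluded from the build.  The driver name
 * "skel", the skel_* identifiers and the device ID 0x1234 are hypothetical
 * placeholders; probe/remove are reduced to the bare enable/disable calls.
 * A real driver would also map BARs, allocate a netdev and request IRQs.
 */
#if 0
#include <linux/module.h>
#include <linux/pci.h>

static int __devinit skel_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	/* claim the memory-mapped BARs; mirrors the start of e1000_probe() */
	return pci_enable_device_mem(pdev);
}

static void __devexit skel_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static DEFINE_PCI_DEVICE_TABLE(skel_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, 0x1234) },		/* hypothetical device ID */
	{ }					/* terminate list */
};
MODULE_DEVICE_TABLE(pci, skel_pci_tbl);

static struct pci_driver skel_driver = {
	.name = "skel",
	.id_table = skel_pci_tbl,
	.probe = skel_probe,
	.remove = __devexit_p(skel_remove),
};

static int __init skel_init_module(void)
{
	return pci_register_driver(&skel_driver);
}
module_init(skel_init_module);

static void __exit skel_exit_module(void)
{
	pci_unregister_driver(&skel_driver);
}
module_exit(skel_exit_module);

MODULE_LICENSE("GPL");
#endif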