/*******************************************************************************

  Intel(R) 82576 Virtual Function Linux driver
  Copyright(c) 2009 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "igbvf.h"

#define DRV_VERSION "2.0.1-k"
char igbvf_driver_name[] = "igbvf";
const char igbvf_driver_version[] = DRV_VERSION;
static const char igbvf_driver_string[] =
	"Intel(R) Gigabit Virtual Function Network Driver";
static const char igbvf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";

static int igbvf_poll(struct napi_struct *napi, int budget);
static void igbvf_reset(struct igbvf_adapter *);
static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);

static struct igbvf_info igbvf_vf_info = {
	.mac      = e1000_vfadapt,
	.flags    = 0,
	.pba      = 10,
	.init_ops = e1000_init_function_pointers_vf,
};

static struct igbvf_info igbvf_i350_vf_info = {
	.mac      = e1000_vfadapt_i350,
	.flags    = 0,
	.pba      = 10,
	.init_ops = e1000_init_function_pointers_vf,
};

static const struct igbvf_info *igbvf_info_tbl[] = {
	[board_vf]      = &igbvf_vf_info,
	[board_i350_vf] = &igbvf_i350_vf_info,
};

/**
 * igbvf_desc_unused - calculate if we have unused descriptors
 * @ring: ring to check for unused descriptors
 **/
static int igbvf_desc_unused(struct igbvf_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
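/* Illustrative walk-through of the ring arithmetic above (added
 * commentary, not from the original source): with a ring of count = 256,
 * next_to_clean = 10 and next_to_use = 250, the second branch yields
 * 256 + 10 - 250 - 1 = 15 free descriptors.  One slot is always held
 * back so that next_to_use == next_to_clean unambiguously means "empty"
 * rather than "full".
 */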
/**
 * igbvf_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: pointer to the netdev struct
 * @skb: pointer to sk_buff to be indicated to stack
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 **/
static void igbvf_receive_skb(struct igbvf_adapter *adapter,
			      struct net_device *netdev,
			      struct sk_buff *skb,
			      u32 status, u16 vlan)
{
	if (status & E1000_RXD_STAT_VP) {
		u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
		if (test_bit(vid, adapter->active_vlans))
			__vlan_hwaccel_put_tag(skb, vid);
	}
	netif_receive_skb(skb);
}

static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
					 u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* skip if the Ignore Checksum bit is set, or if Rx checksum
	 * offload has been disabled through ethtool
	 */
	if ((status_err & E1000_RXD_STAT_IXSM) ||
	    (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED))
		return;

	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	adapter->hw_csum_good++;
}

/**
 * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: address of ring structure to repopulate
 * @cleaned_count: number of buffers to repopulate
 **/
static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
				   int cleaned_count)
{
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igbvf_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	if (adapter->rx_ps_hdr_size)
		bufsz = adapter->rx_ps_hdr_size;
	else
		bufsz = adapter->rx_buffer_len;

	while (cleaned_count--) {
		rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);

		if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = alloc_page(GFP_ATOMIC);
				if (!buffer_info->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				dma_map_page(&pdev->dev, buffer_info->page,
					     buffer_info->page_offset,
					     PAGE_SIZE / 2,
					     DMA_FROM_DEVICE);
		}

		if (!buffer_info->skb) {
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			buffer_info->skb = skb;
			buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
							  bufsz,
							  DMA_FROM_DEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info.
		 */
		if (adapter->rx_ps_hdr_size) {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
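/* Notes on the scheme above (added commentary, not from the original
 * source): in packet-split mode each page is used in two halves, and
 * page_offset ^= PAGE_SIZE / 2 flips between them on successive
 * allocations, so a page is only released once both halves are idle.
 * The tail register is written with the index of the last initialized
 * descriptor (one behind next_to_use), after a wmb() so the hardware
 * never fetches a half-written descriptor.
 */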
/**
 * igbvf_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @work_done: output parameter, incremented for each packet processed
 * @work_to_do: maximum number of packets to process (the NAPI budget)
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
			       int *work_done, int work_to_do)
{
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igbvf_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 length, hlen, staterr;

	i = rx_ring->next_to_clean;
	rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		buffer_info = &rx_ring->buffer_info[i];

		/* HW will not DMA in data larger than the given buffer, even
		 * if it parses the (NFS, of course) header to be larger.  In
		 * that case, it fills the header buffer and spills the rest
		 * into the page.
		 */
		hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) &
			E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
		if (hlen > adapter->rx_ps_hdr_size)
			hlen = adapter->rx_ps_hdr_size;

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;
		if (!adapter->rx_ps_hdr_size) {
			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			skb_put(skb, length);
			goto send_up;
		}

		if (!skb_shinfo(skb)->nr_frags) {
			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_ps_hdr_size,
					 DMA_FROM_DEVICE);
			skb_put(skb, hlen);
		}

		if (length) {
			dma_unmap_page(&pdev->dev, buffer_info->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   buffer_info->page,
					   buffer_info->page_offset,
					   length);

			if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
			    (page_count(buffer_info->page) != 1))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += PAGE_SIZE / 2;
		}
send_up:
		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		total_bytes += skb->len;
		total_packets++;

		igbvf_rx_checksum_adv(adapter, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);

		igbvf_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) {
			igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igbvf_desc_unused(rx_ring);

	if (cleaned_count)
		igbvf_alloc_rx_buffers(rx_ring, cleaned_count);

	adapter->total_rx_packets += total_packets;
	adapter->total_rx_bytes += total_bytes;
	adapter->net_stats.rx_bytes += total_bytes;
	adapter->net_stats.rx_packets += total_packets;
	return cleaned;
}

static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
			    struct igbvf_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev,
				       buffer_info->dma,
				       buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev,
					 buffer_info->dma,
					 buffer_info->length,
					 DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}

/**
 * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: ring being initialized
 *
 * Return 0 on success, negative on failure
 **/
int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
			     struct igbvf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct igbvf_buffer) * tx_ring->count;
	tx_ring->buffer_info = vzalloc(size);
	if (!tx_ring->buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);

	if (!tx_ring->desc)
		goto err;

	tx_ring->adapter = adapter;
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;
err:
	vfree(tx_ring->buffer_info);
	dev_err(&adapter->pdev->dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

/**
 * igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: ring being initialized
 *
 * Returns 0 on success, negative on failure
 **/
int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
			     struct igbvf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct igbvf_buffer) * rx_ring->count;
	rx_ring->buffer_info = vzalloc(size);
	if (!rx_ring->buffer_info)
		goto err;

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	rx_ring->adapter = adapter;

	return 0;

err:
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	dev_err(&adapter->pdev->dev,
		"Unable to allocate memory for the receive descriptor ring\n");
	return -ENOMEM;
}
/**
 * igbvf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = tx_ring->adapter;
	struct igbvf_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		igbvf_put_txbuf(adapter, buffer_info);
	}

	size = sizeof(struct igbvf_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * igbvf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: ring to free resources from
 *
 * Free all transmit software resources
 **/
void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
{
	struct pci_dev *pdev = tx_ring->adapter->pdev;

	igbvf_clean_tx_ring(tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igbvf_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
{
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct igbvf_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->rx_ps_hdr_size) {
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_ps_hdr_size,
						 DMA_FROM_DEVICE);
			} else {
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 DMA_FROM_DEVICE);
			}
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		if (buffer_info->page) {
			if (buffer_info->page_dma)
				dma_unmap_page(&pdev->dev,
					       buffer_info->page_dma,
					       PAGE_SIZE / 2,
					       DMA_FROM_DEVICE);
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_dma = 0;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igbvf_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * igbvf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
{
	struct pci_dev *pdev = rx_ring->adapter->pdev;

	igbvf_clean_rx_ring(rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);
	rx_ring->desc = NULL;
}

/**
 * igbvf_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte counts during the last
 * interrupt.  The advantage of per-interrupt computation is faster updates
 * and more accurate ITR for the current traffic pattern.  Constants in this
 * function were computed based on theoretical maximum wire speed and
 * thresholds were set based on testing data as well as attempting to
 * minimize response time while increasing bulk throughput.
 **/
static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter,
					   enum latency_range itr_setting,
					   int packets, int bytes)
{
	enum latency_range retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000)
				retval = bulk_latency;
			else if ((packets < 10) || ((bytes/packets) > 1200))
				retval = bulk_latency;
			else if ((packets > 35))
				retval = lowest_latency;
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	default:
		break;
	}

update_itr_done:
	return retval;
}

static int igbvf_range_to_itr(enum latency_range current_range)
{
	int new_itr;

	switch (current_range) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IGBVF_70K_ITR;
		break;
	case low_latency:
		new_itr = IGBVF_20K_ITR;
		break;
	case bulk_latency:
		new_itr = IGBVF_4K_ITR;
		break;
	default:
		new_itr = IGBVF_START_ITR;
		break;
	}
	return new_itr;
}
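/* Worked example for the heuristic above (added commentary, not from the
 * original source): while in low_latency, an interval of 40 packets and
 * 60000 bytes gives 1500 bytes/packet (> 1200), so the range drops to
 * bulk_latency and interrupts are throttled harder.  The same 40 packets
 * at 40000 bytes (1000 bytes/packet, more than 35 packets) is treated as
 * latency-sensitive small traffic and promoted to lowest_latency, which
 * igbvf_range_to_itr() maps to the highest interrupt rate.
 */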
static void igbvf_set_itr(struct igbvf_adapter *adapter)
{
	u32 new_itr;

	adapter->tx_ring->itr_range =
			igbvf_update_itr(adapter,
					 adapter->tx_ring->itr_val,
					 adapter->total_tx_packets,
					 adapter->total_tx_bytes);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->requested_itr == 3 &&
	    adapter->tx_ring->itr_range == lowest_latency)
		adapter->tx_ring->itr_range = low_latency;

	new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range);

	if (new_itr != adapter->tx_ring->itr_val) {
		u32 current_itr = adapter->tx_ring->itr_val;
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > current_itr ?
			  min(current_itr + (new_itr >> 2), new_itr) :
			  new_itr;
		adapter->tx_ring->itr_val = new_itr;

		adapter->tx_ring->set_itr = 1;
	}

	adapter->rx_ring->itr_range =
			igbvf_update_itr(adapter, adapter->rx_ring->itr_val,
					 adapter->total_rx_packets,
					 adapter->total_rx_bytes);
	if (adapter->requested_itr == 3 &&
	    adapter->rx_ring->itr_range == lowest_latency)
		adapter->rx_ring->itr_range = low_latency;

	new_itr = igbvf_range_to_itr(adapter->rx_ring->itr_range);

	if (new_itr != adapter->rx_ring->itr_val) {
		u32 current_itr = adapter->rx_ring->itr_val;
		new_itr = new_itr > current_itr ?
			  min(current_itr + (new_itr >> 2), new_itr) :
			  new_itr;
		adapter->rx_ring->itr_val = new_itr;

		adapter->rx_ring->set_itr = 1;
	}
}
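/* Added commentary on the smoothing step above (not from the original
 * source): itr_val is an interval, so a larger value means fewer
 * interrupts per second.  When the new target interval is larger
 * (throttling down toward bulk), the value ramps gradually, gaining at
 * most a quarter of the target per update; a smaller target (more
 * interrupts for latency-sensitive traffic) is applied immediately.
 * For example (illustrative values), stepping from a current itr_val of
 * 196 toward a target of 980 yields min(196 + (980 >> 2), 980) = 441
 * after one update.
 */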
/**
 * igbvf_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: ring whose completed descriptors are cleaned
 *
 * returns true if ring is completely cleaned
 **/
static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = tx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct igbvf_buffer *buffer_info;
	struct sk_buff *skb;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, eop, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		rmb(); /* read buffer_info after eop_desc status */
		for (cleaned = false; !cleaned; count++) {
			tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);
			skb = buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			igbvf_put_txbuf(adapter, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(count &&
		     netif_carrier_ok(netdev) &&
		     igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__IGBVF_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	adapter->net_stats.tx_bytes += total_bytes;
	adapter->net_stats.tx_packets += total_packets;
	return count < tx_ring->count;
}

static irqreturn_t igbvf_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	adapter->int_counter1++;

	netif_carrier_off(netdev);
	hw->mac.get_link_status = 1;
	if (!test_bit(__IGBVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies + 1);

	ew32(EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;

	if (tx_ring->set_itr) {
		writel(tx_ring->itr_val,
		       adapter->hw.hw_addr + tx_ring->itr_register);
		adapter->tx_ring->set_itr = 0;
	}

	adapter->total_tx_bytes = 0;
	adapter->total_tx_packets = 0;

	/* auto mask will automatically reenable the interrupt when we write
	 * EICS
	 */
	if (!igbvf_clean_tx_irq(tx_ring))
		/* Ring was not completely cleaned, so fire another interrupt */
		ew32(EICS, tx_ring->eims_value);
	else
		ew32(EIMS, tx_ring->eims_value);

	return IRQ_HANDLED;
}

static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	adapter->int_counter0++;

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
	if (adapter->rx_ring->set_itr) {
		writel(adapter->rx_ring->itr_val,
		       adapter->hw.hw_addr + adapter->rx_ring->itr_register);
		adapter->rx_ring->set_itr = 0;
	}

	if (napi_schedule_prep(&adapter->rx_ring->napi)) {
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->rx_ring->napi);
	}

	return IRQ_HANDLED;
}

#define IGBVF_NO_QUEUE -1
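/* Added commentary on the IVAR layout used below (not from the original
 * source): each 32-bit IVAR0 register holds four one-byte entries, one
 * per queue, covering two Rx and two Tx queues:
 *
 *	bits  7:0   Rx queue (2 * index)
 *	bits 15:8   Tx queue (2 * index)
 *	bits 23:16  Rx queue (2 * index + 1)
 *	bits 31:24  Tx queue (2 * index + 1)
 *
 * Each entry is the MSI-X vector number OR'd with E1000_IVAR_VALID, which
 * is why the shifts below differ for odd and even queue numbers.
 */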
static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
				int tx_queue, int msix_vector)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* 82576 uses a table-based method for assigning vectors.
	 * Each queue has a single entry in the table to which we write
	 * a vector number along with a "valid" bit.  Sadly, the layout
	 * of the table is somewhat counterintuitive.
	 */
	if (rx_queue > IGBVF_NO_QUEUE) {
		index = (rx_queue >> 1);
		ivar = array_er32(IVAR0, index);
		if (rx_queue & 0x1) {
			/* vector goes into third byte of register */
			ivar = ivar & 0xFF00FFFF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
		} else {
			/* vector goes into low byte of register */
			ivar = ivar & 0xFFFFFF00;
			ivar |= msix_vector | E1000_IVAR_VALID;
		}
		adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
		array_ew32(IVAR0, index, ivar);
	}
	if (tx_queue > IGBVF_NO_QUEUE) {
		index = (tx_queue >> 1);
		ivar = array_er32(IVAR0, index);
		if (tx_queue & 0x1) {
			/* vector goes into high byte of register */
			ivar = ivar & 0x00FFFFFF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
		} else {
			/* vector goes into second byte of register */
			ivar = ivar & 0xFFFF00FF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
		}
		adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
		array_ew32(IVAR0, index, ivar);
	}
}

/**
 * igbvf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * igbvf_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igbvf_configure_msix(struct igbvf_adapter *adapter)
{
	u32 tmp;
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	int vector = 0;

	adapter->eims_enable_mask = 0;

	igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++);
	adapter->eims_enable_mask |= tx_ring->eims_value;
	writel(tx_ring->itr_val, hw->hw_addr + tx_ring->itr_register);
	igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++);
	adapter->eims_enable_mask |= rx_ring->eims_value;
	writel(rx_ring->itr_val, hw->hw_addr + rx_ring->itr_register);

	/* set vector for other causes, i.e. link changes */
	tmp = (vector++ | E1000_IVAR_VALID);

	ew32(IVAR_MISC, tmp);

	adapter->eims_enable_mask = (1 << (vector)) - 1;
	adapter->eims_other = 1 << (vector - 1);
	e1e_flush();
}
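/* Added note (not from the original source): with the fixed vector
 * layout above (0 = Tx, 1 = Rx, 2 = other/mailbox), vector ends up at 3,
 * so eims_enable_mask = (1 << 3) - 1 = 0x7 covers all three vectors and
 * eims_other = 1 << 2 selects just the link/mailbox vector that
 * igbvf_msix_other() re-arms.
 */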
static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	}
}

/**
 * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
 * @adapter: board private structure
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
{
	int err = -ENOMEM;
	int i;

	/* we allocate 3 vectors, 1 for tx, 1 for rx, one for pf messages */
	adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (adapter->msix_entries) {
		for (i = 0; i < 3; i++)
			adapter->msix_entries[i].entry = i;

		err = pci_enable_msix(adapter->pdev,
				      adapter->msix_entries, 3);
	}

	if (err) {
		/* MSI-X failed */
		dev_err(&adapter->pdev->dev,
			"Failed to initialize MSI-X interrupts.\n");
		igbvf_reset_interrupt_capability(adapter);
	}
}

/**
 * igbvf_request_msix - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igbvf_request_msix(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0, vector = 0;

	if (strlen(netdev->name) < (IFNAMSIZ - 5)) {
		sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
		sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
	} else {
		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
			  netdev);
	if (err)
		goto out;

	adapter->tx_ring->itr_register = E1000_EITR(vector);
	adapter->tx_ring->itr_val = adapter->current_itr;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
			  netdev);
	if (err)
		goto out;

	adapter->rx_ring->itr_register = E1000_EITR(vector);
	adapter->rx_ring->itr_val = adapter->current_itr;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igbvf_msix_other, 0, netdev->name, netdev);
	if (err)
		goto out;

	igbvf_configure_msix(adapter);
	return 0;
out:
	return err;
}
/**
 * igbvf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 **/
static int __devinit igbvf_alloc_queues(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);

	return 0;
}

/**
 * igbvf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igbvf_request_irq(struct igbvf_adapter *adapter)
{
	int err = -1;

	/* igbvf supports msi-x only */
	if (adapter->msix_entries)
		err = igbvf_request_msix(adapter);

	if (!err)
		return err;

	dev_err(&adapter->pdev->dev,
		"Unable to allocate interrupt, Error: %d\n", err);

	return err;
}

static void igbvf_free_irq(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int vector;

	if (adapter->msix_entries) {
		for (vector = 0; vector < 3; vector++)
			free_irq(adapter->msix_entries[vector].vector, netdev);
	}
}

/**
 * igbvf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igbvf_irq_disable(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(EIMC, ~0);

	if (adapter->msix_entries)
		ew32(EIAC, 0);
}

/**
 * igbvf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igbvf_irq_enable(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(EIAC, adapter->eims_enable_mask);
	ew32(EIAM, adapter->eims_enable_mask);
	ew32(EIMS, adapter->eims_enable_mask);
}

/**
 * igbvf_poll - NAPI Rx polling callback
 * @napi: struct associated with this polling callback
 * @budget: amount of packets driver is allowed to process this poll
 **/
static int igbvf_poll(struct napi_struct *napi, int budget)
{
	struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi);
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int work_done = 0;

	igbvf_clean_rx_irq(adapter, &work_done, budget);

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);

		if (adapter->requested_itr & 3)
			igbvf_set_itr(adapter);

		if (!test_bit(__IGBVF_DOWN, &adapter->state))
			ew32(EIMS, adapter->rx_ring->eims_value);
	}

	return work_done;
}
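/* Added note on the NAPI contract above (not from the original source):
 * returning work_done == budget tells the core to keep the ring on the
 * poll list and call igbvf_poll() again without re-enabling the
 * interrupt; only when the ring is drained (work_done < budget) does the
 * driver call napi_complete() and unmask its Rx vector via EIMS, handing
 * completion signalling back to the hardware.
 */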
/**
 * igbvf_set_rlpml - set receive large packet maximum length
 * @adapter: board private structure
 *
 * Configure the maximum size of packets that will be received
 **/
static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
{
	int max_frame_size;
	struct e1000_hw *hw = &adapter->hw;

	max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE;
	e1000_rlpml_set_vf(hw, max_frame_size);
}

static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (hw->mac.ops.set_vfta(hw, vid, true)) {
		dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
		return -EINVAL;
	}
	set_bit(vid, adapter->active_vlans);
	return 0;
}

static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (hw->mac.ops.set_vfta(hw, vid, false)) {
		dev_err(&adapter->pdev->dev,
			"Failed to remove vlan id %d\n", vid);
		return -EINVAL;
	}
	clear_bit(vid, adapter->active_vlans);
	return 0;
}

static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		igbvf_vlan_rx_add_vid(adapter->netdev, vid);
}

/**
 * igbvf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igbvf_configure_tx(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	u64 tdba;
	u32 txdctl, dca_txctrl;

	/* disable transmits */
	txdctl = er32(TXDCTL(0));
	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
	e1e_flush();
	msleep(10);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
	tdba = tx_ring->dma;
	ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
	ew32(TDBAH(0), (tdba >> 32));
	ew32(TDH(0), 0);
	ew32(TDT(0), 0);
	tx_ring->head = E1000_TDH(0);
	tx_ring->tail = E1000_TDT(0);

	/* Turn off Relaxed Ordering on head write-backs.  The writebacks
	 * MUST be delivered in order or it will completely screw up
	 * our bookkeeping.
	 */
	dca_txctrl = er32(DCA_TXCTRL(0));
	dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
	ew32(DCA_TXCTRL(0), dca_txctrl);

	/* enable transmits */
	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	ew32(TXDCTL(0), txdctl);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS;

	/* enable Report Status bit */
	adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
}

/**
 * igbvf_setup_srrctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 srrctl = 0;

	srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
		    E1000_SRRCTL_BSIZEHDR_MASK |
		    E1000_SRRCTL_BSIZEPKT_MASK);

	/* Enable queue drop to avoid head of line blocking */
	srrctl |= E1000_SRRCTL_DROP_EN;

	/* Setup buffer sizes */
	srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
		  E1000_SRRCTL_BSIZEPKT_SHIFT;

	if (adapter->rx_buffer_len < 2048) {
		adapter->rx_ps_hdr_size = 0;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	} else {
		adapter->rx_ps_hdr_size = 128;
		srrctl |= adapter->rx_ps_hdr_size <<
			  E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	}

	ew32(SRRCTL(0), srrctl);
}
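/* Added note (not from the original source): the packet buffer size is
 * encoded in 1 KB units, hence the ALIGN(..., 1024) followed by the
 * BSIZEPKT shift.  Buffers under 2 KB use the single-buffer descriptor
 * format, while larger MTUs switch to header-split mode with a fixed
 * 128-byte header buffer, which is what drives the rx_ps_hdr_size paths
 * in the Rx allocation and cleanup code earlier in this file.
 */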
/**
 * igbvf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igbvf_configure_rx(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	u64 rdba;
	u32 rdlen, rxdctl;

	/* disable receives */
	rxdctl = er32(RXDCTL(0));
	ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
	e1e_flush();
	msleep(10);

	rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	rdba = rx_ring->dma;
	ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
	ew32(RDBAH(0), (rdba >> 32));
	ew32(RDLEN(0), rdlen);
	rx_ring->head = E1000_RDH(0);
	rx_ring->tail = E1000_RDT(0);
	ew32(RDH(0), 0);
	ew32(RDT(0), 0);

	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	rxdctl &= 0xFFF00000;
	rxdctl |= IGBVF_RX_PTHRESH;
	rxdctl |= IGBVF_RX_HTHRESH << 8;
	rxdctl |= IGBVF_RX_WTHRESH << 16;

	igbvf_set_rlpml(adapter);

	/* enable receives */
	ew32(RXDCTL(0), rxdctl);
}

/**
 * igbvf_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igbvf_set_multi(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list = NULL;
	int i;

	if (!netdev_mc_empty(netdev)) {
		mta_list = kmalloc(netdev_mc_count(netdev) * ETH_ALEN,
				   GFP_ATOMIC);
		if (!mta_list) {
			dev_err(&adapter->pdev->dev,
				"failed to allocate multicast filter list\n");
			return;
		}
	}

	/* prepare a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
	kfree(mta_list);
}
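/* Added note (not from the original source): the mailbox operation behind
 * update_mc_addr_list() takes a flat array of i consecutive ETH_ALEN-byte
 * MAC addresses rather than the kernel's netdev_hw_addr list, which is
 * why the addresses are repacked above.  Three multicast entries would
 * therefore occupy bytes 0-5, 6-11 and 12-17 of mta_list.
 */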
/**
 * igbvf_configure - configure the hardware for Rx and Tx
 * @adapter: private board structure
 **/
static void igbvf_configure(struct igbvf_adapter *adapter)
{
	igbvf_set_multi(adapter->netdev);

	igbvf_restore_vlan(adapter);

	igbvf_configure_tx(adapter);
	igbvf_setup_srrctl(adapter);
	igbvf_configure_rx(adapter);
	igbvf_alloc_rx_buffers(adapter->rx_ring,
			       igbvf_desc_unused(adapter->rx_ring));
}

/* igbvf_reset - bring the hardware into a known good state
 *
 * This function boots the hardware and enables some settings that
 * require a configuration cycle of the hardware - those cannot be
 * set/changed during runtime.  After reset the device needs to be
 * properly configured for Rx, Tx etc.
 */
static void igbvf_reset(struct igbvf_adapter *adapter)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;

	/* Allow time for pending master requests to run */
	if (mac->ops.reset_hw(hw))
		dev_err(&adapter->pdev->dev, "PF still resetting\n");

	mac->ops.init_hw(hw);

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}

	adapter->last_reset = jiffies;
}

int igbvf_up(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	igbvf_configure(adapter);

	clear_bit(__IGBVF_DOWN, &adapter->state);

	napi_enable(&adapter->rx_ring->napi);
	if (adapter->msix_entries)
		igbvf_configure_msix(adapter);

	/* Clear any pending interrupts. */
	er32(EICR);
	igbvf_irq_enable(adapter);

	/* start the watchdog */
	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies + 1);

	return 0;
}

void igbvf_down(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 rxdctl, txdctl;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer
	 */
	set_bit(__IGBVF_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rxdctl = er32(RXDCTL(0));
	ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);

	netif_stop_queue(netdev);

	/* disable transmits in the hardware */
	txdctl = er32(TXDCTL(0));
	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);

	/* flush both disables and wait for them to finish */
	e1e_flush();
	msleep(10);

	napi_disable(&adapter->rx_ring->napi);

	igbvf_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);

	netif_carrier_off(netdev);

	/* record the stats before reset */
	igbvf_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	igbvf_reset(adapter);
	igbvf_clean_tx_ring(adapter->tx_ring);
	igbvf_clean_rx_ring(adapter->rx_ring);
}

void igbvf_reinit_locked(struct igbvf_adapter *adapter)
{
	might_sleep();
	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
		msleep(1);
	igbvf_down(adapter);
	igbvf_up(adapter);
	clear_bit(__IGBVF_RESETTING, &adapter->state);
}
/**
 * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter)
 * @adapter: board private structure to initialize
 *
 * igbvf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	s32 rc;

	adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
	adapter->rx_ps_hdr_size = 0;
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	adapter->tx_int_delay = 8;
	adapter->tx_abs_int_delay = 32;
	adapter->rx_int_delay = 0;
	adapter->rx_abs_int_delay = 8;
	adapter->requested_itr = 3;
	adapter->current_itr = IGBVF_START_ITR;

	/* Set various function pointers */
	adapter->ei->init_ops(&adapter->hw);

	rc = adapter->hw.mac.ops.init_params(&adapter->hw);
	if (rc)
		return rc;

	rc = adapter->hw.mbx.ops.init_params(&adapter->hw);
	if (rc)
		return rc;

	igbvf_set_interrupt_capability(adapter);

	if (igbvf_alloc_queues(adapter))
		return -ENOMEM;

	spin_lock_init(&adapter->tx_queue_lock);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igbvf_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);

	set_bit(__IGBVF_DOWN, &adapter->state);
	return 0;
}

static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	adapter->stats.last_gprc = er32(VFGPRC);
	adapter->stats.last_gorc = er32(VFGORC);
	adapter->stats.last_gptc = er32(VFGPTC);
	adapter->stats.last_gotc = er32(VFGOTC);
	adapter->stats.last_mprc = er32(VFMPRC);
	adapter->stats.last_gotlbc = er32(VFGOTLBC);
	adapter->stats.last_gptlbc = er32(VFGPTLBC);
	adapter->stats.last_gorlbc = er32(VFGORLBC);
	adapter->stats.last_gprlbc = er32(VFGPRLBC);

	adapter->stats.base_gprc = er32(VFGPRC);
	adapter->stats.base_gorc = er32(VFGORC);
	adapter->stats.base_gptc = er32(VFGPTC);
	adapter->stats.base_gotc = er32(VFGOTC);
	adapter->stats.base_mprc = er32(VFMPRC);
	adapter->stats.base_gotlbc = er32(VFGOTLBC);
	adapter->stats.base_gptlbc = er32(VFGPTLBC);
	adapter->stats.base_gorlbc = er32(VFGORLBC);
	adapter->stats.base_gprlbc = er32(VFGPRLBC);
}
/**
 * igbvf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igbvf_open(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__IGBVF_TESTING, &adapter->state))
		return -EBUSY;

	/* allocate transmit descriptors */
	err = igbvf_setup_tx_resources(adapter, adapter->tx_ring);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igbvf_setup_rx_resources(adapter, adapter->rx_ring);
	if (err)
		goto err_setup_rx;

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	igbvf_configure(adapter);

	err = igbvf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igbvf_up() */
	clear_bit(__IGBVF_DOWN, &adapter->state);

	napi_enable(&adapter->rx_ring->napi);

	/* clear any pending interrupts */
	er32(EICR);

	igbvf_irq_enable(adapter);

	/* start the watchdog */
	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies + 1);

	return 0;

err_req_irq:
	igbvf_free_rx_resources(adapter->rx_ring);
err_setup_rx:
	igbvf_free_tx_resources(adapter->tx_ring);
err_setup_tx:
	igbvf_reset(adapter);

	return err;
}

/**
 * igbvf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igbvf_close(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
	igbvf_down(adapter);

	igbvf_free_irq(adapter);

	igbvf_free_tx_resources(adapter->tx_ring);
	igbvf_free_rx_resources(adapter->rx_ring);

	return 0;
}

/**
 * igbvf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igbvf_set_mac(struct net_device *netdev, void *p)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

	if (memcmp(addr->sa_data, hw->mac.addr, ETH_ALEN))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	netdev->addr_assign_type &= ~NET_ADDR_RANDOM;

	return 0;
}

#define UPDATE_VF_COUNTER(reg, name)					\
{									\
	u32 current_counter = er32(reg);				\
	if (current_counter < adapter->stats.last_##name)		\
		adapter->stats.name += 0x100000000LL;			\
	adapter->stats.last_##name = current_counter;			\
	adapter->stats.name &= 0xFFFFFFFF00000000LL;			\
	adapter->stats.name |= current_counter;				\
}
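/* Added walk-through of the macro above (not from the original source):
 * the VF statistics registers are only 32 bits wide, so the macro keeps
 * a 64-bit software counter.  If the hardware value has wrapped since
 * the last read (current_counter < last_##name), 2^32 is added to the
 * running total; the low 32 bits are then replaced with the current
 * register value.  E.g. last = 0xFFFFFFF0 and current = 0x10 is treated
 * as 32 new events, not a decrease.
 */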
/**
 * igbvf_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void igbvf_update_stats(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;

	/* Prevent stats update while adapter is being reset, link is down
	 * or if the pci connection is down.
	 */
	if (adapter->link_speed == 0)
		return;

	if (test_bit(__IGBVF_RESETTING, &adapter->state))
		return;

	if (pci_channel_offline(pdev))
		return;

	UPDATE_VF_COUNTER(VFGPRC, gprc);
	UPDATE_VF_COUNTER(VFGORC, gorc);
	UPDATE_VF_COUNTER(VFGPTC, gptc);
	UPDATE_VF_COUNTER(VFGOTC, gotc);
	UPDATE_VF_COUNTER(VFMPRC, mprc);
	UPDATE_VF_COUNTER(VFGOTLBC, gotlbc);
	UPDATE_VF_COUNTER(VFGPTLBC, gptlbc);
	UPDATE_VF_COUNTER(VFGORLBC, gorlbc);
	UPDATE_VF_COUNTER(VFGPRLBC, gprlbc);

	/* Fill out the OS statistics structure */
	adapter->net_stats.multicast = adapter->stats.mprc;
}

static void igbvf_print_link_info(struct igbvf_adapter *adapter)
{
	dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n",
		 adapter->link_speed,
		 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
}

static bool igbvf_has_link(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	s32 ret_val = E1000_SUCCESS;
	bool link_active;

	/* If interface is down, stay link down */
	if (test_bit(__IGBVF_DOWN, &adapter->state))
		return false;

	ret_val = hw->mac.ops.check_for_link(hw);
	link_active = !hw->mac.get_link_status;

	/* if check for link returns error we will need to reset */
	if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ)))
		schedule_work(&adapter->reset_task);

	return link_active;
}

/**
 * igbvf_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igbvf_watchdog(unsigned long data)
{
	struct igbvf_adapter *adapter = (struct igbvf_adapter *)data;

	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

static void igbvf_watchdog_task(struct work_struct *work)
{
	struct igbvf_adapter *adapter = container_of(work,
						     struct igbvf_adapter,
						     watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	struct e1000_hw *hw = &adapter->hw;
	u32 link;
	int tx_pending = 0;

	link = igbvf_has_link(adapter);

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			mac->ops.get_link_up_info(&adapter->hw,
						  &adapter->link_speed,
						  &adapter->link_duplex);
			igbvf_print_link_info(adapter);

			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			dev_info(&adapter->pdev->dev, "Link is Down\n");
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	}

	if (netif_carrier_ok(netdev)) {
		igbvf_update_stats(adapter);
	} else {
		tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
			      tx_ring->count);
		if (tx_pending) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
		}
	}

	/* Cause software interrupt to ensure Rx ring is cleaned */
	ew32(EICS, adapter->rx_ring->eims_value);

	/* Reset the timer */
	if (!test_bit(__IGBVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + (2 * HZ)));
}

#define IGBVF_TX_FLAGS_CSUM		0x00000001
#define IGBVF_TX_FLAGS_VLAN		0x00000002
#define IGBVF_TX_FLAGS_TSO		0x00000004
#define IGBVF_TX_FLAGS_IPV4		0x00000008
#define IGBVF_TX_FLAGS_VLAN_MASK	0xffff0000
#define IGBVF_TX_FLAGS_VLAN_SHIFT	16

static int igbvf_tso(struct igbvf_adapter *adapter,
		     struct igbvf_ring *tx_ring,
		     struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct igbvf_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	u32 mss_l4len_idx, l4len;

	*hdr_len = 0;

	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err) {
			dev_err(&adapter->pdev->dev,
				"igbvf_tso returning an error\n");
			return err;
		}
	}

	l4len = tcp_hdrlen(skb);
	*hdr_len += l4len;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
	}

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
	/* VLAN MACLEN IPLEN */
	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
		info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
	*hdr_len += skb_network_offset(skb);
	info |= (skb_transport_header(skb) - skb_network_header(skb));
	*hdr_len += (skb_transport_header(skb) - skb_network_header(skb));
	context_desc->vlan_macip_lens = cpu_to_le32(info);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

	if (skb->protocol == htons(ETH_P_IP))
		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;

	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

	/* MSS L4LEN IDX */
	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);

	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->seqnum_seed = 0;

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = 0;
	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}
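/* Added walk-through of the hdr_len arithmetic above (not from the
 * original source): for a plain untagged IPv4/TCP frame, hdr_len ends up
 * as tcp_hdrlen (20 without TCP options) + skb_network_offset (14 for
 * the Ethernet header) + the IP header length (transport offset minus
 * network offset, 20 without IP options) = 54 bytes, i.e. everything the
 * hardware replicates in front of each segment it carves out of the TSO
 * payload.
 */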
static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
				 struct igbvf_ring *tx_ring,
				 struct sk_buff *skb, u32 tx_flags)
{
	struct e1000_adv_tx_context_desc *context_desc;
	struct igbvf_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	unsigned int i;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IGBVF_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IGBVF_TX_FLAGS_VLAN)
			info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);

		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			info |= (skb_transport_header(skb) -
				 skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(info);

		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case __constant_htons(ETH_P_IP):
				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			case __constant_htons(ETH_P_IPV6):
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			default:
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
		context_desc->seqnum_seed = 0;
		context_desc->mss_l4len_idx = 0;

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = 0;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}

static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* if there are enough descriptors we don't need to worry */
	if (igbvf_desc_unused(adapter->tx_ring) >= size)
		return 0;

	netif_stop_queue(netdev);

	smp_mb();

	/* We need to check again just in case room has been made available */
	if (igbvf_desc_unused(adapter->tx_ring) < size)
		return -EBUSY;

	netif_wake_queue(netdev);

	++adapter->restart_queue;
	return 0;
}

#define IGBVF_MAX_TXD_PWR	16
#define IGBVF_MAX_DATA_PER_TXD	(1 << IGBVF_MAX_TXD_PWR)

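/**
 * igbvf_tx_map_adv - map a packet's head and fragments for DMA
 * @adapter: board private structure
 * @tx_ring: ring the packet will be transmitted on
 * @skb: packet to map
 * @first: index of the first descriptor used by this packet
 *
 * Returns the number of descriptors mapped (linear head plus one per
 * page fragment), or 0 if a DMA mapping failed and the mappings made so
 * far were unwound.
 **/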
static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
				   struct igbvf_ring *tx_ring,
				   struct sk_buff *skb,
				   unsigned int first)
{
	struct igbvf_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int len = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
	buffer_info->length = len;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->mapped_as_page = false;
	buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, buffer_info->dma))
		goto dma_error;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		const struct skb_frag_struct *frag;

		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;

		frag = &skb_shinfo(skb)->frags[f];
		len = skb_frag_size(frag);

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->mapped_as_page = true;
		buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;
	}

	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return ++count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed buffer_info mapping */
	buffer_info->dma = 0;
	buffer_info->time_stamp = 0;
	buffer_info->length = 0;
	buffer_info->next_to_watch = 0;
	buffer_info->mapped_as_page = false;
	if (count)
		count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		igbvf_put_txbuf(adapter, buffer_info);
	}

	return 0;
}

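/**
 * igbvf_tx_queue_adv - write data descriptors and notify hardware
 * @adapter: board private structure
 * @tx_ring: ring the packet will be transmitted on
 * @tx_flags: IGBVF_TX_FLAGS_* set for this packet
 * @count: number of descriptors mapped by igbvf_tx_map_adv()
 * @paylen: total length of the packet in bytes
 * @hdr_len: header bytes subtracted from @paylen (nonzero only for TSO)
 *
 * Builds one advanced data descriptor per mapped buffer, applies the
 * adapter's end-of-packet command bits (adapter->txd_cmd) to the last
 * one, then moves the tail pointer so the hardware starts fetching.
 **/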
static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
				      struct igbvf_ring *tx_ring,
				      int tx_flags, int count, u32 paylen,
				      u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc = NULL;
	struct igbvf_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
			E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGBVF_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGBVF_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

	} else if (tx_flags & IGBVF_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64.)
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems
	 */
	mmiowb();
}

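/**
 * igbvf_xmit_frame_ring_adv - transmit a frame on a specific ring
 * @skb: frame to transmit
 * @netdev: network interface device structure
 * @tx_ring: ring to place the frame on
 *
 * Reserves descriptors, builds the optional TSO/checksum context
 * descriptor, maps the frame for DMA and hands it to the hardware; on a
 * mapping failure the descriptor queue is rewound and the frame dropped.
 **/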
static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
					     struct net_device *netdev,
					     struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	unsigned int first, tx_flags = 0;
	u8 hdr_len = 0;
	int count = 0;
	int tso = 0;

	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* need: count descriptors (one per fragment),
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for the context descriptor,
	 * otherwise try next time
	 */
	if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= IGBVF_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IGBVF_TX_FLAGS_IPV4;

	first = tx_ring->next_to_use;

	tso = skb_is_gso(skb) ?
	      igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0;
	if (unlikely(tso < 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IGBVF_TX_FLAGS_TSO;
	else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGBVF_TX_FLAGS_CSUM;

	/* count reflects descriptors mapped; if 0 then a mapping error
	 * has occurred and we need to rewind the descriptor queue
	 */
	count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);

	if (count) {
		igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
				   skb->len, hdr_len);
		/* Make sure there is space in the ring for the next send. */
		igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
	} else {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}

static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct igbvf_ring *tx_ring;

	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = &adapter->tx_ring[0];

	return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring);
}

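/* The VF exposes a single transmit queue, so every frame funnels through
 * tx_ring[0]; a multiqueue implementation would select the ring here.
 */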
/**
 * igbvf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igbvf_tx_timeout(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}

static void igbvf_reset_task(struct work_struct *work)
{
	struct igbvf_adapter *adapter;

	adapter = container_of(work, struct igbvf_adapter, reset_task);

	igbvf_reinit_locked(adapter);
}

/**
 * igbvf_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *igbvf_get_stats(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}

/**
 * igbvf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

#define MAX_STD_JUMBO_FRAME_SIZE 9234
	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
		msleep(1);
	/* igbvf_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;
	if (netif_running(netdev))
		igbvf_down(adapter);

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 * However with the new *_jumbo_rx* routines, jumbo receives will use
	 * fragmented skbs
	 */
	if (max_frame <= 1024)
		adapter->rx_buffer_len = 1024;
	else if (max_frame <= 2048)
		adapter->rx_buffer_len = 2048;
	else
#if (PAGE_SIZE / 2) > 16384
		adapter->rx_buffer_len = 16384;
#else
		adapter->rx_buffer_len = PAGE_SIZE / 2;
#endif

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
	    (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
					 ETH_FCS_LEN;

	dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igbvf_up(adapter);
	else
		igbvf_reset(adapter);

	clear_bit(__IGBVF_RESETTING, &adapter->state);

	return 0;
}

static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	default:
		return -EOPNOTSUPP;
	}
}

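/* Power management: suspend simply quiesces the interface and disables
 * the PCI device; no wake-on-LAN support is wired up here for the VF.
 */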
static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
		igbvf_down(adapter);
		igbvf_free_irq(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int igbvf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}

	pci_set_master(pdev);

	if (netif_running(netdev)) {
		err = igbvf_request_irq(adapter);
		if (err)
			return err;
	}

	igbvf_reset(adapter);

	if (netif_running(netdev))
		igbvf_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
#endif

static void igbvf_shutdown(struct pci_dev *pdev)
{
	igbvf_suspend(pdev, PMSG_SUSPEND);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igbvf_netpoll(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);

	igbvf_clean_tx_irq(adapter->tx_ring);

	enable_irq(adapter->pdev->irq);
}
#endif

/**
 * igbvf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igbvf_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igbvf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the igbvf_resume routine.
 */
static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	igbvf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * igbvf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the igbvf_resume routine.
 */
static void igbvf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igbvf_up(adapter)) {
			dev_err(&pdev->dev,
				"can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

static void igbvf_print_device_info(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	if (hw->mac.type == e1000_vfadapt_i350)
		dev_info(&pdev->dev, "Intel(R) I350 Virtual Function\n");
	else
		dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
	dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
}

static int igbvf_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (features & NETIF_F_RXCSUM)
		adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED;
	else
		adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED;

	return 0;
}

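/* Standard net_device_ops. The VF cannot program the shared receive
 * filters itself, so the rx_mode and MAC address handlers
 * (igbvf_set_multi/igbvf_set_mac) relay the request to the PF over the
 * mailbox.
 */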
static const struct net_device_ops igbvf_netdev_ops = {
	.ndo_open		= igbvf_open,
	.ndo_stop		= igbvf_close,
	.ndo_start_xmit		= igbvf_xmit_frame,
	.ndo_get_stats		= igbvf_get_stats,
	.ndo_set_rx_mode	= igbvf_set_multi,
	.ndo_set_mac_address	= igbvf_set_mac,
	.ndo_change_mtu		= igbvf_change_mtu,
	.ndo_do_ioctl		= igbvf_ioctl,
	.ndo_tx_timeout		= igbvf_tx_timeout,
	.ndo_vlan_rx_add_vid	= igbvf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igbvf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igbvf_netpoll,
#endif
	.ndo_set_features	= igbvf_set_features,
};

/**
 * igbvf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igbvf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igbvf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igbvf_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igbvf_adapter *adapter;
	struct e1000_hw *hw;
	const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];
	static int cards_found;
	int err, pci_using_dac;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev,
					"No usable DMA configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_regions(pdev, igbvf_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct igbvf_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ei = ei;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->hw.back = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	err = -EIO;
	adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
				      pci_resource_len(pdev, 0));
	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	if (ei->get_variants) {
		err = ei->get_variants(adapter);
		if (err)
			goto err_ioremap;
	}

	/* setup adapter struct */
	err = igbvf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* construct the net_device struct */
	netdev->netdev_ops = &igbvf_netdev_ops;

	igbvf_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found++;

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

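	/* The VF's MAC address is assigned by the PF; reset_hw() doubles as
	 * the mailbox handshake that retrieves it. If the PF has not
	 * finished provisioning the VF yet, fall back to a random, locally
	 * administered address below.
	 */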
	/* reset the controller to put the device in a known good state */
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state, assigning new address."
			 " Is the PF interface up?\n");
		eth_hw_addr_random(netdev);
		memcpy(adapter->hw.mac.addr, netdev->dev_addr,
		       netdev->addr_len);
	} else {
		err = hw->mac.ops.read_mac_addr(hw);
		if (err) {
			dev_err(&pdev->dev, "Error reading MAC address\n");
			goto err_hw_init;
		}
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}

	/* set perm_addr before validating it, otherwise the check below
	 * would always see a zeroed address
	 */
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
			netdev->dev_addr);
		err = -EIO;
		goto err_hw_init;
	}

	setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
		    (unsigned long)adapter);

	INIT_WORK(&adapter->reset_task, igbvf_reset_task);
	INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);

	/* ring size defaults */
	adapter->rx_ring->count = 1024;
	adapter->tx_ring->count = 1024;

	/* reset the hardware with the new settings */
	igbvf_reset(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_hw_init;

	/* tell the stack to leave us alone until igbvf_open() is called */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	igbvf_print_device_info(adapter);

	igbvf_initialize_last_counter_stats(adapter);

	return 0;

err_hw_init:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	igbvf_reset_interrupt_capability(adapter);
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

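/* igbvf_remove below tears these resources down in the reverse order
 * that igbvf_probe acquired them, mirroring the error path above.
 */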
/**
 * igbvf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igbvf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igbvf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* The watchdog timer may be rescheduled, so explicitly
	 * disable it from being rescheduled.
	 */
	set_bit(__IGBVF_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	unregister_netdev(netdev);

	igbvf_reset_interrupt_capability(adapter);

	/* it is important to delete the napi struct prior to freeing the
	 * rx ring so that you do not end up with null pointer refs
	 */
	netif_napi_del(&adapter->rx_ring->napi);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/* PCI Error Recovery (ERS) */
static struct pci_error_handlers igbvf_err_handler = {
	.error_detected = igbvf_io_error_detected,
	.slot_reset = igbvf_io_slot_reset,
	.resume = igbvf_io_resume,
};

static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf },
	{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);

/* PCI Device API Driver */
static struct pci_driver igbvf_driver = {
	.name		= igbvf_driver_name,
	.id_table	= igbvf_pci_tbl,
	.probe		= igbvf_probe,
	.remove		= __devexit_p(igbvf_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend	= igbvf_suspend,
	.resume		= igbvf_resume,
#endif
	.shutdown	= igbvf_shutdown,
	.err_handler	= &igbvf_err_handler
};

/**
 * igbvf_init_module - Driver Registration Routine
 *
 * igbvf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igbvf_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version);
	pr_info("%s\n", igbvf_copyright);

	ret = pci_register_driver(&igbvf_driver);

	return ret;
}
module_init(igbvf_init_module);

/**
 * igbvf_exit_module - Driver Exit Cleanup Routine
 *
 * igbvf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igbvf_exit_module(void)
{
	pci_unregister_driver(&igbvf_driver);
}
module_exit(igbvf_exit_module);

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* netdev.c */