/*******************************************************************************

  Intel(R) 82576 Virtual Function Linux driver
  Copyright(c) 2009 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "igbvf.h"

#define DRV_VERSION "2.0.1-k"
char igbvf_driver_name[] = "igbvf";
const char igbvf_driver_version[] = DRV_VERSION;
static const char igbvf_driver_string[] =
	"Intel(R) Gigabit Virtual Function Network Driver";
static const char igbvf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";

static int igbvf_poll(struct napi_struct *napi, int budget);
static void igbvf_reset(struct igbvf_adapter *);
static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);

static struct igbvf_info igbvf_vf_info = {
	.mac		= e1000_vfadapt,
	.flags		= 0,
	.pba		= 10,
	.init_ops	= e1000_init_function_pointers_vf,
};

static struct igbvf_info igbvf_i350_vf_info = {
	.mac		= e1000_vfadapt_i350,
	.flags		= 0,
	.pba		= 10,
	.init_ops	= e1000_init_function_pointers_vf,
};

static const struct igbvf_info *igbvf_info_tbl[] = {
	[board_vf]	= &igbvf_vf_info,
	[board_i350_vf]	= &igbvf_i350_vf_info,
};

/**
 * igbvf_desc_unused - calculate the number of unused descriptors in a ring
 * @ring: ring for which to compute the count
 **/
static int igbvf_desc_unused(struct igbvf_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
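
/* Illustrative note (not in the original sources): the descriptor ring is a
 * circular buffer, and one slot is always kept empty so that next_to_use ==
 * next_to_clean unambiguously means "empty" rather than "full".  For example,
 * with count = 8, next_to_use = 6 and next_to_clean = 2, the second branch
 * above yields 8 + 2 - 6 - 1 = 3 free slots (slots 2..5 are in use).
 */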

/**
 * igbvf_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: pointer to the net device structure
 * @skb: pointer to sk_buff to be indicated to stack
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 **/
static void igbvf_receive_skb(struct igbvf_adapter *adapter,
			      struct net_device *netdev,
			      struct sk_buff *skb,
			      u32 status, u16 vlan)
{
	if (status & E1000_RXD_STAT_VP) {
		u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;

		if (test_bit(vid, adapter->active_vlans))
			__vlan_hwaccel_put_tag(skb, vid);
	}

	netif_receive_skb(skb);
}

static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
					 u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* do nothing if the Ignore Checksum bit is set or checksum was
	 * disabled through ethtool */
	if ((status_err & E1000_RXD_STAT_IXSM) ||
	    (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED))
		return;

	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	adapter->hw_csum_good++;
}

/**
 * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: address of ring structure to repopulate
 * @cleaned_count: number of buffers to repopulate
 **/
static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
				   int cleaned_count)
{
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igbvf_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	if (adapter->rx_ps_hdr_size)
		bufsz = adapter->rx_ps_hdr_size;
	else
		bufsz = adapter->rx_buffer_len;

	while (cleaned_count--) {
		rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);

		if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = alloc_page(GFP_ATOMIC);
				if (!buffer_info->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				dma_map_page(&pdev->dev, buffer_info->page,
					     buffer_info->page_offset,
					     PAGE_SIZE / 2,
					     DMA_FROM_DEVICE);
		}

		if (!buffer_info->skb) {
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			buffer_info->skb = skb;
			buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
							  bufsz,
							  DMA_FROM_DEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->rx_ps_hdr_size) {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
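
/* Illustrative note (not from the original sources): in packet-split mode
 * each 4 KiB page is used as two half-page packet buffers.  Flipping
 * page_offset with "^= PAGE_SIZE / 2" alternates between the two halves, so
 * a page can be handed back to hardware while the stack may still hold the
 * other half; igbvf_clean_rx_irq() only recycles a page when the driver
 * holds the sole reference (page_count() == 1).
 */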

/**
 * igbvf_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @work_done: incremented for each descriptor that is processed
 * @work_to_do: NAPI budget for this call
 *
 * The return value indicates whether actual cleaning was done; there is
 * no guarantee that everything was cleaned.
 **/
static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
			       int *work_done, int work_to_do)
{
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igbvf_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 length, hlen, staterr;

	i = rx_ring->next_to_clean;
	rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		buffer_info = &rx_ring->buffer_info[i];

		/* HW will not DMA in data larger than the given buffer, even
		 * if it parses the (NFS, of course) header to be larger.  In
		 * that case, it fills the header buffer and spills the rest
		 * into the page. */
		hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info)
			& E1000_RXDADV_HDRBUFLEN_MASK) >>
		       E1000_RXDADV_HDRBUFLEN_SHIFT;
		if (hlen > adapter->rx_ps_hdr_size)
			hlen = adapter->rx_ps_hdr_size;

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;
		if (!adapter->rx_ps_hdr_size) {
			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			skb_put(skb, length);
			goto send_up;
		}

		if (!skb_shinfo(skb)->nr_frags) {
			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_ps_hdr_size,
					 DMA_FROM_DEVICE);
			skb_put(skb, hlen);
		}

		if (length) {
			dma_unmap_page(&pdev->dev, buffer_info->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   buffer_info->page,
					   buffer_info->page_offset,
					   length);

			if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
			    (page_count(buffer_info->page) != 1))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += PAGE_SIZE / 2;
		}
send_up:
		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		total_bytes += skb->len;
		total_packets++;

		igbvf_rx_checksum_adv(adapter, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);

		igbvf_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) {
			igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igbvf_desc_unused(rx_ring);

	if (cleaned_count)
		igbvf_alloc_rx_buffers(rx_ring, cleaned_count);

	adapter->total_rx_packets += total_packets;
	adapter->total_rx_bytes += total_bytes;
	adapter->net_stats.rx_bytes += total_bytes;
	adapter->net_stats.rx_packets += total_packets;
	return cleaned;
}
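
/* Illustrative note (not from the original sources): when a frame spans
 * several descriptors (no EOP bit yet), the loop above swaps the
 * in-progress skb into the *next* buffer_info slot, so the following
 * iteration keeps appending page fragments to the same skb; only the
 * descriptor that carries EOP finally hands the assembled skb to
 * igbvf_receive_skb().
 */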

static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
			    struct igbvf_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev,
				       buffer_info->dma,
				       buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev,
					 buffer_info->dma,
					 buffer_info->length,
					 DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}

/**
 * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: ring to initialize
 *
 * Return 0 on success, negative on failure
 **/
int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
			     struct igbvf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct igbvf_buffer) * tx_ring->count;
	tx_ring->buffer_info = vzalloc(size);
	if (!tx_ring->buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->adapter = adapter;
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;
err:
	vfree(tx_ring->buffer_info);
	dev_err(&adapter->pdev->dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

/**
 * igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: ring to initialize
 *
 * Returns 0 on success, negative on failure
 **/
int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
			     struct igbvf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct igbvf_buffer) * rx_ring->count;
	rx_ring->buffer_info = vzalloc(size);
	if (!rx_ring->buffer_info)
		goto err;

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	rx_ring->adapter = adapter;

	return 0;

err:
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	dev_err(&adapter->pdev->dev,
		"Unable to allocate memory for the receive descriptor ring\n");
	return -ENOMEM;
}
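
/* Illustrative note (an assumption based on the 82576-family datasheets,
 * not stated in this file): descriptor ring base addresses must be
 * 128-byte aligned and ring lengths are programmed in whole descriptors,
 * so rounding the allocation up to 4 KiB and using dma_alloc_coherent()
 * (which returns page-aligned memory) comfortably satisfies both
 * constraints for the TDBA/TDLEN and RDBA/RDLEN writes made later.
 */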

/**
 * igbvf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = tx_ring->adapter;
	struct igbvf_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		igbvf_put_txbuf(adapter, buffer_info);
	}

	size = sizeof(struct igbvf_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * igbvf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: ring to free resources from
 *
 * Free all transmit software resources
 **/
void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
{
	struct pci_dev *pdev = tx_ring->adapter->pdev;

	igbvf_clean_tx_ring(tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igbvf_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
{
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct igbvf_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->rx_ps_hdr_size)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_ps_hdr_size,
						 DMA_FROM_DEVICE);
			else
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		if (buffer_info->page) {
			if (buffer_info->page_dma)
				dma_unmap_page(&pdev->dev,
					       buffer_info->page_dma,
					       PAGE_SIZE / 2,
					       DMA_FROM_DEVICE);
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_dma = 0;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igbvf_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * igbvf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
{
	struct pci_dev *pdev = rx_ring->adapter->pdev;

	igbvf_clean_rx_ring(rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);
	rx_ring->desc = NULL;
}

/**
 * igbvf_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte counts during the last
 * interrupt.  The advantage of per interrupt computation is faster updates
 * and more accurate ITR for the current traffic pattern.  Constants in this
 * function were computed based on theoretical maximum wire speed and
 * thresholds were set based on testing data as well as attempting to
 * minimize response time while increasing bulk throughput.
 **/
static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter,
					   enum latency_range itr_setting,
					   int packets, int bytes)
{
	enum latency_range retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000)
				retval = bulk_latency;
			else if ((packets < 10) || ((bytes/packets) > 1200))
				retval = bulk_latency;
			else if ((packets > 35))
				retval = lowest_latency;
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	default:
		break;
	}

update_itr_done:
	return retval;
}

static int igbvf_range_to_itr(enum latency_range current_range)
{
	int new_itr;

	switch (current_range) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IGBVF_70K_ITR;
		break;
	case low_latency:
		new_itr = IGBVF_20K_ITR;
		break;
	case bulk_latency:
		new_itr = IGBVF_4K_ITR;
		break;
	default:
		new_itr = IGBVF_START_ITR;
		break;
	}
	return new_itr;
}
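
/* Illustrative note (not from the original sources): the three latency
 * ranges form a small state machine driven by per-interval traffic:
 *
 *	lowest_latency (70k ints/s) - few, small packets / interactive
 *	low_latency    (20k ints/s) - mixed traffic
 *	bulk_latency   (4k ints/s)  - large transfers, TSO-heavy streams
 *
 * For example, an interval of 5 packets totalling 45000 bytes gives
 * bytes/packets = 9000 > 8000, which pushes both lowest_latency and
 * low_latency toward bulk_latency.
 */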

static void igbvf_set_itr(struct igbvf_adapter *adapter)
{
	u32 new_itr;

	adapter->tx_ring->itr_range =
			igbvf_update_itr(adapter,
					 adapter->tx_ring->itr_val,
					 adapter->total_tx_packets,
					 adapter->total_tx_bytes);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->requested_itr == 3 &&
	    adapter->tx_ring->itr_range == lowest_latency)
		adapter->tx_ring->itr_range = low_latency;

	new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range);

	if (new_itr != adapter->tx_ring->itr_val) {
		u32 current_itr = adapter->tx_ring->itr_val;
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing */
		new_itr = new_itr > current_itr ?
			  min(current_itr + (new_itr >> 2), new_itr) :
			  new_itr;
		adapter->tx_ring->itr_val = new_itr;

		adapter->tx_ring->set_itr = 1;
	}

	adapter->rx_ring->itr_range =
			igbvf_update_itr(adapter, adapter->rx_ring->itr_val,
					 adapter->total_rx_packets,
					 adapter->total_rx_bytes);
	if (adapter->requested_itr == 3 &&
	    adapter->rx_ring->itr_range == lowest_latency)
		adapter->rx_ring->itr_range = low_latency;

	new_itr = igbvf_range_to_itr(adapter->rx_ring->itr_range);

	if (new_itr != adapter->rx_ring->itr_val) {
		u32 current_itr = adapter->rx_ring->itr_val;

		new_itr = new_itr > current_itr ?
			  min(current_itr + (new_itr >> 2), new_itr) :
			  new_itr;
		adapter->rx_ring->itr_val = new_itr;

		adapter->rx_ring->set_itr = 1;
	}
}

/**
 * igbvf_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: ring to be cleaned
 *
 * Returns true if the ring is completely cleaned.
 **/
static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = tx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct igbvf_buffer *buffer_info;
	struct sk_buff *skb;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, eop, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		rmb();	/* read buffer_info after eop_desc status */
		for (cleaned = false; !cleaned; count++) {
			tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);
			skb = buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			igbvf_put_txbuf(adapter, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(count &&
		     netif_carrier_ok(netdev) &&
		     igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean. */
		smp_mb();
		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__IGBVF_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	adapter->net_stats.tx_bytes += total_bytes;
	adapter->net_stats.tx_packets += total_packets;
	return count < tx_ring->count;
}
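
/* Illustrative note (not from the original sources): the bytecount math
 * above credits the wire-level bytes of a TSO send.  E.g. a GSO skb with
 * skb->len = 64306 (66 bytes of headers + 64240 bytes of payload),
 * skb_headlen = 66 and gso_segs = 44 accounts for
 * (44 - 1) * 66 + 64306 = 67144 bytes, i.e. the headers that hardware
 * replicates for each of the 44 segments are counted too.
 */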

static irqreturn_t igbvf_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	adapter->int_counter1++;

	netif_carrier_off(netdev);
	hw->mac.get_link_status = 1;
	if (!test_bit(__IGBVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies + 1);

	ew32(EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;

	if (tx_ring->set_itr) {
		writel(tx_ring->itr_val,
		       adapter->hw.hw_addr + tx_ring->itr_register);
		adapter->tx_ring->set_itr = 0;
	}

	adapter->total_tx_bytes = 0;
	adapter->total_tx_packets = 0;

	/* auto mask will automatically re-enable the interrupt when we write
	 * EICS */
	if (!igbvf_clean_tx_irq(tx_ring))
		/* Ring was not completely cleaned, so fire another interrupt */
		ew32(EICS, tx_ring->eims_value);
	else
		ew32(EIMS, tx_ring->eims_value);

	return IRQ_HANDLED;
}

static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	adapter->int_counter0++;

	/* Write the ITR value calculated at the end of the
	 * previous interrupt. */
	if (adapter->rx_ring->set_itr) {
		writel(adapter->rx_ring->itr_val,
		       adapter->hw.hw_addr + adapter->rx_ring->itr_register);
		adapter->rx_ring->set_itr = 0;
	}

	if (napi_schedule_prep(&adapter->rx_ring->napi)) {
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->rx_ring->napi);
	}

	return IRQ_HANDLED;
}

#define IGBVF_NO_QUEUE	-1

static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
				int tx_queue, int msix_vector)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* 82576 uses a table-based method for assigning vectors.
	 * Each queue has a single entry in the table to which we write
	 * a vector number along with a "valid" bit.  Sadly, the layout
	 * of the table is somewhat counterintuitive. */
	if (rx_queue > IGBVF_NO_QUEUE) {
		index = (rx_queue >> 1);
		ivar = array_er32(IVAR0, index);
		if (rx_queue & 0x1) {
			/* vector goes into third byte of register */
			ivar = ivar & 0xFF00FFFF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
		} else {
			/* vector goes into low byte of register */
			ivar = ivar & 0xFFFFFF00;
			ivar |= msix_vector | E1000_IVAR_VALID;
		}
		adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
		array_ew32(IVAR0, index, ivar);
	}
	if (tx_queue > IGBVF_NO_QUEUE) {
		index = (tx_queue >> 1);
		ivar = array_er32(IVAR0, index);
		if (tx_queue & 0x1) {
			/* vector goes into high byte of register */
			ivar = ivar & 0x00FFFFFF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
		} else {
			/* vector goes into second byte of register */
			ivar = ivar & 0xFFFF00FF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
		}
		adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
		array_ew32(IVAR0, index, ivar);
	}
}
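
/* Illustrative note (derived from the masks above, not from the original
 * comments): each 32-bit IVAR0 entry covers two queue pairs, one byte lane
 * per cause:
 *
 *	byte 0 - Rx queue (even)	byte 1 - Tx queue (even)
 *	byte 2 - Rx queue (odd)		byte 3 - Tx queue (odd)
 *
 * so for a single-queue VF, Rx lands in byte 0 and Tx in byte 1 of
 * IVAR0[0], each tagged with E1000_IVAR_VALID.
 */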

/**
 * igbvf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * igbvf_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igbvf_configure_msix(struct igbvf_adapter *adapter)
{
	u32 tmp;
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	int vector = 0;

	adapter->eims_enable_mask = 0;

	igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++);
	adapter->eims_enable_mask |= tx_ring->eims_value;
	writel(tx_ring->itr_val, hw->hw_addr + tx_ring->itr_register);
	igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++);
	adapter->eims_enable_mask |= rx_ring->eims_value;
	writel(rx_ring->itr_val, hw->hw_addr + rx_ring->itr_register);

	/* set vector for other causes, i.e. link changes */
	tmp = (vector++ | E1000_IVAR_VALID);
	ew32(IVAR_MISC, tmp);

	adapter->eims_enable_mask = (1 << (vector)) - 1;
	adapter->eims_other = 1 << (vector - 1);
	e1e_flush();
}

static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	}
}

/**
 * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
 * @adapter: board private structure
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
{
	int err = -ENOMEM;
	int i;

	/* we allocate 3 vectors, 1 for Tx, 1 for Rx, one for PF messages */
	adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (adapter->msix_entries) {
		for (i = 0; i < 3; i++)
			adapter->msix_entries[i].entry = i;

		err = pci_enable_msix(adapter->pdev,
				      adapter->msix_entries, 3);
	}

	if (err) {
		/* MSI-X failed */
		dev_err(&adapter->pdev->dev,
			"Failed to initialize MSI-X interrupts.\n");
		igbvf_reset_interrupt_capability(adapter);
	}
}
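
/* Illustrative note (not from the original sources): with vector starting
 * at 0, igbvf_configure_msix() assigns vector 0 to Tx, vector 1 to Rx and
 * vector 2 to the mailbox/link-change cause, so eims_enable_mask becomes
 * (1 << 3) - 1 = 0x7 and eims_other = 1 << 2 = 0x4, matching the three
 * MSI-X entries requested in igbvf_set_interrupt_capability().
 */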

/**
 * igbvf_request_msix - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * igbvf_request_msix allocates MSI-X vectors and requests interrupts from
 * the kernel.
 **/
static int igbvf_request_msix(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0, vector = 0;

	if (strlen(netdev->name) < (IFNAMSIZ - 5)) {
		sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
		sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
	} else {
		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
			  netdev);
	if (err)
		goto out;

	adapter->tx_ring->itr_register = E1000_EITR(vector);
	adapter->tx_ring->itr_val = adapter->current_itr;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
			  netdev);
	if (err)
		goto out;

	adapter->rx_ring->itr_register = E1000_EITR(vector);
	adapter->rx_ring->itr_val = adapter->current_itr;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igbvf_msix_other, 0, netdev->name, netdev);
	if (err)
		goto out;

	igbvf_configure_msix(adapter);
	return 0;
out:
	return err;
}

/**
 * igbvf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 **/
static int __devinit igbvf_alloc_queues(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);

	return 0;
}

/**
 * igbvf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igbvf_request_irq(struct igbvf_adapter *adapter)
{
	int err = -1;

	/* igbvf supports msi-x only */
	if (adapter->msix_entries)
		err = igbvf_request_msix(adapter);

	if (!err)
		return err;

	dev_err(&adapter->pdev->dev,
		"Unable to allocate interrupt, Error: %d\n", err);

	return err;
}

static void igbvf_free_irq(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int vector;

	if (adapter->msix_entries) {
		for (vector = 0; vector < 3; vector++)
			free_irq(adapter->msix_entries[vector].vector, netdev);
	}
}

/**
 * igbvf_irq_disable - Mask off interrupt generation on the NIC
 **/
static void igbvf_irq_disable(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(EIMC, ~0);

	if (adapter->msix_entries)
		ew32(EIAC, 0);
}

/**
 * igbvf_irq_enable - Enable default interrupt generation settings
 **/
static void igbvf_irq_enable(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(EIAC, adapter->eims_enable_mask);
	ew32(EIAM, adapter->eims_enable_mask);
	ew32(EIMS, adapter->eims_enable_mask);
}

/**
 * igbvf_poll - NAPI Rx polling callback
 * @napi: struct associated with this polling callback
 * @budget: amount of packets driver is allowed to process this poll
 **/
static int igbvf_poll(struct napi_struct *napi, int budget)
{
	struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring,
						  napi);
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int work_done = 0;

	igbvf_clean_rx_irq(adapter, &work_done, budget);

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);

		if (adapter->requested_itr & 3)
			igbvf_set_itr(adapter);

		if (!test_bit(__IGBVF_DOWN, &adapter->state))
			ew32(EIMS, adapter->rx_ring->eims_value);
	}

	return work_done;
}

/**
 * igbvf_set_rlpml - set receive large packet maximum length
 * @adapter: board private structure
 *
 * Configure the maximum size of packets that will be received
 **/
static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
{
	int max_frame_size;
	struct e1000_hw *hw = &adapter->hw;

	max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE;
	e1000_rlpml_set_vf(hw, max_frame_size);
}

static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (hw->mac.ops.set_vfta(hw, vid, true)) {
		dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
		return -EINVAL;
	}
	set_bit(vid, adapter->active_vlans);
	return 0;
}

static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (hw->mac.ops.set_vfta(hw, vid, false)) {
		dev_err(&adapter->pdev->dev,
			"Failed to remove vlan id %d\n", vid);
		return -EINVAL;
	}
	clear_bit(vid, adapter->active_vlans);
	return 0;
}

static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		igbvf_vlan_rx_add_vid(adapter->netdev, vid);
}
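
/* Illustrative note (an assumption about the VF/PF split, consistent with
 * the mailbox ops used above): a VF cannot program the VLAN filter table
 * directly, so set_vfta is a request to the PF.  active_vlans mirrors the
 * granted filters so igbvf_restore_vlan() can replay them after a reset.
 */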

/**
 * igbvf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igbvf_configure_tx(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	u64 tdba;
	u32 txdctl, dca_txctrl;

	/* disable transmits */
	txdctl = er32(TXDCTL(0));
	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
	e1e_flush();
	msleep(10);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
	tdba = tx_ring->dma;
	ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
	ew32(TDBAH(0), (tdba >> 32));
	ew32(TDH(0), 0);
	ew32(TDT(0), 0);
	tx_ring->head = E1000_TDH(0);
	tx_ring->tail = E1000_TDT(0);

	/* Turn off Relaxed Ordering on head write-backs.  The writebacks
	 * MUST be delivered in order or it will completely screw up
	 * our bookkeeping. */
	dca_txctrl = er32(DCA_TXCTRL(0));
	dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
	ew32(DCA_TXCTRL(0), dca_txctrl);

	/* enable transmits */
	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	ew32(TXDCTL(0), txdctl);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS;

	/* enable Report Status bit */
	adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
}

/**
 * igbvf_setup_srrctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 srrctl = 0;

	srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
		    E1000_SRRCTL_BSIZEHDR_MASK |
		    E1000_SRRCTL_BSIZEPKT_MASK);

	/* Enable queue drop to avoid head of line blocking */
	srrctl |= E1000_SRRCTL_DROP_EN;

	/* Setup buffer sizes */
	srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
		  E1000_SRRCTL_BSIZEPKT_SHIFT;

	if (adapter->rx_buffer_len < 2048) {
		adapter->rx_ps_hdr_size = 0;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	} else {
		adapter->rx_ps_hdr_size = 128;
		srrctl |= adapter->rx_ps_hdr_size <<
			  E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	}

	ew32(SRRCTL(0), srrctl);
}
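
/* Illustrative note (not from the original sources): header split is only
 * worthwhile for large frames, so the descriptor type is picked by buffer
 * size: below 2048 bytes a single contiguous buffer is used (ONEBUF);
 * otherwise the first 128 bytes of headers land in a dedicated header
 * buffer and the payload is split into the half-page buffers set up by
 * igbvf_alloc_rx_buffers().
 */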

/**
 * igbvf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igbvf_configure_rx(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	u64 rdba;
	u32 rdlen, rxdctl;

	/* disable receives */
	rxdctl = er32(RXDCTL(0));
	ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
	e1e_flush();
	msleep(10);

	rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	rdba = rx_ring->dma;
	ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
	ew32(RDBAH(0), (rdba >> 32));
	ew32(RDLEN(0), rdlen);
	rx_ring->head = E1000_RDH(0);
	rx_ring->tail = E1000_RDT(0);
	ew32(RDH(0), 0);
	ew32(RDT(0), 0);

	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	rxdctl &= 0xFFF00000;
	rxdctl |= IGBVF_RX_PTHRESH;
	rxdctl |= IGBVF_RX_HTHRESH << 8;
	rxdctl |= IGBVF_RX_WTHRESH << 16;

	igbvf_set_rlpml(adapter);

	/* enable receives */
	ew32(RXDCTL(0), rxdctl);
}
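
/* Illustrative note (not from the original sources): RXDCTL packs its
 * prefetch/host/write-back thresholds into byte lanes, which is why the
 * code above clears the low 20 bits and then ORs PTHRESH at bit 0,
 * HTHRESH at bit 8 and WTHRESH at bit 16 before re-enabling the queue.
 */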

/**
 * igbvf_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igbvf_set_multi(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list = NULL;
	int i;

	if (!netdev_mc_empty(netdev)) {
		mta_list = kmalloc(netdev_mc_count(netdev) * ETH_ALEN,
				   GFP_ATOMIC);
		if (!mta_list) {
			dev_err(&adapter->pdev->dev,
				"failed to allocate multicast filter list\n");
			return;
		}
	}

	/* prepare a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
	kfree(mta_list);
}

/**
 * igbvf_configure - configure the hardware for Rx and Tx
 * @adapter: private board structure
 **/
static void igbvf_configure(struct igbvf_adapter *adapter)
{
	igbvf_set_multi(adapter->netdev);

	igbvf_restore_vlan(adapter);

	igbvf_configure_tx(adapter);
	igbvf_setup_srrctl(adapter);
	igbvf_configure_rx(adapter);
	igbvf_alloc_rx_buffers(adapter->rx_ring,
			       igbvf_desc_unused(adapter->rx_ring));
}

/* igbvf_reset - bring the hardware into a known good state
 *
 * This function boots the hardware and enables some settings that
 * require a configuration cycle of the hardware - those cannot be
 * set/changed during runtime.  After reset the device needs to be
 * properly configured for Rx, Tx etc. */
static void igbvf_reset(struct igbvf_adapter *adapter)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;

	/* Allow time for pending master requests to run */
	if (mac->ops.reset_hw(hw))
		dev_err(&adapter->pdev->dev, "PF still resetting\n");

	mac->ops.init_hw(hw);

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}

	adapter->last_reset = jiffies;
}

int igbvf_up(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	igbvf_configure(adapter);

	clear_bit(__IGBVF_DOWN, &adapter->state);

	napi_enable(&adapter->rx_ring->napi);
	if (adapter->msix_entries)
		igbvf_configure_msix(adapter);

	/* Clear any pending interrupts. */
	er32(EICR);
	igbvf_irq_enable(adapter);

	/* start the watchdog */
	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies + 1);

	return 0;
}

void igbvf_down(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 rxdctl, txdctl;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGBVF_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rxdctl = er32(RXDCTL(0));
	ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);

	netif_stop_queue(netdev);

	/* disable transmits in the hardware */
	txdctl = er32(TXDCTL(0));
	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);

	/* flush both disables and wait for them to finish */
	e1e_flush();
	msleep(10);

	napi_disable(&adapter->rx_ring->napi);

	igbvf_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);

	netif_carrier_off(netdev);

	/* record the stats before reset */
	igbvf_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	igbvf_reset(adapter);
	igbvf_clean_tx_ring(adapter->tx_ring);
	igbvf_clean_rx_ring(adapter->rx_ring);
}

void igbvf_reinit_locked(struct igbvf_adapter *adapter)
{
	might_sleep();
	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
		msleep(1);
	igbvf_down(adapter);
	igbvf_up(adapter);
	clear_bit(__IGBVF_RESETTING, &adapter->state);
}

/**
 * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter)
 * @adapter: board private structure to initialize
 *
 * igbvf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	s32 rc;

	adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
	adapter->rx_ps_hdr_size = 0;
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	adapter->tx_int_delay = 8;
	adapter->tx_abs_int_delay = 32;
	adapter->rx_int_delay = 0;
	adapter->rx_abs_int_delay = 8;
	adapter->requested_itr = 3;
	adapter->current_itr = IGBVF_START_ITR;

	/* Set various function pointers */
	adapter->ei->init_ops(&adapter->hw);

	rc = adapter->hw.mac.ops.init_params(&adapter->hw);
	if (rc)
		return rc;

	rc = adapter->hw.mbx.ops.init_params(&adapter->hw);
	if (rc)
		return rc;

	igbvf_set_interrupt_capability(adapter);

	if (igbvf_alloc_queues(adapter))
		return -ENOMEM;

	spin_lock_init(&adapter->tx_queue_lock);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igbvf_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);

	set_bit(__IGBVF_DOWN, &adapter->state);
	return 0;
}

static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	adapter->stats.last_gprc = er32(VFGPRC);
	adapter->stats.last_gorc = er32(VFGORC);
	adapter->stats.last_gptc = er32(VFGPTC);
	adapter->stats.last_gotc = er32(VFGOTC);
	adapter->stats.last_mprc = er32(VFMPRC);
	adapter->stats.last_gotlbc = er32(VFGOTLBC);
	adapter->stats.last_gptlbc = er32(VFGPTLBC);
	adapter->stats.last_gorlbc = er32(VFGORLBC);
	adapter->stats.last_gprlbc = er32(VFGPRLBC);

	adapter->stats.base_gprc = er32(VFGPRC);
	adapter->stats.base_gorc = er32(VFGORC);
	adapter->stats.base_gptc = er32(VFGPTC);
	adapter->stats.base_gotc = er32(VFGOTC);
	adapter->stats.base_mprc = er32(VFMPRC);
	adapter->stats.base_gotlbc = er32(VFGOTLBC);
	adapter->stats.base_gptlbc = er32(VFGPTLBC);
	adapter->stats.base_gorlbc = er32(VFGORLBC);
	adapter->stats.base_gprlbc = er32(VFGPRLBC);
}

/**
 * igbvf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igbvf_open(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__IGBVF_TESTING, &adapter->state))
		return -EBUSY;

	/* allocate transmit descriptors */
	err = igbvf_setup_tx_resources(adapter, adapter->tx_ring);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igbvf_setup_rx_resources(adapter, adapter->rx_ring);
	if (err)
		goto err_setup_rx;

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so. */
	igbvf_configure(adapter);

	err = igbvf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igbvf_up() */
	clear_bit(__IGBVF_DOWN, &adapter->state);

	napi_enable(&adapter->rx_ring->napi);

	/* clear any pending interrupts */
	er32(EICR);

	igbvf_irq_enable(adapter);

	/* start the watchdog */
	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies + 1);

	return 0;

err_req_irq:
	igbvf_free_rx_resources(adapter->rx_ring);
err_setup_rx:
	igbvf_free_tx_resources(adapter->tx_ring);
err_setup_tx:
	igbvf_reset(adapter);

	return err;
}

/**
 * igbvf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igbvf_close(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
	igbvf_down(adapter);

	igbvf_free_irq(adapter);

	igbvf_free_tx_resources(adapter->tx_ring);
	igbvf_free_rx_resources(adapter->rx_ring);

	return 0;
}

/**
 * igbvf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igbvf_set_mac(struct net_device *netdev, void *p)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

	if (memcmp(addr->sa_data, hw->mac.addr, ETH_ALEN))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return 0;
}

#define UPDATE_VF_COUNTER(reg, name)					\
{									\
	u32 current_counter = er32(reg);				\
	if (current_counter < adapter->stats.last_##name)		\
		adapter->stats.name += 0x100000000LL;			\
	adapter->stats.last_##name = current_counter;			\
	adapter->stats.name &= 0xFFFFFFFF00000000LL;			\
	adapter->stats.name |= current_counter;				\
}
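
/* Illustrative note (not from the original sources): the VF statistics
 * registers are 32-bit and roll over, so UPDATE_VF_COUNTER widens them to
 * 64 bits in software.  E.g. if last_gprc was 0xFFFFFFF0 and the register
 * now reads 0x00000010, the new value is smaller, so 2^32 is added to the
 * running 64-bit total before its low 32 bits are replaced with the
 * current register value.
 */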

/**
 * igbvf_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void igbvf_update_stats(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;

	/* Prevent stats update while adapter is being reset, link is down
	 * or if the pci connection is down. */
	if (adapter->link_speed == 0)
		return;

	if (test_bit(__IGBVF_RESETTING, &adapter->state))
		return;

	if (pci_channel_offline(pdev))
		return;

	UPDATE_VF_COUNTER(VFGPRC, gprc);
	UPDATE_VF_COUNTER(VFGORC, gorc);
	UPDATE_VF_COUNTER(VFGPTC, gptc);
	UPDATE_VF_COUNTER(VFGOTC, gotc);
	UPDATE_VF_COUNTER(VFMPRC, mprc);
	UPDATE_VF_COUNTER(VFGOTLBC, gotlbc);
	UPDATE_VF_COUNTER(VFGPTLBC, gptlbc);
	UPDATE_VF_COUNTER(VFGORLBC, gorlbc);
	UPDATE_VF_COUNTER(VFGPRLBC, gprlbc);

	/* Fill out the OS statistics structure */
	adapter->net_stats.multicast = adapter->stats.mprc;
}

static void igbvf_print_link_info(struct igbvf_adapter *adapter)
{
	dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n",
		 adapter->link_speed,
		 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
}

static bool igbvf_has_link(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	s32 ret_val = E1000_SUCCESS;
	bool link_active;

	/* If interface is down, stay link down */
	if (test_bit(__IGBVF_DOWN, &adapter->state))
		return false;

	ret_val = hw->mac.ops.check_for_link(hw);
	link_active = !hw->mac.get_link_status;

	/* if check for link returns error we will need to reset */
	if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ)))
		schedule_work(&adapter->reset_task);

	return link_active;
}

/**
 * igbvf_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igbvf_watchdog(unsigned long data)
{
	struct igbvf_adapter *adapter = (struct igbvf_adapter *)data;

	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

static void igbvf_watchdog_task(struct work_struct *work)
{
	struct igbvf_adapter *adapter = container_of(work,
						     struct igbvf_adapter,
						     watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	struct e1000_hw *hw = &adapter->hw;
	u32 link;
	int tx_pending = 0;

	link = igbvf_has_link(adapter);

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			mac->ops.get_link_up_info(&adapter->hw,
						  &adapter->link_speed,
						  &adapter->link_duplex);
			igbvf_print_link_info(adapter);

			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			dev_info(&adapter->pdev->dev, "Link is Down\n");
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	}

	if (netif_carrier_ok(netdev)) {
		igbvf_update_stats(adapter);
	} else {
		tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
			      tx_ring->count);
		if (tx_pending) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
		}
	}

	/* Cause software interrupt to ensure Rx ring is cleaned */
	ew32(EICS, adapter->rx_ring->eims_value);

	/* Reset the timer */
	if (!test_bit(__IGBVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + (2 * HZ)));
}

#define IGBVF_TX_FLAGS_CSUM		0x00000001
#define IGBVF_TX_FLAGS_VLAN		0x00000002
#define IGBVF_TX_FLAGS_TSO		0x00000004
#define IGBVF_TX_FLAGS_IPV4		0x00000008
#define IGBVF_TX_FLAGS_VLAN_MASK	0xffff0000
#define IGBVF_TX_FLAGS_VLAN_SHIFT	16

static int igbvf_tso(struct igbvf_adapter *adapter,
		     struct igbvf_ring *tx_ring,
		     struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct e1000_adv_tx_context_desc *context_desc;
	struct igbvf_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	u32 mss_l4len_idx, l4len;
	unsigned int i;
	int err;

	*hdr_len = 0;

	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err) {
			dev_err(&adapter->pdev->dev,
				"igbvf_tso returning an error\n");
			return err;
		}
	}

	l4len = tcp_hdrlen(skb);
	*hdr_len += l4len;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
	}

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
	/* VLAN MACLEN IPLEN */
	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
		info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
	*hdr_len += skb_network_offset(skb);
	info |= (skb_transport_header(skb) - skb_network_header(skb));
	*hdr_len += (skb_transport_header(skb) - skb_network_header(skb));
	context_desc->vlan_macip_lens = cpu_to_le32(info);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

	if (skb->protocol == htons(ETH_P_IP))
		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;

	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

	/* MSS L4LEN IDX */
	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);

	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->seqnum_seed = 0;

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = 0;
	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}
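
/* Illustrative note (not from the original sources): a TSO send consumes
 * one advanced context descriptor (written above) carrying the MAC/IP
 * header lengths, MSS and L4 header length; the data descriptors queued
 * afterwards reference it, letting hardware rebuild the headers and
 * checksums for every segment it cuts from the payload.
 */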

static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
				 struct igbvf_ring *tx_ring,
				 struct sk_buff *skb, u32 tx_flags)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct igbvf_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IGBVF_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IGBVF_TX_FLAGS_VLAN)
			info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);

		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			info |= (skb_transport_header(skb) -
				 skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(info);

		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case __constant_htons(ETH_P_IP):
				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			case __constant_htons(ETH_P_IPV6):
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			default:
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
		context_desc->seqnum_seed = 0;
		context_desc->mss_l4len_idx = 0;

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = 0;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}

static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* if there are enough free descriptors, we don't need to worry */
	if (igbvf_desc_unused(adapter->tx_ring) >= size)
		return 0;

	netif_stop_queue(netdev);

	smp_mb();

	/* We need to check again just in case room has been made available */
	if (igbvf_desc_unused(adapter->tx_ring) < size)
		return -EBUSY;

	netif_wake_queue(netdev);

	++adapter->restart_queue;
	return 0;
}

#define IGBVF_MAX_TXD_PWR	16
#define IGBVF_MAX_DATA_PER_TXD	(1 << IGBVF_MAX_TXD_PWR)

static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
				   struct igbvf_ring *tx_ring,
				   struct sk_buff *skb,
				   unsigned int first)
{
	struct igbvf_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int len = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
	buffer_info->length = len;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->mapped_as_page = false;
	buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, buffer_info->dma))
		goto dma_error;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		const struct skb_frag_struct *frag;

		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;

		frag = &skb_shinfo(skb)->frags[f];
		len = skb_frag_size(frag);

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->mapped_as_page = true;
		buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;
	}
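
	/*
	 * Descriptive note (editor's addition): only the last descriptor of
	 * the chain owns the skb pointer, while the first descriptor's
	 * next_to_watch is pointed at that last index, so the cleanup path
	 * can tell when the whole packet has been written back.
	 */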

	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return ++count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed buffer_info mapping */
	buffer_info->dma = 0;
	buffer_info->time_stamp = 0;
	buffer_info->length = 0;
	buffer_info->next_to_watch = 0;
	buffer_info->mapped_as_page = false;
	if (count)
		count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		igbvf_put_txbuf(adapter, buffer_info);
	}

	return 0;
}

static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
				      struct igbvf_ring *tx_ring,
				      int tx_flags, int count, u32 paylen,
				      u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc = NULL;
	struct igbvf_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
			E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGBVF_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGBVF_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

	} else if (tx_flags & IGBVF_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
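
	/*
	 * Worked example (editor's illustrative note, not upstream text):
	 * for a TSO packet with skb->len == 1554 and hdr_len == 54, the
	 * olinfo_status written above carries PAYLEN = 1554 - 54 = 1500,
	 * i.e. only the payload length, shifted by
	 * E1000_ADVTXD_PAYLEN_SHIFT.
	 */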
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
	/* we need this if more than one processor can write to our tail
	 * at a time; it synchronizes IO on IA64/Altix systems */
	mmiowb();
}
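
/*
 * Transmit-path overview (editor's illustrative summary of the code in
 * this file, not upstream text):
 *
 *	igbvf_maybe_stop_tx()		ring-space check
 *	igbvf_tso()/igbvf_tx_csum()	optional context descriptor
 *	igbvf_tx_map_adv()		DMA-map head and fragments
 *	igbvf_tx_queue_adv()		fill data descriptors, bump tail
 */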

static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
					     struct net_device *netdev,
					     struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	unsigned int first, tx_flags = 0;
	u8 hdr_len = 0;
	int count = 0;
	int tso = 0;

	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/*
	 * need: count + 4 descriptors:
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for context descriptor;
	 * otherwise try again next time
	 */
	if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= IGBVF_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IGBVF_TX_FLAGS_IPV4;

	first = tx_ring->next_to_use;

	tso = skb_is_gso(skb) ?
		igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0;
	if (unlikely(tso < 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IGBVF_TX_FLAGS_TSO;
	else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGBVF_TX_FLAGS_CSUM;

	/*
	 * count reflects descriptors mapped; if 0 then a mapping error
	 * has occurred and we need to rewind the descriptor queue
	 */
	count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);

	if (count) {
		igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
				   skb->len, hdr_len);
		/* Make sure there is space in the ring for the next send. */
		igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
	} else {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}

static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct igbvf_ring *tx_ring;

	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = &adapter->tx_ring[0];

	return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring);
}

/**
 * igbvf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igbvf_tx_timeout(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}

static void igbvf_reset_task(struct work_struct *work)
{
	struct igbvf_adapter *adapter;
	adapter = container_of(work, struct igbvf_adapter, reset_task);

	igbvf_reinit_locked(adapter);
}

/**
 * igbvf_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *igbvf_get_stats(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}

/**
 * igbvf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

#define MAX_STD_JUMBO_FRAME_SIZE 9234
	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
		msleep(1);
	/* igbvf_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;
	if (netif_running(netdev))
		igbvf_down(adapter);

	/*
	 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically
	 * NET_IP_ALIGN means we reserve 2 more; this pushes us to allocate
	 * from the next larger slab size, i.e. RXBUFFER_2048 --> size-4096
	 * slab.  However, with the new *_jumbo_rx* routines, jumbo receives
	 * will use fragmented skbs.
	 */
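
	/*
	 * Worked example (editor's illustrative note, not upstream text):
	 * the standard MTU of 1500 gives max_frame = 1500 + 14 + 4 = 1518,
	 * so the chain below first picks a 2048-byte buffer, and the
	 * standard-frame adjustment underneath then trims it to
	 * ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN = 1522 bytes.
	 */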

	if (max_frame <= 1024)
		adapter->rx_buffer_len = 1024;
	else if (max_frame <= 2048)
		adapter->rx_buffer_len = 2048;
	else
#if (PAGE_SIZE / 2) > 16384
		adapter->rx_buffer_len = 16384;
#else
		adapter->rx_buffer_len = PAGE_SIZE / 2;
#endif

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
	    (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
					 ETH_FCS_LEN;

	dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igbvf_up(adapter);
	else
		igbvf_reset(adapter);

	clear_bit(__IGBVF_RESETTING, &adapter->state);

	return 0;
}

static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	default:
		return -EOPNOTSUPP;
	}
}

static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
		igbvf_down(adapter);
		igbvf_free_irq(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int igbvf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	int err;

	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}

	pci_set_master(pdev);

	if (netif_running(netdev)) {
		err = igbvf_request_irq(adapter);
		if (err)
			return err;
	}

	igbvf_reset(adapter);

	if (netif_running(netdev))
		igbvf_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
#endif

static void igbvf_shutdown(struct pci_dev *pdev)
{
	igbvf_suspend(pdev, PMSG_SUSPEND);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void igbvf_netpoll(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);

	igbvf_clean_tx_irq(adapter->tx_ring);

	enable_irq(adapter->pdev->irq);
}
#endif
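
/*
 * PCI error recovery overview (editor's illustrative note, not upstream
 * text): the PCI ERS core drives the three callbacks below in order,
 * error_detected -> slot_reset -> resume, as registered later in
 * igbvf_err_handler.
 */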

/**
 * igbvf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igbvf_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igbvf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.  Implementation
 * resembles the first half of the igbvf_resume routine.
 */
static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	igbvf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * igbvf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.  Implementation resembles the
 * second half of the igbvf_resume routine.
 */
static void igbvf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igbvf_up(adapter)) {
			dev_err(&pdev->dev,
				"can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

static void igbvf_print_device_info(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	if (hw->mac.type == e1000_vfadapt_i350)
		dev_info(&pdev->dev, "Intel(R) I350 Virtual Function\n");
	else
		dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
	dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
}

static int igbvf_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (features & NETIF_F_RXCSUM)
		adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED;
	else
		adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED;

	return 0;
}

static const struct net_device_ops igbvf_netdev_ops = {
	.ndo_open		= igbvf_open,
	.ndo_stop		= igbvf_close,
	.ndo_start_xmit		= igbvf_xmit_frame,
	.ndo_get_stats		= igbvf_get_stats,
	.ndo_set_rx_mode	= igbvf_set_multi,
	.ndo_set_mac_address	= igbvf_set_mac,
	.ndo_change_mtu		= igbvf_change_mtu,
	.ndo_do_ioctl		= igbvf_ioctl,
	.ndo_tx_timeout		= igbvf_tx_timeout,
	.ndo_vlan_rx_add_vid	= igbvf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igbvf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igbvf_netpoll,
#endif
	.ndo_set_features	= igbvf_set_features,
};
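
/*
 * Editor's illustrative note (not upstream text): with the ops table
 * above, a frame handed to the stack's dev_queue_xmit() reaches
 * igbvf_xmit_frame() via .ndo_start_xmit, and an MTU change such as
 * "ip link set <dev> mtu 9000" lands in igbvf_change_mtu().
 */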

/**
 * igbvf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igbvf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igbvf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igbvf_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igbvf_adapter *adapter;
	struct e1000_hw *hw;
	const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];

	static int cards_found;
	int err, pci_using_dac;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev,
					"No usable DMA configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_regions(pdev, igbvf_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct igbvf_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ei = ei;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->hw.back = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	err = -EIO;
	adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
				      pci_resource_len(pdev, 0));

	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	if (ei->get_variants) {
		err = ei->get_variants(adapter);
		if (err)
			goto err_ioremap;
	}

	/* setup adapter struct */
	err = igbvf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* construct the net_device struct */
	netdev->netdev_ops = &igbvf_netdev_ops;

	igbvf_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found++;

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;
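
	/*
	 * Descriptive note (editor's addition): everything in hw_features
	 * can be toggled at run time through ethtool, while the VLAN
	 * offload bits added only to netdev->features above stay fixed.
	 */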

	/* reset the controller to put the device in a known good state */
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state, assigning new address."
			 " Is the PF interface up?\n");
		dev_hw_addr_random(adapter->netdev, hw->mac.addr);
	} else {
		err = hw->mac.ops.read_mac_addr(hw);
		if (err) {
			dev_err(&pdev->dev, "Error reading MAC address\n");
			goto err_hw_init;
		}
	}

	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
			netdev->dev_addr);
		err = -EIO;
		goto err_hw_init;
	}

	setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
		    (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igbvf_reset_task);
	INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);

	/* ring size defaults */
	adapter->rx_ring->count = 1024;
	adapter->tx_ring->count = 1024;

	/* reset the hardware with the new settings */
	igbvf_reset(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_hw_init;

	/* tell the stack to leave us alone until igbvf_open() is called */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	igbvf_print_device_info(adapter);

	igbvf_initialize_last_counter_stats(adapter);

	return 0;

err_hw_init:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	igbvf_reset_interrupt_capability(adapter);
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
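
/*
 * Editor's illustrative note (not upstream text): the error labels above
 * unwind strictly in reverse order of acquisition; e.g. a
 * register_netdev() failure frees both rings, drops the interrupt
 * vectors, unmaps BAR 0, frees the netdev, and finally releases the PCI
 * regions and disables the device.
 */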

/**
 * igbvf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igbvf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igbvf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * The watchdog timer may be rescheduled, so explicitly
	 * disable it from being rescheduled.
	 */
	set_bit(__IGBVF_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	unregister_netdev(netdev);

	igbvf_reset_interrupt_capability(adapter);

	/*
	 * it is important to delete the napi struct prior to freeing the
	 * rx ring so that you do not end up with null pointer refs
	 */
	netif_napi_del(&adapter->rx_ring->napi);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/* PCI Error Recovery (ERS) */
static struct pci_error_handlers igbvf_err_handler = {
	.error_detected = igbvf_io_error_detected,
	.slot_reset = igbvf_io_slot_reset,
	.resume = igbvf_io_resume,
};

static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf },
	{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);

/* PCI Device API Driver */
static struct pci_driver igbvf_driver = {
	.name		= igbvf_driver_name,
	.id_table	= igbvf_pci_tbl,
	.probe		= igbvf_probe,
	.remove		= __devexit_p(igbvf_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend	= igbvf_suspend,
	.resume		= igbvf_resume,
#endif
	.shutdown	= igbvf_shutdown,
	.err_handler	= &igbvf_err_handler
};

/**
 * igbvf_init_module - Driver Registration Routine
 *
 * igbvf_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init igbvf_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version);
	pr_info("%s\n", igbvf_copyright);

	ret = pci_register_driver(&igbvf_driver);

	return ret;
}
module_init(igbvf_init_module);

/**
 * igbvf_exit_module - Driver Exit Cleanup Routine
 *
 * igbvf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igbvf_exit_module(void)
{
	pci_unregister_driver(&igbvf_driver);
}
module_exit(igbvf_exit_module);

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* netdev.c */