/*******************************************************************************

  Intel(R) 82576 Virtual Function Linux driver
  Copyright(c) 2009 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "igbvf.h"

#define DRV_VERSION "2.0.2-k"
char igbvf_driver_name[] = "igbvf";
const char igbvf_driver_version[] = DRV_VERSION;
static const char igbvf_driver_string[] =
	"Intel(R) Gigabit Virtual Function Network Driver";
static const char igbvf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int igbvf_poll(struct napi_struct *napi, int budget);
static void igbvf_reset(struct igbvf_adapter *);
static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);

static struct igbvf_info igbvf_vf_info = {
	.mac		= e1000_vfadapt,
	.flags		= 0,
	.pba		= 10,
	.init_ops	= e1000_init_function_pointers_vf,
};

static struct igbvf_info igbvf_i350_vf_info = {
	.mac		= e1000_vfadapt_i350,
	.flags		= 0,
	.pba		= 10,
	.init_ops	= e1000_init_function_pointers_vf,
};

static const struct igbvf_info *igbvf_info_tbl[] = {
	[board_vf]	= &igbvf_vf_info,
	[board_i350_vf]	= &igbvf_i350_vf_info,
};

/**
 * igbvf_desc_unused - calculate if we have unused descriptors
 **/
static int igbvf_desc_unused(struct igbvf_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
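
/* Illustrative example (added; not in the original source): with
 * count = 256, next_to_clean = 10 and next_to_use = 250, the ring has
 * 256 + 10 - 250 - 1 = 15 unused slots.  One slot is always kept empty so
 * that next_to_use == next_to_clean unambiguously means "nothing queued"
 * rather than "ring full".
 */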

/**
 * igbvf_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: pointer to the netdev struct
 * @skb: pointer to sk_buff to be indicated to stack
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 **/
static void igbvf_receive_skb(struct igbvf_adapter *adapter,
			      struct net_device *netdev,
			      struct sk_buff *skb,
			      u32 status, u16 vlan)
{
	u16 vid;

	if (status & E1000_RXD_STAT_VP) {
		if ((adapter->flags & IGBVF_FLAG_RX_LB_VLAN_BSWAP) &&
		    (status & E1000_RXDEXT_STATERR_LB))
			vid = be16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
		else
			vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
		if (test_bit(vid, adapter->active_vlans))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	napi_gro_receive(&adapter->rx_ring->napi, skb);
}

static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
					 u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* The Ignore Checksum bit is set, or checksum was disabled
	 * through ethtool
	 */
	if ((status_err & E1000_RXD_STAT_IXSM) ||
	    (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED))
		return;

	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	adapter->hw_csum_good++;
}
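
/* Note (added for clarity): CHECKSUM_UNNECESSARY tells the stack to skip
 * software verification because the NIC already validated the L4 checksum;
 * hw_csum_err and hw_csum_good are driver-private counters, assumed to be
 * surfaced through the driver's ethtool statistics.
 */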

/**
 * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: address of ring structure to repopulate
 * @cleaned_count: number of buffers to repopulate
 **/
static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
				   int cleaned_count)
{
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igbvf_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	if (adapter->rx_ps_hdr_size)
		bufsz = adapter->rx_ps_hdr_size;
	else
		bufsz = adapter->rx_buffer_len;

	while (cleaned_count--) {
		rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);

		if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = alloc_page(GFP_ATOMIC);
				if (!buffer_info->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				dma_map_page(&pdev->dev, buffer_info->page,
					     buffer_info->page_offset,
					     PAGE_SIZE / 2,
					     DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev,
					      buffer_info->page_dma)) {
				__free_page(buffer_info->page);
				buffer_info->page = NULL;
				dev_err(&pdev->dev, "RX DMA map failed\n");
				break;
			}
		}

		if (!buffer_info->skb) {
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			buffer_info->skb = skb;
			buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
							  bufsz,
							  DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				dev_kfree_skb(buffer_info->skb);
				buffer_info->skb = NULL;
				dev_err(&pdev->dev, "RX DMA map failed\n");
				goto no_buffers;
			}
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info.
		 */
		if (adapter->rx_ps_hdr_size) {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
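
/* Clarifying note (added; an interpretation of the code above): the tail
 * register is written with the index of the last descriptor actually
 * populated, i.e. one behind next_to_use.  For example, if six buffers are
 * refilled starting at index 10, next_to_use becomes 16 and 15 is written
 * to the tail.
 */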

/**
 * igbvf_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @work_done: cumulative count of packets processed, updated here
 * @work_to_do: NAPI budget; upper bound on packets to process
 *
 * The return value indicates whether actual cleaning was done; there is
 * no guarantee that everything was cleaned.
 **/
static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
			       int *work_done, int work_to_do)
{
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igbvf_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 length, hlen, staterr;

	i = rx_ring->next_to_clean;
	rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		buffer_info = &rx_ring->buffer_info[i];

		/* HW will not DMA in data larger than the given buffer, even
		 * if it parses the (NFS, of course) header to be larger.  In
		 * that case, it fills the header buffer and spills the rest
		 * into the page.
		 */
		hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) &
			E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
		if (hlen > adapter->rx_ps_hdr_size)
			hlen = adapter->rx_ps_hdr_size;

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;
		if (!adapter->rx_ps_hdr_size) {
			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			skb_put(skb, length);
			goto send_up;
		}

		if (!skb_shinfo(skb)->nr_frags) {
			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_ps_hdr_size,
					 DMA_FROM_DEVICE);
			skb_put(skb, hlen);
		}

		if (length) {
			dma_unmap_page(&pdev->dev, buffer_info->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   buffer_info->page,
					   buffer_info->page_offset,
					   length);

			if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
			    (page_count(buffer_info->page) != 1))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += PAGE_SIZE / 2;
		}
send_up:
		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		total_bytes += skb->len;
		total_packets++;

		igbvf_rx_checksum_adv(adapter, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);

		igbvf_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) {
			igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igbvf_desc_unused(rx_ring);

	if (cleaned_count)
		igbvf_alloc_rx_buffers(rx_ring, cleaned_count);

	adapter->total_rx_packets += total_packets;
	adapter->total_rx_bytes += total_bytes;
	adapter->net_stats.rx_bytes += total_bytes;
	adapter->net_stats.rx_packets += total_packets;
	return cleaned;
}

static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
			    struct igbvf_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev,
				       buffer_info->dma,
				       buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev,
					 buffer_info->dma,
					 buffer_info->length,
					 DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}

/**
 * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: ring to be initialized
 *
 * Returns 0 on success, negative on failure
 **/
int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
			     struct igbvf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct igbvf_buffer) * tx_ring->count;
	tx_ring->buffer_info = vzalloc(size);
	if (!tx_ring->buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->adapter = adapter;
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;
err:
	vfree(tx_ring->buffer_info);
	dev_err(&adapter->pdev->dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

/**
 * igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: ring to be initialized
 *
 * Returns 0 on success, negative on failure
 **/
int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
			     struct igbvf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct igbvf_buffer) * rx_ring->count;
	rx_ring->buffer_info = vzalloc(size);
	if (!rx_ring->buffer_info)
		goto err;

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	rx_ring->adapter = adapter;

	return 0;

err:
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	dev_err(&adapter->pdev->dev,
		"Unable to allocate memory for the receive descriptor ring\n");
	return -ENOMEM;
}
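
/* Sizing example (added for illustration): advanced descriptors are
 * 16 bytes each, so a 256-entry ring needs exactly 4096 bytes and the
 * ALIGN() above is a no-op, while a 320-entry ring (5120 bytes) would be
 * padded to 8192.  The 4K alignment keeps the ring on whole pages for the
 * coherent DMA mapping.
 */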

/**
 * igbvf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = tx_ring->adapter;
	struct igbvf_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		igbvf_put_txbuf(adapter, buffer_info);
	}

	size = sizeof(struct igbvf_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * igbvf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: ring to free resources from
 *
 * Free all transmit software resources
 **/
void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
{
	struct pci_dev *pdev = tx_ring->adapter->pdev;

	igbvf_clean_tx_ring(tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igbvf_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring structure pointer to free buffers from
 **/
static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
{
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct igbvf_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->rx_ps_hdr_size) {
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_ps_hdr_size,
						 DMA_FROM_DEVICE);
			} else {
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 DMA_FROM_DEVICE);
			}
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		if (buffer_info->page) {
			if (buffer_info->page_dma)
				dma_unmap_page(&pdev->dev,
					       buffer_info->page_dma,
					       PAGE_SIZE / 2,
					       DMA_FROM_DEVICE);
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_dma = 0;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igbvf_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * igbvf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
{
	struct pci_dev *pdev = rx_ring->adapter->pdev;

	igbvf_clean_rx_ring(rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);
	rx_ring->desc = NULL;
}

/**
 * igbvf_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte counts during the last
 * interrupt.  The advantage of per-interrupt computation is faster updates
 * and more accurate ITR for the current traffic pattern.  Constants in this
 * function were computed based on theoretical maximum wire speed and
 * thresholds were set based on testing data as well as attempting to
 * minimize response time while increasing bulk throughput.
 **/
static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter,
					   enum latency_range itr_setting,
					   int packets, int bytes)
{
	enum latency_range retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000)
				retval = bulk_latency;
			else if ((packets < 10) || ((bytes/packets) > 1200))
				retval = bulk_latency;
			else if (packets > 35)
				retval = lowest_latency;
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	default:
		break;
	}

update_itr_done:
	return retval;
}

static int igbvf_range_to_itr(enum latency_range current_range)
{
	int new_itr;

	switch (current_range) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IGBVF_70K_ITR;
		break;
	case low_latency:
		new_itr = IGBVF_20K_ITR;
		break;
	case bulk_latency:
		new_itr = IGBVF_4K_ITR;
		break;
	default:
		new_itr = IGBVF_START_ITR;
		break;
	}
	return new_itr;
}
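
/* Worked example (added; traffic numbers are hypothetical): while in
 * low_latency, an interval of 4 TSO frames totalling 40000 bytes gives
 * bytes/packets = 10000 > 8000, so the range moves to bulk_latency; an
 * interval of 2 packets totalling 400 bytes satisfies the final else-if
 * and moves the range to lowest_latency.
 */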

static void igbvf_set_itr(struct igbvf_adapter *adapter)
{
	u32 new_itr;

	adapter->tx_ring->itr_range =
			igbvf_update_itr(adapter,
					 adapter->tx_ring->itr_val,
					 adapter->total_tx_packets,
					 adapter->total_tx_bytes);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->requested_itr == 3 &&
	    adapter->tx_ring->itr_range == lowest_latency)
		adapter->tx_ring->itr_range = low_latency;

	new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range);

	if (new_itr != adapter->tx_ring->itr_val) {
		u32 current_itr = adapter->tx_ring->itr_val;
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > current_itr ?
			  min(current_itr + (new_itr >> 2), new_itr) :
			  new_itr;
		adapter->tx_ring->itr_val = new_itr;

		adapter->tx_ring->set_itr = 1;
	}

	adapter->rx_ring->itr_range =
			igbvf_update_itr(adapter, adapter->rx_ring->itr_val,
					 adapter->total_rx_packets,
					 adapter->total_rx_bytes);
	if (adapter->requested_itr == 3 &&
	    adapter->rx_ring->itr_range == lowest_latency)
		adapter->rx_ring->itr_range = low_latency;

	new_itr = igbvf_range_to_itr(adapter->rx_ring->itr_range);

	if (new_itr != adapter->rx_ring->itr_val) {
		u32 current_itr = adapter->rx_ring->itr_val;

		new_itr = new_itr > current_itr ?
			  min(current_itr + (new_itr >> 2), new_itr) :
			  new_itr;
		adapter->rx_ring->itr_val = new_itr;

		adapter->rx_ring->set_itr = 1;
	}
}

/**
 * igbvf_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: ring structure to clean descriptors from
 *
 * Returns true if ring is completely cleaned
 **/
static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = tx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct igbvf_buffer *buffer_info;
	struct sk_buff *skb;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	buffer_info = &tx_ring->buffer_info[i];
	eop_desc = buffer_info->next_to_watch;

	do {
		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		buffer_info->next_to_watch = NULL;

		for (cleaned = false; !cleaned; count++) {
			tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
			cleaned = (tx_desc == eop_desc);
			skb = buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			igbvf_put_txbuf(adapter, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;

			buffer_info = &tx_ring->buffer_info[i];
		}

		eop_desc = buffer_info->next_to_watch;
	} while (count < tx_ring->count);

	tx_ring->next_to_clean = i;

	if (unlikely(count &&
		     netif_carrier_ok(netdev) &&
		     igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__IGBVF_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	adapter->net_stats.tx_bytes += total_bytes;
	adapter->net_stats.tx_packets += total_packets;
	return count < tx_ring->count;
}

static irqreturn_t igbvf_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	adapter->int_counter1++;

	netif_carrier_off(netdev);
	hw->mac.get_link_status = 1;
	if (!test_bit(__IGBVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies + 1);

	ew32(EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;

	if (tx_ring->set_itr) {
		writel(tx_ring->itr_val,
		       adapter->hw.hw_addr + tx_ring->itr_register);
		adapter->tx_ring->set_itr = 0;
	}

	adapter->total_tx_bytes = 0;
	adapter->total_tx_packets = 0;

	/* auto mask will automatically reenable the interrupt when we write
	 * EICS
	 */
	if (!igbvf_clean_tx_irq(tx_ring))
		/* Ring was not completely cleaned, so fire another interrupt */
		ew32(EICS, tx_ring->eims_value);
	else
		ew32(EIMS, tx_ring->eims_value);

	return IRQ_HANDLED;
}

static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	adapter->int_counter0++;

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
	if (adapter->rx_ring->set_itr) {
		writel(adapter->rx_ring->itr_val,
		       adapter->hw.hw_addr + adapter->rx_ring->itr_register);
		adapter->rx_ring->set_itr = 0;
	}

	if (napi_schedule_prep(&adapter->rx_ring->napi)) {
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->rx_ring->napi);
	}

	return IRQ_HANDLED;
}

#define IGBVF_NO_QUEUE -1

static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
				int tx_queue, int msix_vector)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* 82576 uses a table-based method for assigning vectors.
	 * Each queue has a single entry in the table to which we write
	 * a vector number along with a "valid" bit.  Sadly, the layout
	 * of the table is somewhat counterintuitive.
	 */
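
	/* Layout sketch (added; inferred from the masks and shifts below):
	 * each 32-bit IVAR0 entry at index n packs four 8-bit fields, LSB
	 * to MSB: Rx queue 2n, Tx queue 2n, Rx queue 2n+1, Tx queue 2n+1.
	 * E.g. pointing Rx queue 1 at MSI-X vector 2 rewrites bits 23:16
	 * with (2 | E1000_IVAR_VALID).
	 */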

	if (rx_queue > IGBVF_NO_QUEUE) {
		index = (rx_queue >> 1);
		ivar = array_er32(IVAR0, index);
		if (rx_queue & 0x1) {
			/* vector goes into third byte of register */
			ivar = ivar & 0xFF00FFFF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
		} else {
			/* vector goes into low byte of register */
			ivar = ivar & 0xFFFFFF00;
			ivar |= msix_vector | E1000_IVAR_VALID;
		}
		adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
		array_ew32(IVAR0, index, ivar);
	}
	if (tx_queue > IGBVF_NO_QUEUE) {
		index = (tx_queue >> 1);
		ivar = array_er32(IVAR0, index);
		if (tx_queue & 0x1) {
			/* vector goes into high byte of register */
			ivar = ivar & 0x00FFFFFF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
		} else {
			/* vector goes into second byte of register */
			ivar = ivar & 0xFFFF00FF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
		}
		adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
		array_ew32(IVAR0, index, ivar);
	}
}

/**
 * igbvf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * igbvf_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igbvf_configure_msix(struct igbvf_adapter *adapter)
{
	u32 tmp;
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	int vector = 0;

	adapter->eims_enable_mask = 0;

	igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++);
	adapter->eims_enable_mask |= tx_ring->eims_value;
	writel(tx_ring->itr_val, hw->hw_addr + tx_ring->itr_register);
	igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++);
	adapter->eims_enable_mask |= rx_ring->eims_value;
	writel(rx_ring->itr_val, hw->hw_addr + rx_ring->itr_register);

	/* set vector for other causes, i.e. link changes */
	tmp = (vector++ | E1000_IVAR_VALID);

	ew32(IVAR_MISC, tmp);

	adapter->eims_enable_mask = (1 << (vector)) - 1;
	adapter->eims_other = 1 << (vector - 1);
	e1e_flush();
}

static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	}
}

/**
 * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
 * @adapter: board private structure
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
{
	int err = -ENOMEM;
	int i;

	/* we allocate 3 vectors, 1 for Tx, 1 for Rx, 1 for PF messages */
	adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (adapter->msix_entries) {
		for (i = 0; i < 3; i++)
			adapter->msix_entries[i].entry = i;

		err = pci_enable_msix_range(adapter->pdev,
					    adapter->msix_entries, 3, 3);
	}

	if (err < 0) {
		/* MSI-X failed */
		dev_err(&adapter->pdev->dev,
			"Failed to initialize MSI-X interrupts.\n");
		igbvf_reset_interrupt_capability(adapter);
	}
}

/**
 * igbvf_request_msix - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * igbvf_request_msix allocates MSI-X vectors and requests interrupts from
 * the kernel.
 **/
static int igbvf_request_msix(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0, vector = 0;

	if (strlen(netdev->name) < (IFNAMSIZ - 5)) {
		sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
		sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
	} else {
		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
			  netdev);
	if (err)
		goto out;

	adapter->tx_ring->itr_register = E1000_EITR(vector);
	adapter->tx_ring->itr_val = adapter->current_itr;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
			  netdev);
	if (err)
		goto out;

	adapter->rx_ring->itr_register = E1000_EITR(vector);
	adapter->rx_ring->itr_val = adapter->current_itr;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igbvf_msix_other, 0, netdev->name, netdev);
	if (err)
		goto out;

	igbvf_configure_msix(adapter);
	return 0;
out:
	return err;
}
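
/* Naming note (added): with the sprintf() calls above, an interface named
 * "eth0" shows its vectors as "eth0-tx-0", "eth0-rx-0" and "eth0" (e.g. in
 * /proc/interrupts); the IFNAMSIZ - 5 check reserves room for the five
 * suffix characters plus the terminating NUL.
 */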

/**
 * igbvf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 **/
static int igbvf_alloc_queues(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);

	return 0;
}

/**
 * igbvf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igbvf_request_irq(struct igbvf_adapter *adapter)
{
	int err = -1;

	/* igbvf supports MSI-X only */
	if (adapter->msix_entries)
		err = igbvf_request_msix(adapter);

	if (!err)
		return err;

	dev_err(&adapter->pdev->dev,
		"Unable to allocate interrupt, Error: %d\n", err);

	return err;
}

static void igbvf_free_irq(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int vector;

	if (adapter->msix_entries) {
		for (vector = 0; vector < 3; vector++)
			free_irq(adapter->msix_entries[vector].vector, netdev);
	}
}

/**
 * igbvf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igbvf_irq_disable(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(EIMC, ~0);

	if (adapter->msix_entries)
		ew32(EIAC, 0);
}

/**
 * igbvf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igbvf_irq_enable(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(EIAC, adapter->eims_enable_mask);
	ew32(EIAM, adapter->eims_enable_mask);
	ew32(EIMS, adapter->eims_enable_mask);
}

/**
 * igbvf_poll - NAPI Rx polling callback
 * @napi: struct associated with this polling callback
 * @budget: number of packets the driver may process during this poll
 **/
static int igbvf_poll(struct napi_struct *napi, int budget)
{
	struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi);
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int work_done = 0;

	igbvf_clean_rx_irq(adapter, &work_done, budget);

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);

		if (adapter->requested_itr & 3)
			igbvf_set_itr(adapter);

		if (!test_bit(__IGBVF_DOWN, &adapter->state))
			ew32(EIMS, adapter->rx_ring->eims_value);
	}

	return work_done;
}

/**
 * igbvf_set_rlpml - set receive large packet maximum length
 * @adapter: board private structure
 *
 * Configure the maximum size of packets that will be received
 **/
static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
{
	int max_frame_size;
	struct e1000_hw *hw = &adapter->hw;

	max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE;
	e1000_rlpml_set_vf(hw, max_frame_size);
}

static int igbvf_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (hw->mac.ops.set_vfta(hw, vid, true)) {
		dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
		return -EINVAL;
	}
	set_bit(vid, adapter->active_vlans);
	return 0;
}

static int igbvf_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (hw->mac.ops.set_vfta(hw, vid, false)) {
		dev_err(&adapter->pdev->dev,
			"Failed to remove vlan id %d\n", vid);
		return -EINVAL;
	}
	clear_bit(vid, adapter->active_vlans);
	return 0;
}

static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		igbvf_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}

/**
 * igbvf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igbvf_configure_tx(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	u64 tdba;
	u32 txdctl, dca_txctrl;

	/* disable transmits */
	txdctl = er32(TXDCTL(0));
	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
	e1e_flush();
	msleep(10);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
	tdba = tx_ring->dma;
	ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
	ew32(TDBAH(0), (tdba >> 32));
	ew32(TDH(0), 0);
	ew32(TDT(0), 0);
	tx_ring->head = E1000_TDH(0);
	tx_ring->tail = E1000_TDT(0);

	/* Turn off Relaxed Ordering on head write-backs.  The writebacks
	 * MUST be delivered in order or it will completely screw up
	 * our bookkeeping.
	 */
	dca_txctrl = er32(DCA_TXCTRL(0));
	dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
	ew32(DCA_TXCTRL(0), dca_txctrl);

	/* enable transmits */
	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	ew32(TXDCTL(0), txdctl);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS;

	/* enable Report Status bit */
	adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
}

/**
 * igbvf_setup_srrctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 srrctl = 0;

	srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
		    E1000_SRRCTL_BSIZEHDR_MASK |
		    E1000_SRRCTL_BSIZEPKT_MASK);

	/* Enable queue drop to avoid head of line blocking */
	srrctl |= E1000_SRRCTL_DROP_EN;

	/* Setup buffer sizes */
	srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
		  E1000_SRRCTL_BSIZEPKT_SHIFT;

	if (adapter->rx_buffer_len < 2048) {
		adapter->rx_ps_hdr_size = 0;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	} else {
		adapter->rx_ps_hdr_size = 128;
		srrctl |= adapter->rx_ps_hdr_size <<
			  E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	}

	ew32(SRRCTL(0), srrctl);
}
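
/* Encoding example (added; assumes E1000_SRRCTL_BSIZEPKT_SHIFT is 10, i.e.
 * the packet-buffer field is in 1 KB units): a 2048-byte rx_buffer_len is
 * already 1024-aligned and encodes as 2, and selects header split with a
 * 128-byte header buffer; a 1522-byte length rounds up to 2048 for the
 * field but still selects the single-buffer descriptor type.
 */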

/**
 * igbvf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igbvf_configure_rx(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	u64 rdba;
	u32 rxdctl;

	/* disable receives */
	rxdctl = er32(RXDCTL(0));
	ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
	e1e_flush();
	msleep(10);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	rdba = rx_ring->dma;
	ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
	ew32(RDBAH(0), (rdba >> 32));
	ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc));
	rx_ring->head = E1000_RDH(0);
	rx_ring->tail = E1000_RDT(0);
	ew32(RDH(0), 0);
	ew32(RDT(0), 0);

	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	rxdctl &= 0xFFF00000;
	rxdctl |= IGBVF_RX_PTHRESH;
	rxdctl |= IGBVF_RX_HTHRESH << 8;
	rxdctl |= IGBVF_RX_WTHRESH << 16;

	igbvf_set_rlpml(adapter);

	/* enable receives */
	ew32(RXDCTL(0), rxdctl);
}

/**
 * igbvf_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igbvf_set_multi(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list = NULL;
	int i;

	if (!netdev_mc_empty(netdev)) {
		mta_list = kmalloc_array(netdev_mc_count(netdev), ETH_ALEN,
					 GFP_ATOMIC);
		if (!mta_list)
			return;
	}

	/* prepare a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
	kfree(mta_list);
}

/**
 * igbvf_configure - configure the hardware for Rx and Tx
 * @adapter: private board structure
 **/
static void igbvf_configure(struct igbvf_adapter *adapter)
{
	igbvf_set_multi(adapter->netdev);

	igbvf_restore_vlan(adapter);

	igbvf_configure_tx(adapter);
	igbvf_setup_srrctl(adapter);
	igbvf_configure_rx(adapter);
	igbvf_alloc_rx_buffers(adapter->rx_ring,
			       igbvf_desc_unused(adapter->rx_ring));
}

/* igbvf_reset - bring the hardware into a known good state
 * @adapter: private board structure
 *
 * This function boots the hardware and enables some settings that
 * require a configuration cycle of the hardware - those cannot be
 * set/changed during runtime.  After reset the device needs to be
 * properly configured for Rx, Tx etc.
 */
static void igbvf_reset(struct igbvf_adapter *adapter)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;

	/* Allow time for pending master requests to run */
	if (mac->ops.reset_hw(hw))
		dev_err(&adapter->pdev->dev, "PF still resetting\n");

	mac->ops.init_hw(hw);

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}

	adapter->last_reset = jiffies;
}

int igbvf_up(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	igbvf_configure(adapter);

	clear_bit(__IGBVF_DOWN, &adapter->state);

	napi_enable(&adapter->rx_ring->napi);
	if (adapter->msix_entries)
		igbvf_configure_msix(adapter);

	/* Clear any pending interrupts. */
	er32(EICR);
	igbvf_irq_enable(adapter);

	/* start the watchdog */
	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies + 1);

	return 0;
}

void igbvf_down(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 rxdctl, txdctl;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer
	 */
	set_bit(__IGBVF_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rxdctl = er32(RXDCTL(0));
	ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);

	netif_stop_queue(netdev);

	/* disable transmits in the hardware */
	txdctl = er32(TXDCTL(0));
	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);

	/* flush both disables and wait for them to finish */
	e1e_flush();
	msleep(10);

	napi_disable(&adapter->rx_ring->napi);

	igbvf_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);

	netif_carrier_off(netdev);

	/* record the stats before reset */
	igbvf_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	igbvf_reset(adapter);
	igbvf_clean_tx_ring(adapter->tx_ring);
	igbvf_clean_rx_ring(adapter->rx_ring);
}

void igbvf_reinit_locked(struct igbvf_adapter *adapter)
{
	might_sleep();
	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
		msleep(1);
	igbvf_down(adapter);
	igbvf_up(adapter);
	clear_bit(__IGBVF_RESETTING, &adapter->state);
}
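
/* Locking note (added; an interpretation of the pattern above): the
 * __IGBVF_RESETTING bit acts as a sleeping mutex built from
 * test_and_set_bit(), so concurrent callers of igbvf_reinit_locked() wait
 * in 1 ms sleeps until the current down/up cycle finishes, while paths
 * such as igbvf_update_stats() simply bail out while the bit is held.
 */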

/**
 * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter)
 * @adapter: board private structure to initialize
 *
 * igbvf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int igbvf_sw_init(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	s32 rc;

	adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
	adapter->rx_ps_hdr_size = 0;
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	adapter->tx_int_delay = 8;
	adapter->tx_abs_int_delay = 32;
	adapter->rx_int_delay = 0;
	adapter->rx_abs_int_delay = 8;
	adapter->requested_itr = 3;
	adapter->current_itr = IGBVF_START_ITR;

	/* Set various function pointers */
	adapter->ei->init_ops(&adapter->hw);

	rc = adapter->hw.mac.ops.init_params(&adapter->hw);
	if (rc)
		return rc;

	rc = adapter->hw.mbx.ops.init_params(&adapter->hw);
	if (rc)
		return rc;

	igbvf_set_interrupt_capability(adapter);

	if (igbvf_alloc_queues(adapter))
		return -ENOMEM;

	spin_lock_init(&adapter->tx_queue_lock);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igbvf_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);

	set_bit(__IGBVF_DOWN, &adapter->state);
	return 0;
}

static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	adapter->stats.last_gprc = er32(VFGPRC);
	adapter->stats.last_gorc = er32(VFGORC);
	adapter->stats.last_gptc = er32(VFGPTC);
	adapter->stats.last_gotc = er32(VFGOTC);
	adapter->stats.last_mprc = er32(VFMPRC);
	adapter->stats.last_gotlbc = er32(VFGOTLBC);
	adapter->stats.last_gptlbc = er32(VFGPTLBC);
	adapter->stats.last_gorlbc = er32(VFGORLBC);
	adapter->stats.last_gprlbc = er32(VFGPRLBC);

	adapter->stats.base_gprc = er32(VFGPRC);
	adapter->stats.base_gorc = er32(VFGORC);
	adapter->stats.base_gptc = er32(VFGPTC);
	adapter->stats.base_gotc = er32(VFGOTC);
	adapter->stats.base_mprc = er32(VFMPRC);
	adapter->stats.base_gotlbc = er32(VFGOTLBC);
	adapter->stats.base_gptlbc = er32(VFGPTLBC);
	adapter->stats.base_gorlbc = er32(VFGORLBC);
	adapter->stats.base_gprlbc = er32(VFGPRLBC);
}

/**
 * igbvf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igbvf_open(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__IGBVF_TESTING, &adapter->state))
		return -EBUSY;

	/* allocate transmit descriptors */
	err = igbvf_setup_tx_resources(adapter, adapter->tx_ring);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igbvf_setup_rx_resources(adapter, adapter->rx_ring);
	if (err)
		goto err_setup_rx;

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	igbvf_configure(adapter);

	err = igbvf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igbvf_up() */
	clear_bit(__IGBVF_DOWN, &adapter->state);

	napi_enable(&adapter->rx_ring->napi);

	/* clear any pending interrupts */
	er32(EICR);

	igbvf_irq_enable(adapter);

	/* start the watchdog */
	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies + 1);

	return 0;

err_req_irq:
	igbvf_free_rx_resources(adapter->rx_ring);
err_setup_rx:
	igbvf_free_tx_resources(adapter->tx_ring);
err_setup_tx:
	igbvf_reset(adapter);

	return err;
}

/**
 * igbvf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igbvf_close(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
	igbvf_down(adapter);

	igbvf_free_irq(adapter);

	igbvf_free_tx_resources(adapter->tx_ring);
	igbvf_free_rx_resources(adapter->rx_ring);

	return 0;
}

/**
 * igbvf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igbvf_set_mac(struct net_device *netdev, void *p)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

	if (!ether_addr_equal(addr->sa_data, hw->mac.addr))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return 0;
}

#define UPDATE_VF_COUNTER(reg, name) \
{ \
	u32 current_counter = er32(reg); \
	if (current_counter < adapter->stats.last_##name) \
		adapter->stats.name += 0x100000000LL; \
	adapter->stats.last_##name = current_counter; \
	adapter->stats.name &= 0xFFFFFFFF00000000LL; \
	adapter->stats.name |= current_counter; \
}
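
/* Wrap-around example (added): the VF statistics registers are 32 bits
 * wide, so the macro maintains a 64-bit software counter.  If last_gprc
 * was 0xFFFFFFF0 and the register now reads 0x00000010, the new value is
 * smaller, so 2^32 is added to the upper half and the low 32 bits are
 * replaced with the fresh reading, keeping the 64-bit count monotonic.
 */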

/**
 * igbvf_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void igbvf_update_stats(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;

	/* Prevent stats update while adapter is being reset, link is down
	 * or if the pci connection is down.
	 */
	if (adapter->link_speed == 0)
		return;

	if (test_bit(__IGBVF_RESETTING, &adapter->state))
		return;

	if (pci_channel_offline(pdev))
		return;

	UPDATE_VF_COUNTER(VFGPRC, gprc);
	UPDATE_VF_COUNTER(VFGORC, gorc);
	UPDATE_VF_COUNTER(VFGPTC, gptc);
	UPDATE_VF_COUNTER(VFGOTC, gotc);
	UPDATE_VF_COUNTER(VFMPRC, mprc);
	UPDATE_VF_COUNTER(VFGOTLBC, gotlbc);
	UPDATE_VF_COUNTER(VFGPTLBC, gptlbc);
	UPDATE_VF_COUNTER(VFGORLBC, gorlbc);
	UPDATE_VF_COUNTER(VFGPRLBC, gprlbc);

	/* Fill out the OS statistics structure */
	adapter->net_stats.multicast = adapter->stats.mprc;
}

static void igbvf_print_link_info(struct igbvf_adapter *adapter)
{
	dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n",
		 adapter->link_speed,
		 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
}

static bool igbvf_has_link(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	s32 ret_val = E1000_SUCCESS;
	bool link_active;

	/* If interface is down, stay link down */
	if (test_bit(__IGBVF_DOWN, &adapter->state))
		return false;

	ret_val = hw->mac.ops.check_for_link(hw);
	link_active = !hw->mac.get_link_status;

	/* if check for link returns error we will need to reset */
	if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ)))
		schedule_work(&adapter->reset_task);

	return link_active;
}

/**
 * igbvf_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igbvf_watchdog(unsigned long data)
{
	struct igbvf_adapter *adapter = (struct igbvf_adapter *)data;

	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

static void igbvf_watchdog_task(struct work_struct *work)
{
	struct igbvf_adapter *adapter = container_of(work,
						     struct igbvf_adapter,
						     watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	struct e1000_hw *hw = &adapter->hw;
	u32 link;
	int tx_pending = 0;

	link = igbvf_has_link(adapter);

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			mac->ops.get_link_up_info(&adapter->hw,
						  &adapter->link_speed,
						  &adapter->link_duplex);
			igbvf_print_link_info(adapter);

			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			dev_info(&adapter->pdev->dev, "Link is Down\n");
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	}

	if (netif_carrier_ok(netdev)) {
		igbvf_update_stats(adapter);
	} else {
		tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
			      tx_ring->count);
		if (tx_pending) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
		}
	}

	/* Cause software interrupt to ensure Rx ring is cleaned */
	ew32(EICS, adapter->rx_ring->eims_value);

	/* Reset the timer */
	if (!test_bit(__IGBVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + (2 * HZ)));
}

#define IGBVF_TX_FLAGS_CSUM		0x00000001
#define IGBVF_TX_FLAGS_VLAN		0x00000002
#define IGBVF_TX_FLAGS_TSO		0x00000004
#define IGBVF_TX_FLAGS_IPV4		0x00000008
#define IGBVF_TX_FLAGS_VLAN_MASK	0xffff0000
#define IGBVF_TX_FLAGS_VLAN_SHIFT	16

static int igbvf_tso(struct igbvf_adapter *adapter,
		     struct igbvf_ring *tx_ring,
		     struct sk_buff *skb, u32 tx_flags, u8 *hdr_len,
		     __be16 protocol)
{
	struct e1000_adv_tx_context_desc *context_desc;
	struct igbvf_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	u32 mss_l4len_idx, l4len;
	unsigned int i;
	int err;

	*hdr_len = 0;

	err = skb_cow_head(skb, 0);
	if (err < 0) {
		dev_err(&adapter->pdev->dev, "igbvf_tso returning an error\n");
		return err;
	}

	l4len = tcp_hdrlen(skb);
	*hdr_len += l4len;

	if (protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
	}

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
	/* VLAN MACLEN IPLEN */
	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
		info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
	*hdr_len += skb_network_offset(skb);
	info |= (skb_transport_header(skb) - skb_network_header(skb));
	*hdr_len += (skb_transport_header(skb) - skb_network_header(skb));
	context_desc->vlan_macip_lens = cpu_to_le32(info);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

	if (protocol == htons(ETH_P_IP))
		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;

	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

	/* MSS L4LEN IDX */
	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);

	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->seqnum_seed = 0;

	buffer_info->time_stamp = jiffies;
	buffer_info->dma = 0;
	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}
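
/* Packing example (added; assumes the usual advanced-descriptor layout
 * with MACLEN at bits 15:9 and IPLEN at bits 8:0): an untagged IPv4 frame
 * has skb_network_offset() == 14 and a 20-byte IP header, so
 * vlan_macip_lens = (14 << 9) | 20, and with a 20-byte TCP header
 * *hdr_len works out to 14 + 20 + 20 = 54 bytes of headers per segment.
 */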
static int igbvf_tso(struct igbvf_adapter *adapter,
		     struct igbvf_ring *tx_ring,
		     struct sk_buff *skb, u32 tx_flags, u8 *hdr_len,
		     __be16 protocol)
{
	struct e1000_adv_tx_context_desc *context_desc;
	struct igbvf_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	u32 mss_l4len_idx, l4len;
	unsigned int i;
	int err;

	*hdr_len = 0;

	err = skb_cow_head(skb, 0);
	if (err < 0) {
		dev_err(&adapter->pdev->dev, "igbvf_tso returning an error\n");
		return err;
	}

	l4len = tcp_hdrlen(skb);
	*hdr_len += l4len;

	if (protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP, 0);
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
	}

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
	/* VLAN MACLEN IPLEN */
	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
		info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
	*hdr_len += skb_network_offset(skb);
	info |= (skb_transport_header(skb) - skb_network_header(skb));
	*hdr_len += (skb_transport_header(skb) - skb_network_header(skb));
	context_desc->vlan_macip_lens = cpu_to_le32(info);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

	if (protocol == htons(ETH_P_IP))
		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;

	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

	/* MSS L4LEN IDX */
	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);

	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->seqnum_seed = 0;

	buffer_info->time_stamp = jiffies;
	buffer_info->dma = 0;
	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* the function returns int, so use 1 rather than true */
	return 1;
}
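/**
 * igbvf_tx_csum - queue a checksum-offload context descriptor if needed
 * @adapter: board private structure
 * @tx_ring: ring on which the context descriptor is placed
 * @skb: packet whose checksum is to be offloaded
 * @tx_flags: IGBVF_TX_FLAGS_* of the frame, including any VLAN tag
 * @protocol: link-layer protocol of the packet
 *
 * Returns true if a context descriptor was queued (checksum offload was
 * requested or a VLAN tag must be inserted), false otherwise.
 **/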
static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
				 struct igbvf_ring *tx_ring,
				 struct sk_buff *skb, u32 tx_flags,
				 __be16 protocol)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct igbvf_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IGBVF_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IGBVF_TX_FLAGS_VLAN)
			info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);

		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			info |= (skb_transport_header(skb) -
				 skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(info);

		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (protocol) {
			case htons(ETH_P_IP):
				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			case htons(ETH_P_IPV6):
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			default:
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
		context_desc->seqnum_seed = 0;
		context_desc->mss_l4len_idx = 0;

		buffer_info->time_stamp = jiffies;
		buffer_info->dma = 0;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}
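/**
 * igbvf_maybe_stop_tx - stop the Tx queue when descriptors run low
 * @netdev: network interface device structure
 * @size: number of free descriptors required
 *
 * Returns 0 if at least @size descriptors are free (re-waking the queue
 * if room appeared after it was stopped), or -EBUSY otherwise.
 **/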
static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* if there are enough free descriptors then we don't need to worry */
	if (igbvf_desc_unused(adapter->tx_ring) >= size)
		return 0;

	netif_stop_queue(netdev);

	/* make the queue stop visible before re-checking for room */
	smp_mb();

	/* We need to check again just in case room has been made available */
	if (igbvf_desc_unused(adapter->tx_ring) < size)
		return -EBUSY;

	netif_wake_queue(netdev);

	++adapter->restart_queue;
	return 0;
}

#define IGBVF_MAX_TXD_PWR	16
#define IGBVF_MAX_DATA_PER_TXD	(1 << IGBVF_MAX_TXD_PWR)
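/**
 * igbvf_tx_map_adv - DMA-map an skb's head and fragments for transmit
 * @adapter: board private structure
 * @tx_ring: ring whose buffer_info entries receive the mappings
 * @skb: packet to map
 *
 * Returns the number of buffers mapped, or 0 if a DMA mapping failed,
 * in which case all mappings made so far are unwound.
 **/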
static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
				   struct igbvf_ring *tx_ring,
				   struct sk_buff *skb)
{
	struct igbvf_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int len = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
	buffer_info->length = len;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->mapped_as_page = false;
	buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, buffer_info->dma))
		goto dma_error;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		const struct skb_frag_struct *frag;

		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;

		frag = &skb_shinfo(skb)->frags[f];
		len = skb_frag_size(frag);

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->mapped_as_page = true;
		buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;
	}

	tx_ring->buffer_info[i].skb = skb;

	return ++count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for the failed buffer_info mapping */
	buffer_info->dma = 0;
	buffer_info->time_stamp = 0;
	buffer_info->length = 0;
	buffer_info->mapped_as_page = false;
	if (count)
		count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		igbvf_put_txbuf(adapter, buffer_info);
	}

	return 0;
}
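/**
 * igbvf_tx_queue_adv - write mapped buffers as advanced Tx descriptors
 * @adapter: board private structure
 * @tx_ring: ring on which the descriptors are queued
 * @tx_flags: IGBVF_TX_FLAGS_* of the frame
 * @count: number of mapped buffers to turn into descriptors
 * @first: index of the first descriptor of the packet
 * @paylen: total length of the packet
 * @hdr_len: header length, subtracted from @paylen for the TSO payload
 **/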
static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
				      struct igbvf_ring *tx_ring,
				      int tx_flags, int count,
				      unsigned int first, u32 paylen,
				      u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc = NULL;
	struct igbvf_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
			E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGBVF_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGBVF_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

	} else if (tx_flags & IGBVF_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64.)
	 */
	wmb();

	tx_ring->buffer_info[first].next_to_watch = tx_desc;
	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
	/*
	 * We need this if more than one processor can write to our tail
	 * at a time; it synchronizes IO on IA64/Altix systems.
	 */
	mmiowb();
}
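/**
 * igbvf_xmit_frame_ring_adv - prepare and transmit a frame on a ring
 * @skb: packet to transmit
 * @netdev: network interface device structure
 * @tx_ring: ring on which the frame is transmitted
 *
 * Sets up any required context descriptors (TSO, checksum offload),
 * maps the buffers, and hands the descriptors to hardware. Returns
 * NETDEV_TX_OK, or NETDEV_TX_BUSY if the ring is too full.
 **/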
static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
					     struct net_device *netdev,
					     struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	unsigned int first, tx_flags = 0;
	u8 hdr_len = 0;
	int count = 0;
	int tso = 0;
	__be16 protocol = vlan_get_protocol(skb);

	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/*
	 * Need a gap of count + 4 descriptors to keep the tail from
	 * touching the head:
	 *   + 2 desc gap between tail and head,
	 *   + 1 desc for skb->data,
	 *   + 1 desc for the context descriptor;
	 * otherwise stop the queue and try again next time.
	 */
	if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= IGBVF_TX_FLAGS_VLAN;
		tx_flags |= (skb_vlan_tag_get(skb) <<
			     IGBVF_TX_FLAGS_VLAN_SHIFT);
	}

	if (protocol == htons(ETH_P_IP))
		tx_flags |= IGBVF_TX_FLAGS_IPV4;

	first = tx_ring->next_to_use;

	tso = skb_is_gso(skb) ?
	      igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, protocol) : 0;
	if (unlikely(tso < 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IGBVF_TX_FLAGS_TSO;
	else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGBVF_TX_FLAGS_CSUM;

	/*
	 * count reflects descriptors mapped; if 0 then a mapping error
	 * has occurred and we need to rewind the descriptor queue
	 */
	count = igbvf_tx_map_adv(adapter, tx_ring, skb);

	if (count) {
		igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
				   first, skb->len, hdr_len);
		/* Make sure there is space in the ring for the next send. */
		igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
	} else {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}

static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct igbvf_ring *tx_ring;

	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = &adapter->tx_ring[0];

	return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring);
}

/**
 * igbvf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igbvf_tx_timeout(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}
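/**
 * igbvf_reset_task - bring the adapter down and back up
 * @work: pointer to the reset work_struct embedded in the adapter
 *
 * Performs the reset requested by the watchdog or Tx timeout handlers
 * from process context, where it is safe to sleep.
 **/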
static void igbvf_reset_task(struct work_struct *work)
{
	struct igbvf_adapter *adapter;

	adapter = container_of(work, struct igbvf_adapter, reset_task);

	igbvf_reinit_locked(adapter);
}

/**
 * igbvf_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *igbvf_get_stats(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}

/**
 * igbvf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	if (new_mtu < 68 || new_mtu > INT_MAX - ETH_HLEN - ETH_FCS_LEN ||
	    max_frame > MAX_JUMBO_FRAME_SIZE)
		return -EINVAL;

#define MAX_STD_JUMBO_FRAME_SIZE 9234
	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
		msleep(1);
	/* igbvf_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;
	if (netif_running(netdev))
		igbvf_down(adapter);

	/*
	 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically
	 * NET_IP_ALIGN means we reserve 2 more; this pushes us to
	 * allocate from the next larger slab size,
	 * i.e. RXBUFFER_2048 --> size-4096 slab.
	 * However with the new *_jumbo_rx* routines, jumbo receives
	 * will use fragmented skbs.
	 */
	if (max_frame <= 1024)
		adapter->rx_buffer_len = 1024;
	else if (max_frame <= 2048)
		adapter->rx_buffer_len = 2048;
	else
#if (PAGE_SIZE / 2) > 16384
		adapter->rx_buffer_len = 16384;
#else
		adapter->rx_buffer_len = PAGE_SIZE / 2;
#endif

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
	    (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
					 ETH_FCS_LEN;

	dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igbvf_up(adapter);
	else
		igbvf_reset(adapter);

	clear_bit(__IGBVF_RESETTING, &adapter->state);

	return 0;
}

static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	/* no ioctl commands are supported */
	switch (cmd) {
	default:
		return -EOPNOTSUPP;
	}
}
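/**
 * igbvf_suspend - detach the interface and power down the device
 * @pdev: PCI device information struct
 * @state: power state being entered (unused by the VF)
 *
 * Brings the interface down, frees its IRQ, and disables the PCI
 * device. Also used by igbvf_shutdown().
 **/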
static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
		igbvf_down(adapter);
		igbvf_free_irq(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int igbvf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	int err;

	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}

	pci_set_master(pdev);

	if (netif_running(netdev)) {
		err = igbvf_request_irq(adapter);
		if (err)
			return err;
	}

	igbvf_reset(adapter);

	if (netif_running(netdev))
		igbvf_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
#endif

static void igbvf_shutdown(struct pci_dev *pdev)
{
	igbvf_suspend(pdev, PMSG_SUSPEND);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igbvf_netpoll(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);

	igbvf_clean_tx_irq(adapter->tx_ring);

	enable_irq(adapter->pdev->irq);
}
#endif

/**
 * igbvf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igbvf_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igbvf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the igbvf_resume routine.
 */
static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	igbvf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * igbvf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the igbvf_resume routine.
 */
static void igbvf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igbvf_up(adapter)) {
			dev_err(&pdev->dev,
				"can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

static void igbvf_print_device_info(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	if (hw->mac.type == e1000_vfadapt_i350)
		dev_info(&pdev->dev, "Intel(R) I350 Virtual Function\n");
	else
		dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
	dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
}
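/**
 * igbvf_set_features - apply netdev feature changes
 * @netdev: network interface device structure
 * @features: the features being enabled or disabled
 *
 * Only NETIF_F_RXCSUM is acted on; it toggles the flag that makes the
 * Rx cleanup path ignore hardware checksum results.
 **/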
static int igbvf_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (features & NETIF_F_RXCSUM)
		adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED;
	else
		adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED;

	return 0;
}

static const struct net_device_ops igbvf_netdev_ops = {
	.ndo_open		= igbvf_open,
	.ndo_stop		= igbvf_close,
	.ndo_start_xmit		= igbvf_xmit_frame,
	.ndo_get_stats		= igbvf_get_stats,
	.ndo_set_rx_mode	= igbvf_set_multi,
	.ndo_set_mac_address	= igbvf_set_mac,
	.ndo_change_mtu		= igbvf_change_mtu,
	.ndo_do_ioctl		= igbvf_ioctl,
	.ndo_tx_timeout		= igbvf_tx_timeout,
	.ndo_vlan_rx_add_vid	= igbvf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igbvf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igbvf_netpoll,
#endif
	.ndo_set_features	= igbvf_set_features,
};

/**
 * igbvf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igbvf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igbvf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igbvf_adapter *adapter;
	struct e1000_hw *hw;
	const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];
	static int cards_found;
	int err, pci_using_dac;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_dma;
		}
	}

	err = pci_request_regions(pdev, igbvf_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct igbvf_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ei = ei;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->hw.back = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	err = -EIO;
	adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
				      pci_resource_len(pdev, 0));
	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	if (ei->get_variants) {
		err = ei->get_variants(adapter);
		if (err)
			goto err_get_variants;
	}

	/* setup adapter struct */
	err = igbvf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* construct the net_device struct */
	netdev->netdev_ops = &igbvf_netdev_ops;

	igbvf_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found++;

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	/* reset the controller to put the device in a known good state */
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state. Is the PF interface up?\n");
	} else {
		err = hw->mac.ops.read_mac_addr(hw);
		if (err)
			dev_info(&pdev->dev, "Error reading MAC address.\n");
		else if (is_zero_ether_addr(adapter->hw.mac.addr))
			dev_info(&pdev->dev,
				 "MAC address not assigned by administrator.\n");
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_info(&pdev->dev, "Assigning random MAC address.\n");
		eth_hw_addr_random(netdev);
		memcpy(adapter->hw.mac.addr, netdev->dev_addr,
		       netdev->addr_len);
	}

	setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
		    (unsigned long)adapter);

	INIT_WORK(&adapter->reset_task, igbvf_reset_task);
	INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);

	/* ring size defaults */
	adapter->rx_ring->count = 1024;
	adapter->tx_ring->count = 1024;

	/* reset the hardware with the new settings */
	igbvf_reset(adapter);

	/* set hardware-specific flags */
	if (adapter->hw.mac.type == e1000_vfadapt_i350)
		adapter->flags |= IGBVF_FLAG_RX_LB_VLAN_BSWAP;

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_hw_init;

	/* tell the stack to leave us alone until igbvf_open() is called */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	igbvf_print_device_info(adapter);

	igbvf_initialize_last_counter_stats(adapter);

	return 0;

err_hw_init:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	igbvf_reset_interrupt_capability(adapter);
err_get_variants:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * igbvf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igbvf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void igbvf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * The watchdog timer may be rescheduled, so explicitly
	 * disable it from being rescheduled.
	 */
	set_bit(__IGBVF_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	unregister_netdev(netdev);

	igbvf_reset_interrupt_capability(adapter);

	/*
	 * It is important to delete the napi struct prior to freeing the
	 * rx ring so that you do not end up with null pointer refs.
	 */
	netif_napi_del(&adapter->rx_ring->napi);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers igbvf_err_handler = {
	.error_detected = igbvf_io_error_detected,
	.slot_reset = igbvf_io_slot_reset,
	.resume = igbvf_io_resume,
};

static const struct pci_device_id igbvf_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf },
	{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);

/* PCI Device API Driver */
static struct pci_driver igbvf_driver = {
	.name		= igbvf_driver_name,
	.id_table	= igbvf_pci_tbl,
	.probe		= igbvf_probe,
	.remove		= igbvf_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend	= igbvf_suspend,
	.resume		= igbvf_resume,
#endif
	.shutdown	= igbvf_shutdown,
	.err_handler	= &igbvf_err_handler
};

/**
 * igbvf_init_module - Driver Registration Routine
 *
 * igbvf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igbvf_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version);
	pr_info("%s\n", igbvf_copyright);

	ret = pci_register_driver(&igbvf_driver);

	return ret;
}
module_init(igbvf_init_module);

/**
 * igbvf_exit_module - Driver Exit Cleanup Routine
 *
 * igbvf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igbvf_exit_module(void)
{
	pci_unregister_driver(&igbvf_driver);
}
module_exit(igbvf_exit_module);

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* netdev.c */