// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/if_vlan.h>
#include <linux/aer.h>

#include "igc.h"
#include "igc_hw.h"

#define DRV_VERSION	"0.0.1-k"
#define DRV_SUMMARY	"Intel(R) 2.5G Ethernet Linux Driver"

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

char igc_driver_name[] = "igc";
char igc_driver_version[] = DRV_VERSION;
static const char igc_driver_string[] = DRV_SUMMARY;
static const char igc_copyright[] =
	"Copyright(c) 2018 Intel Corporation.";

static const struct igc_info *igc_info_tbl[] = {
	[board_base] = &igc_base_info,
};

static const struct pci_device_id igc_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igc_pci_tbl);

/* forward declarations */
static void igc_clean_tx_ring(struct igc_ring *tx_ring);
static int igc_sw_init(struct igc_adapter *);
static void igc_configure(struct igc_adapter *adapter);
static void igc_power_down_link(struct igc_adapter *adapter);
static void igc_set_default_mac_filter(struct igc_adapter *adapter);
static void igc_set_rx_mode(struct net_device *netdev);
static void igc_write_itr(struct igc_q_vector *q_vector);
static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector);
static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx);
static void igc_set_interrupt_capability(struct igc_adapter *adapter,
					 bool msix);
static void igc_free_q_vectors(struct igc_adapter *adapter);
static void igc_irq_disable(struct igc_adapter *adapter);
static void igc_irq_enable(struct igc_adapter *adapter);
static void igc_configure_msix(struct igc_adapter *adapter);
static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
				  struct igc_rx_buffer *bi);

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * igc_reset - bring the hardware into a known good state
 * @adapter: board private structure
 */
void igc_reset(struct igc_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct igc_hw *hw = &adapter->hw;

	hw->mac.ops.reset_hw(hw);

	if (hw->mac.ops.init_hw(hw))
		dev_err(&pdev->dev, "Hardware Error\n");

	if (!netif_running(adapter->netdev))
		igc_power_down_link(adapter);

	igc_get_phy_info(hw);
}

/**
 * igc_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 */
static void igc_power_up_link(struct igc_adapter *adapter)
{
	igc_reset_phy(&adapter->hw);

	if (adapter->hw.phy.media_type == igc_media_type_copper)
		igc_power_up_phy_copper(&adapter->hw);

	igc_setup_link(&adapter->hw);
}

/**
 * igc_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igc_power_down_link(struct igc_adapter *adapter)
{
	if (adapter->hw.phy.media_type == igc_media_type_copper)
		igc_power_down_phy_copper_base(&adapter->hw);
}
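/* Illustrative note (not from a datasheet): device registers in this file
 * are accessed through the rd32()/wr32() helpers, and single-bit changes
 * use a read-modify-write so unrelated bits are preserved, e.g.:
 *
 *	u32 val = rd32(IGC_SOME_REG);	(IGC_SOME_REG is a placeholder)
 *	wr32(IGC_SOME_REG, val | SOME_BIT);
 *
 * wrfl() performs a flushing read where a write must be posted to the
 * device before the code continues.
 */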
/**
 * igc_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void igc_release_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}

/**
 * igc_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 */
static void igc_get_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}

/**
 * igc_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void igc_free_tx_resources(struct igc_ring *tx_ring)
{
	igc_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igc_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void igc_free_all_tx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igc_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * igc_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 */
static void igc_clean_tx_ring(struct igc_ring *tx_ring)
{
	u16 i = tx_ring->next_to_clean;
	struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];

	while (i != tx_ring->next_to_use) {
		union igc_adv_tx_desc *eop_desc, *tx_desc;

		/* Free all the Tx ring sk_buffs */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* check for eop_desc to determine the end of the packet */
		eop_desc = tx_buffer->next_to_watch;
		tx_desc = IGC_TX_DESC(tx_ring, i);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(i == tx_ring->count)) {
				i = 0;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGC_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len))
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		i++;
		if (unlikely(i == tx_ring->count)) {
			i = 0;
			tx_buffer = tx_ring->tx_buffer_info;
		}
	}

	/* reset BQL for queue */
	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* reset next_to_use and next_to_clean */
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}
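/* Note on the unmap calls above: the skb header was mapped with
 * dma_map_single() in igc_tx_map(), so it is released with
 * dma_unmap_single(); the fragments were mapped from pages with
 * skb_frag_dma_map(), so they are released with dma_unmap_page().
 * Mixing the two would be a DMA-API violation.
 */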
/**
 * igc_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i])
			igc_clean_tx_ring(adapter->tx_ring[i]);
}

/**
 * igc_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 */
int igc_setup_tx_resources(struct igc_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int size = 0;

	size = sizeof(struct igc_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);

	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	dev_err(dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

/**
 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igc_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igc_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}
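/* The unwind loop above follows a common pattern: on the first failure at
 * index i, everything in [0, i) was fully set up, so walking i back down
 * frees exactly the queues that succeeded. A minimal sketch, with setup()
 * and teardown() as placeholders:
 *
 *	for (i = 0; i < n; i++)
 *		if (setup(i))
 *			goto unwind;
 *	return 0;
 * unwind:
 *	while (i--)
 *		teardown(i);
 */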
/**
 * igc_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 */
static void igc_clean_rx_ring(struct igc_ring *rx_ring)
{
	u16 i = rx_ring->next_to_clean;

	if (rx_ring->skb)
		dev_kfree_skb(rx_ring->skb);
	rx_ring->skb = NULL;

	/* Free all the Rx ring sk_buffs */
	while (i != rx_ring->next_to_alloc) {
		struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      buffer_info->dma,
					      buffer_info->page_offset,
					      igc_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev,
				     buffer_info->dma,
				     igc_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IGC_RX_DMA_ATTR);
		__page_frag_cache_drain(buffer_info->page,
					buffer_info->pagecnt_bias);

		i++;
		if (i == rx_ring->count)
			i = 0;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * igc_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_rx_rings(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i])
			igc_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * igc_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void igc_free_rx_resources(struct igc_ring *rx_ring)
{
	igc_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igc_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void igc_free_all_rx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igc_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * igc_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 */
int igc_setup_rx_resources(struct igc_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int size, desc_len;

	size = sizeof(struct igc_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	desc_len = sizeof(union igc_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(dev,
		"Unable to allocate memory for the receive descriptor ring\n");
	return -ENOMEM;
}
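/* Sizing example (illustrative): the advanced descriptors on this hardware
 * family are 16 bytes each, so a 256-entry ring needs 256 * 16 = 4096 bytes
 * and the ALIGN() above is a no-op; a 1024-entry ring needs 16384 bytes,
 * already 4K-aligned. The rounding only matters for counts whose byte size
 * is not a multiple of 4096.
 */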
/**
 * igc_setup_all_rx_resources - wrapper to allocate Rx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igc_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igc_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igc_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	union igc_adv_rx_desc *rx_desc;
	int reg_idx = ring->reg_idx;
	u32 srrctl = 0, rxdctl = 0;
	u64 rdba = ring->dma;

	/* disable the queue */
	wr32(IGC_RXDCTL(reg_idx), 0);

	/* Set DMA base address registers */
	wr32(IGC_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(IGC_RDBAH(reg_idx), rdba >> 32);
	wr32(IGC_RDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_rx_desc));

	/* initialize head and tail */
	ring->tail = adapter->io_addr + IGC_RDT(reg_idx);
	wr32(IGC_RDH(reg_idx), 0);
	writel(0, ring->tail);

	/* reset next-to-use/clean to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* set descriptor configuration */
	srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
	if (ring_uses_large_buffer(ring))
		srrctl |= IGC_RXBUFFER_3072 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
	else
		srrctl |= IGC_RXBUFFER_2048 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
	srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;

	wr32(IGC_SRRCTL(reg_idx), srrctl);

	rxdctl |= IGC_RX_PTHRESH;
	rxdctl |= IGC_RX_HTHRESH << 8;
	rxdctl |= IGC_RX_WTHRESH << 16;

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct igc_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0 */
	rx_desc = IGC_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;

	/* enable receive descriptor fetching */
	rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;

	wr32(IGC_RXDCTL(reg_idx), rxdctl);
}

/**
 * igc_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx(struct igc_adapter *adapter)
{
	int i;

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igc_configure_rx_ring(adapter, adapter->rx_ring[i]);
}
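/* Ring ownership sketch (descriptive, not from a datasheet): in
 * igc_configure_rx_ring() above, RDBAL/RDBAH give hardware the 64-bit DMA
 * base split into low/high 32 bits, RDLEN the byte length, and RDH/RDT the
 * head/tail indices. Software advances tail (ring->tail is the mapped RDT
 * register) as it posts buffers; hardware advances head as it fills them,
 * and the two chase each other around the ring.
 */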
/**
 * igc_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 */
static void igc_configure_tx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	int reg_idx = ring->reg_idx;
	u64 tdba = ring->dma;
	u32 txdctl = 0;

	/* disable the queue */
	wr32(IGC_TXDCTL(reg_idx), 0);
	wrfl();
	mdelay(10);

	wr32(IGC_TDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_tx_desc));
	wr32(IGC_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(IGC_TDBAH(reg_idx), tdba >> 32);

	ring->tail = adapter->io_addr + IGC_TDT(reg_idx);
	wr32(IGC_TDH(reg_idx), 0);
	writel(0, ring->tail);

	txdctl |= IGC_TX_PTHRESH;
	txdctl |= IGC_TX_HTHRESH << 8;
	txdctl |= IGC_TX_WTHRESH << 16;

	txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
	wr32(IGC_TXDCTL(reg_idx), txdctl);
}

/**
 * igc_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 */
static void igc_configure_tx(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

/**
 * igc_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 */
static void igc_setup_mrqc(struct igc_adapter *adapter)
{
}
/**
 * igc_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 */
static void igc_setup_rctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(IGC_RCTL);

	rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
	rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC);

	rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);

	/* enable stripping of CRC. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= IGC_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256);

	/* enable LPE to allow for reception of jumbo frames */
	rctl |= IGC_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(IGC_RXDCTL(0), 0);

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in set_rx_mode
		 */
		rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
			 IGC_RCTL_BAM | /* RX All Bcast Pkts */
			 IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
			  IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */
	}

	wr32(IGC_RCTL, rctl);
}

/**
 * igc_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 */
static void igc_setup_tctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which could be enabled by default */
	wr32(IGC_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(IGC_TCTL);
	tctl &= ~IGC_TCTL_CT;
	tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
		(IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);

	/* Enable transmits */
	tctl |= IGC_TCTL_EN;

	wr32(IGC_TCTL, tctl);
}

/**
 * igc_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int igc_set_mac(struct net_device *netdev, void *p)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igc_set_default_mac_filter(adapter);

	return 0;
}

static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
{
}
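/* Queue flow control below (sketch): the transmit path stops its subqueue
 * when descriptors run low, re-checks after a memory barrier, and either
 * bails out with -EBUSY or wakes the queue again if completions freed space
 * in the meantime. The smp_mb() pairs with the barrier in
 * igc_clean_tx_irq() so a wakeup cannot be lost between the stop and the
 * recheck.
 */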
static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* memory barrier: make the stop visible before re-reading the
	 * free descriptor count
	 */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available.
	 */
	if (igc_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
	if (igc_desc_unused(tx_ring) >= size)
		return 0;
	return __igc_maybe_stop_tx(tx_ring, size);
}

static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
		       IGC_ADVTXD_DCMD_DEXT |
		       IGC_ADVTXD_DCMD_IFCS;

	return cmd_type;
}

static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
				 union igc_adv_tx_desc *tx_desc,
				 u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;

	/* insert L4 checksum */
	olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) *
			 ((IGC_TXD_POPTS_TXSM << 8) /
			 IGC_TX_FLAGS_CSUM);

	/* insert IPv4 checksum */
	olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) *
			 (((IGC_TXD_POPTS_IXSM << 8)) /
			 IGC_TX_FLAGS_IPV4);

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}

static int igc_tx_map(struct igc_ring *tx_ring,
		      struct igc_tx_buffer *first,
		      const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct igc_tx_buffer *tx_buffer;
	union igc_adv_tx_desc *tx_desc;
	u32 tx_flags = first->tx_flags;
	struct skb_frag_struct *frag;
	u16 i = tx_ring->next_to_use;
	unsigned int data_len, size;
	dma_addr_t dma;
	u32 cmd_type = igc_tx_cmd_type(skb, tx_flags);

	tx_desc = IGC_TX_DESC(tx_ring, i);

	igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);

	size = skb_headlen(skb);
	data_len = skb->data_len;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > IGC_MAX_DATA_PER_TXD)) {
			/* cmd_type has no length bits set, so ^ acts as | */
			tx_desc->read.cmd_type_len =
				cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IGC_TX_DESC(tx_ring, 0);
				i = 0;
			}
			tx_desc->read.olinfo_status = 0;

			dma += IGC_MAX_DATA_PER_TXD;
			size -= IGC_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IGC_TX_DESC(tx_ring, 0);
			i = 0;
		}
		tx_desc->read.olinfo_status = 0;

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
				       size, DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= size | IGC_TXD_DCMD;
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	skb_tx_timestamp(skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* Make sure there is space in the ring for the next send. */
	igc_maybe_stop_tx(tx_ring, DESC_NEEDED);

	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more)
		writel(i, tx_ring->tail);

	return 0;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");
	tx_buffer = &tx_ring->tx_buffer_info[i];

	/* clear dma mappings for failed tx_buffer_info map */
	while (tx_buffer != first) {
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_page(tx_ring->dev,
				       dma_unmap_addr(tx_buffer, dma),
				       dma_unmap_len(tx_buffer, len),
				       DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buffer, len, 0);

		if (i-- == 0)
			i += tx_ring->count;
		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	if (dma_unmap_len(tx_buffer, len))
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);
	dma_unmap_len_set(tx_buffer, len, 0);

	dev_kfree_skb_any(tx_buffer->skb);
	tx_buffer->skb = NULL;

	tx_ring->next_to_use = i;

	return -1;
}
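/* Byte queue limits: netdev_tx_sent_queue() above is paired with
 * netdev_tx_completed_queue() in igc_clean_tx_irq() and with
 * netdev_tx_reset_queue() in igc_clean_tx_ring(), so BQL's view of
 * in-flight bytes stays balanced across transmit, completion, and ring
 * teardown.
 */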
static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
				       struct igc_ring *tx_ring)
{
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	__be16 protocol = vlan_get_protocol(skb);
	struct igc_tx_buffer *first;
	u32 tx_flags = 0;
	unsigned short f;
	u8 hdr_len = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
	 *	+ 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
	 *	+ 2 desc gap to keep tail from touching head,
	 *	+ 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (igc_maybe_stop_tx(tx_ring, count + 3)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

	igc_tx_csum(tx_ring, first);

	igc_tx_map(tx_ring, first, hdr_len);

	return NETDEV_TX_OK;
}

static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}

static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb->len < 17) {
		if (skb_padto(skb, 17))
			return NETDEV_TX_OK;
		skb->len = 17;
	}

	return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
}
static inline void igc_rx_hash(struct igc_ring *ring,
			       union igc_adv_rx_desc *rx_desc,
			       struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb,
			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
			     PKT_HASH_TYPE_L3);
}

/**
 * igc_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 */
static void igc_process_skb_fields(struct igc_ring *rx_ring,
				   union igc_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	igc_rx_hash(rx_ring, rx_desc, skb);

	skb_record_rx_queue(skb, rx_ring->queue_index);

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
					       const unsigned int size)
{
	struct igc_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	prefetchw(rx_buffer->page);

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}
/**
 * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of buffer to be added
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 */
static void igc_add_rx_frag(struct igc_ring *rx_ring,
			    struct igc_rx_buffer *rx_buffer,
			    struct sk_buff *skb,
			    unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);
	rx_buffer->page_offset ^= truesize;
#else
	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
				SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
				SKB_DATA_ALIGN(size);

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);
	rx_buffer->page_offset += truesize;
#endif
}

static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
				     struct igc_rx_buffer *rx_buffer,
				     union igc_adv_rx_desc *rx_desc,
				     unsigned int size)
{
	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(IGC_SKB_PAD + size);
#endif
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	/* build an skb around the page buffer */
	skb = build_skb(va - IGC_SKB_PAD, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, IGC_SKB_PAD);
	__skb_put(skb, size);

	/* update buffer offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif

	return skb;
}

static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
					 struct igc_rx_buffer *rx_buffer,
					 union igc_adv_rx_desc *rx_desc,
					 unsigned int size)
{
	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	/* allocate a skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);
	if (unlikely(!skb))
		return NULL;

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IGC_RX_HDR_LEN)
		headlen = eth_get_headlen(va, IGC_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				(va + headlen) - page_address(rx_buffer->page),
				size, truesize);
#if (PAGE_SIZE < 8192)
		rx_buffer->page_offset ^= truesize;
#else
		rx_buffer->page_offset += truesize;
#endif
	} else {
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}

/**
 * igc_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void igc_reuse_rx_page(struct igc_ring *rx_ring,
			      struct igc_rx_buffer *old_buff)
{
	u16 nta = rx_ring->next_to_alloc;
	struct igc_rx_buffer *new_buff;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls.
	 */
	new_buff->dma		= old_buff->dma;
	new_buff->page		= old_buff->page;
	new_buff->page_offset	= old_buff->page_offset;
	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
}
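/* Worked example of the receive page flip (PAGE_SIZE == 4096, 2K buffers):
 * truesize is half a page, so page_offset ^= truesize alternates between 0
 * and 2048. One half of the page is handed to the stack while the other is
 * posted back to hardware, and pagecnt_bias tracks the references the
 * driver still holds so the page is only freed once both the stack and the
 * driver are done with it.
 */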
nta : 0; 1189 1190 /* Transfer page from old buffer to new buffer. 1191 * Move each member individually to avoid possible store 1192 * forwarding stalls. 1193 */ 1194 new_buff->dma = old_buff->dma; 1195 new_buff->page = old_buff->page; 1196 new_buff->page_offset = old_buff->page_offset; 1197 new_buff->pagecnt_bias = old_buff->pagecnt_bias; 1198 } 1199 1200 static inline bool igc_page_is_reserved(struct page *page) 1201 { 1202 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); 1203 } 1204 1205 static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer) 1206 { 1207 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; 1208 struct page *page = rx_buffer->page; 1209 1210 /* avoid re-using remote pages */ 1211 if (unlikely(igc_page_is_reserved(page))) 1212 return false; 1213 1214 #if (PAGE_SIZE < 8192) 1215 /* if we are only owner of page we can reuse it */ 1216 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) 1217 return false; 1218 #else 1219 #define IGC_LAST_OFFSET \ 1220 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048) 1221 1222 if (rx_buffer->page_offset > IGC_LAST_OFFSET) 1223 return false; 1224 #endif 1225 1226 /* If we have drained the page fragment pool we need to update 1227 * the pagecnt_bias and page count so that we fully restock the 1228 * number of references the driver holds. 1229 */ 1230 if (unlikely(!pagecnt_bias)) { 1231 page_ref_add(page, USHRT_MAX); 1232 rx_buffer->pagecnt_bias = USHRT_MAX; 1233 } 1234 1235 return true; 1236 } 1237 1238 /** 1239 * igc_is_non_eop - process handling of non-EOP buffers 1240 * @rx_ring: Rx ring being processed 1241 * @rx_desc: Rx descriptor for current buffer 1242 * @skb: current socket buffer containing buffer in progress 1243 * 1244 * This function updates next to clean. If the buffer is an EOP buffer 1245 * this function exits returning false, otherwise it will place the 1246 * sk_buff in the next buffer to be chained and return true indicating 1247 * that this is in fact a non-EOP buffer. 1248 */ 1249 static bool igc_is_non_eop(struct igc_ring *rx_ring, 1250 union igc_adv_rx_desc *rx_desc) 1251 { 1252 u32 ntc = rx_ring->next_to_clean + 1; 1253 1254 /* fetch, update, and store next to clean */ 1255 ntc = (ntc < rx_ring->count) ? ntc : 0; 1256 rx_ring->next_to_clean = ntc; 1257 1258 prefetch(IGC_RX_DESC(rx_ring, ntc)); 1259 1260 if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP))) 1261 return false; 1262 1263 return true; 1264 } 1265 1266 /** 1267 * igc_cleanup_headers - Correct corrupted or empty headers 1268 * @rx_ring: rx descriptor ring packet is being transacted on 1269 * @rx_desc: pointer to the EOP Rx descriptor 1270 * @skb: pointer to current skb being fixed 1271 * 1272 * Address the case where we are pulling data in on pages only 1273 * and as such no data is present in the skb header. 1274 * 1275 * In addition if skb is not at least 60 bytes we need to pad it so that 1276 * it is large enough to qualify as a valid Ethernet frame. 1277 * 1278 * Returns true if an error was encountered and skb was freed. 
/**
 * igc_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 */
static bool igc_cleanup_headers(struct igc_ring *rx_ring,
				union igc_adv_rx_desc *rx_desc,
				struct sk_buff *skb)
{
	if (unlikely(igc_test_staterr(rx_desc,
				      IGC_RXDEXT_ERR_FRAME_ERR_MASK))) {
		struct net_device *netdev = rx_ring->netdev;

		if (!(netdev->features & NETIF_F_RXALL)) {
			dev_kfree_skb_any(skb);
			return true;
		}
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

static void igc_put_rx_buffer(struct igc_ring *rx_ring,
			      struct igc_rx_buffer *rx_buffer)
{
	if (igc_can_reuse_rx_page(rx_buffer)) {
		/* hand second half of page back to the ring */
		igc_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* We are not reusing the buffer so unmap it and free
		 * any references we are holding to it
		 */
		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
				     igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     IGC_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;
}

/**
 * igc_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring to refill
 * @cleaned_count: number of buffers to replace
 */
static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
{
	union igc_adv_rx_desc *rx_desc;
	u16 i = rx_ring->next_to_use;
	struct igc_rx_buffer *bi;
	u16 bufsz;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = IGC_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	bufsz = igc_rx_bufsz(rx_ring);

	do {
		if (!igc_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset, bufsz,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IGC_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}
}
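/* Index bookkeeping (sketch): next_to_use marks the slot software fills
 * next, next_to_clean the slot it reaps next, and next_to_alloc the reuse
 * cursor for flipped pages. The refill amount computed by igc_desc_unused()
 * always leaves one slot empty (see the comment in igc_configure()), so
 * next_to_use == next_to_clean always means "ring empty", never "ring
 * full".
 */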
static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
{
	unsigned int total_bytes = 0, total_packets = 0;
	struct igc_ring *rx_ring = q_vector->rx.ring;
	struct sk_buff *skb = rx_ring->skb;
	u16 cleaned_count = igc_desc_unused(rx_ring);

	while (likely(total_packets < budget)) {
		union igc_adv_rx_desc *rx_desc;
		struct igc_rx_buffer *rx_buffer;
		unsigned int size;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGC_RX_BUFFER_WRITE) {
			igc_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		rx_buffer = igc_get_rx_buffer(rx_ring, size);

		/* retrieve a buffer from the ring */
		if (skb)
			igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
		else if (ring_uses_build_skb(rx_ring))
			skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size);
		else
			skb = igc_construct_skb(rx_ring, rx_buffer,
						rx_desc, size);

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_failed++;
			rx_buffer->pagecnt_bias++;
			break;
		}

		igc_put_rx_buffer(rx_ring, rx_buffer);
		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (igc_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_bytes += skb->len;

		/* populate checksum, timestamp, VLAN, and protocol */
		igc_process_skb_fields(rx_ring, rx_desc, skb);

		napi_gro_receive(&q_vector->napi, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->rx_syncp);
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->rx_syncp);
	q_vector->rx.total_packets += total_packets;
	q_vector->rx.total_bytes += total_bytes;

	if (cleaned_count)
		igc_alloc_rx_buffers(rx_ring, cleaned_count);

	return total_packets;
}
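/* Refill batching: igc_clean_rx_irq() above returns buffers to hardware
 * only once IGC_RX_BUFFER_WRITE descriptors have accumulated, plus one
 * final top-up on exit, so the MMIO tail write in igc_alloc_rx_buffers() is
 * amortized over many packets instead of paid per packet.
 */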
static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? IGC_SKB_PAD : 0;
}

static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
				  struct igc_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 igc_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 IGC_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = igc_rx_offset(rx_ring);
	bi->pagecnt_bias = 1;

	return true;
}
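/* Index trick used in igc_clean_tx_irq() below (and in
 * igc_alloc_rx_buffers() above): the loop index is biased downward by
 * ring->count so the wrap test is the cheap (!i) rather than
 * (i == ring->count). Worked example: with count == 256 and
 * next_to_clean == 10, i starts at 10 - 256 and is incremented once per
 * descriptor; it hits 0 after 246 steps, exactly when the real index would
 * wrap past 255 back to 0, at which point i is rebiased and the ring
 * pointers are rewound.
 */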
/**
 * igc_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * @napi_budget: Used to determine if we are in netpoll
 *
 * returns true if ring is completely cleaned
 */
static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
{
	struct igc_adapter *adapter = q_vector->adapter;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	struct igc_ring *tx_ring = q_vector->tx.ring;
	unsigned int i = tx_ring->next_to_clean;
	struct igc_tx_buffer *tx_buffer;
	union igc_adv_tx_desc *tx_desc;

	if (test_bit(__IGC_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IGC_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buffer->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		dma_unmap_len_set(tx_buffer, len, 0);

		/* clear last DMA location and unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGC_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IGC_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->tx_syncp);
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->tx_syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
		struct igc_hw *hw = &adapter->hw;

		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
		if (tx_buffer->next_to_watch &&
		    time_after(jiffies, tx_buffer->time_stamp +
		    (adapter->tx_timeout_factor * HZ)) &&
		    !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
			/* detected Tx unit hang */
			dev_err(tx_ring->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue      <%d>\n"
				"  TDH           <%x>\n"
				"  TDT           <%x>\n"
				"  next_to_use   <%x>\n"
				"  next_to_clean <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp    <%lx>\n"
				"  next_to_watch <%p>\n"
				"  jiffies       <%lx>\n"
				"  desc.status   <%x>\n",
				tx_ring->queue_index,
				rd32(IGC_TDH(tx_ring->reg_idx)),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_buffer->time_stamp,
				tx_buffer->next_to_watch,
				jiffies,
				tx_buffer->next_to_watch->wb.status);
			netif_stop_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			/* we are about to reset, no point in enabling stuff */
			return true;
		}
	}

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets &&
		     netif_carrier_ok(tx_ring->netdev) &&
		     igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !(test_bit(__IGC_DOWN, &adapter->state))) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			u64_stats_update_begin(&tx_ring->tx_syncp);
			tx_ring->tx_stats.restart_queue++;
			u64_stats_update_end(&tx_ring->tx_syncp);
		}
	}

	return !!budget;
}

/**
 * igc_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 */
void igc_up(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	int i = 0;

	/* hardware has been reset, we need to reload some things */
	igc_configure(adapter);

	clear_bit(__IGC_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_enable(&adapter->q_vector[i]->napi);

	if (adapter->msix_entries)
		igc_configure_msix(adapter);
	else
		igc_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(IGC_ICR);
	igc_irq_enable(adapter);

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);
}
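/* ICR on this hardware lineage is read-to-clear: the rd32(IGC_ICR) in
 * igc_up() above is there purely for its side effect of discarding any
 * interrupt causes that latched while the interface was down, so the first
 * real interrupt after igc_irq_enable() starts from a clean slate.
 */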
/**
 * igc_update_stats - Update the board statistics counters
 * @adapter: board private structure
 */
static void igc_update_stats(struct igc_adapter *adapter)
{
}

static void igc_nfc_filter_exit(struct igc_adapter *adapter)
{
}

/**
 * igc_down - Close the interface
 * @adapter: board private structure
 */
void igc_down(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i = 0;

	set_bit(__IGC_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(IGC_RCTL);
	wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
	/* flush and sleep below */

	igc_nfc_filter_exit(adapter);

	/* set trans_start so we don't get spurious watchdogs during reset */
	netif_trans_update(netdev);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(IGC_TCTL);
	tctl &= ~IGC_TCTL_EN;
	wr32(IGC_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	usleep_range(10000, 20000);

	igc_irq_disable(adapter);

	adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		if (adapter->q_vector[i]) {
			napi_synchronize(&adapter->q_vector[i]->napi);
			napi_disable(&adapter->q_vector[i]->napi);
		}
	}

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	/* record the stats before reset */
	spin_lock(&adapter->stats64_lock);
	igc_update_stats(adapter);
	spin_unlock(&adapter->stats64_lock);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igc_reset(adapter);

	/* clear VLAN promisc flag so VFTA will be updated if necessary */
	adapter->flags &= ~IGC_FLAG_VLAN_PROMISC;

	igc_clean_all_tx_rings(adapter);
	igc_clean_all_rx_rings(adapter);
}

void igc_reinit_locked(struct igc_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
		usleep_range(1000, 2000);
	igc_down(adapter);
	igc_up(adapter);
	clear_bit(__IGC_RESETTING, &adapter->state);
}

static void igc_reset_task(struct work_struct *work)
{
	struct igc_adapter *adapter;

	adapter = container_of(work, struct igc_adapter, reset_task);

	netdev_err(adapter->netdev, "Reset adapter\n");
	igc_reinit_locked(adapter);
}
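/* Reset serialization (sketch): __IGC_RESETTING acts as a bit lock. Paths
 * that reconfigure the device, igc_reinit_locked() above and
 * igc_change_mtu() below, spin on test_and_set_bit() until they own it, run
 * igc_down()/igc_up(), then clear it, so at most one down/up cycle is in
 * flight at a time.
 */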
/**
 * igc_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int igc_change_mtu(struct net_device *netdev, int new_mtu)
{
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;

	/* adjust max frame to be at least the size of a standard frame */
	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
		max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;

	while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	/* igc_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;

	if (netif_running(netdev))
		igc_down(adapter);

	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igc_up(adapter);
	else
		igc_reset(adapter);

	clear_bit(__IGC_RESETTING, &adapter->state);

	return 0;
}

/**
 * igc_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are updated here and also from the timer callback.
 */
static struct net_device_stats *igc_get_stats(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__IGC_RESETTING, &adapter->state))
		igc_update_stats(adapter);

	/* only return the current stats */
	return &netdev->stats;
}

/**
 * igc_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 */
static void igc_configure(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i = 0;

	igc_get_hw_control(adapter);
	igc_set_rx_mode(netdev);

	igc_setup_tctl(adapter);
	igc_setup_mrqc(adapter);
	igc_setup_rctl(adapter);

	igc_configure_tx(adapter);
	igc_configure_rx(adapter);

	igc_rx_fifo_flush_base(&adapter->hw);

	/* call igc_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igc_ring *ring = adapter->rx_ring[i];

		igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
	}
}
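/* Byte-order example for the RAR helper below (illustrative): for the MAC
 * address 00:11:22:33:44:55, le32_to_cpup() yields RAL = 0x33221100 and
 * le16_to_cpup() the low RAH half 0x5544. The address is kept little-endian
 * in the register value so that the byte swap performed by writel() on
 * big-endian machines restores wire order.
 */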
/**
 * igc_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
 * @adapter: address of board private structure
 * @index: Index of the RAR entry which need to be synced with MAC table
 */
static void igc_rar_set_index(struct igc_adapter *adapter, u32 index)
{
	u8 *addr = adapter->mac_table[index].addr;
	struct igc_hw *hw = &adapter->hw;
	u32 rar_low, rar_high;

	/* HW expects these to be in network order when they are plugged
	 * into the registers which are little endian. In order to guarantee
	 * that ordering we need to do an leXX_to_cpup here in order to be
	 * ready for the byteswap that occurs with writel
	 */
	rar_low = le32_to_cpup((__le32 *)(addr));
	rar_high = le16_to_cpup((__le16 *)(addr + 4));

	/* Indicate to hardware the Address is Valid. */
	if (adapter->mac_table[index].state & IGC_MAC_STATE_IN_USE) {
		if (is_valid_ether_addr(addr))
			rar_high |= IGC_RAH_AV;

		rar_high |= IGC_RAH_POOL_1 <<
			    adapter->mac_table[index].queue;
	}

	wr32(IGC_RAL(index), rar_low);
	wrfl();
	wr32(IGC_RAH(index), rar_high);
	wrfl();
}

/* Set default MAC address for the PF in the first RAR entry */
static void igc_set_default_mac_filter(struct igc_adapter *adapter)
{
	struct igc_mac_addr *mac_table = &adapter->mac_table[0];

	ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
	mac_table->state = IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;

	igc_rar_set_index(adapter, 0);
}

/**
 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 */
static void igc_set_rx_mode(struct net_device *netdev)
{
}

/**
 * igc_msix_other - msix other interrupt handler
 * @irq: interrupt number
 * @data: pointer to our adapter structure
 */
static irqreturn_t igc_msix_other(int irq, void *data)
{
	struct igc_adapter *adapter = data;
	struct igc_hw *hw = &adapter->hw;
	u32 icr = rd32(IGC_ICR);

	/* reading ICR causes bit 31 of EICR to be cleared */
	if (icr & IGC_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & IGC_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & IGC_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGC_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	wr32(IGC_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}
2020 */ 2021 static void igc_write_ivar(struct igc_hw *hw, int msix_vector, 2022 int index, int offset) 2023 { 2024 u32 ivar = array_rd32(IGC_IVAR0, index); 2025 2026 /* clear any bits that are currently set */ 2027 ivar &= ~((u32)0xFF << offset); 2028 2029 /* write vector and valid bit */ 2030 ivar |= (msix_vector | IGC_IVAR_VALID) << offset; 2031 2032 array_wr32(IGC_IVAR0, index, ivar); 2033 } 2034 2035 static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector) 2036 { 2037 struct igc_adapter *adapter = q_vector->adapter; 2038 struct igc_hw *hw = &adapter->hw; 2039 int rx_queue = IGC_N0_QUEUE; 2040 int tx_queue = IGC_N0_QUEUE; 2041 2042 if (q_vector->rx.ring) 2043 rx_queue = q_vector->rx.ring->reg_idx; 2044 if (q_vector->tx.ring) 2045 tx_queue = q_vector->tx.ring->reg_idx; 2046 2047 switch (hw->mac.type) { 2048 case igc_i225: 2049 if (rx_queue > IGC_N0_QUEUE) 2050 igc_write_ivar(hw, msix_vector, 2051 rx_queue >> 1, 2052 (rx_queue & 0x1) << 4); 2053 if (tx_queue > IGC_N0_QUEUE) 2054 igc_write_ivar(hw, msix_vector, 2055 tx_queue >> 1, 2056 ((tx_queue & 0x1) << 4) + 8); 2057 q_vector->eims_value = BIT(msix_vector); 2058 break; 2059 default: 2060 WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n"); 2061 break; 2062 } 2063 2064 /* add q_vector eims value to global eims_enable_mask */ 2065 adapter->eims_enable_mask |= q_vector->eims_value; 2066 2067 /* configure q_vector to set itr on first interrupt */ 2068 q_vector->set_itr = 1; 2069 } 2070 2071 /** 2072 * igc_configure_msix - Configure MSI-X hardware 2073 * @adapter: Pointer to adapter structure 2074 * 2075 * igc_configure_msix sets up the hardware to properly 2076 * generate MSI-X interrupts. 2077 */ 2078 static void igc_configure_msix(struct igc_adapter *adapter) 2079 { 2080 struct igc_hw *hw = &adapter->hw; 2081 int i, vector = 0; 2082 u32 tmp; 2083 2084 adapter->eims_enable_mask = 0; 2085 2086 /* set vector for other causes, i.e. link changes */ 2087 switch (hw->mac.type) { 2088 case igc_i225: 2089 /* Turn on MSI-X capability first, or our settings 2090 * won't stick. And it will take days to debug. 2091 */ 2092 wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE | 2093 IGC_GPIE_PBA | IGC_GPIE_EIAME | 2094 IGC_GPIE_NSICR); 2095 2096 /* enable msix_other interrupt */ 2097 adapter->eims_other = BIT(vector); 2098 tmp = (vector++ | IGC_IVAR_VALID) << 8; 2099 2100 wr32(IGC_IVAR_MISC, tmp); 2101 break; 2102 default: 2103 /* do nothing, since nothing else supports MSI-X */ 2104 break; 2105 } /* switch (hw->mac.type) */ 2106 2107 adapter->eims_enable_mask |= adapter->eims_other; 2108 2109 for (i = 0; i < adapter->num_q_vectors; i++) 2110 igc_assign_vector(adapter->q_vector[i], vector++); 2111 2112 wrfl(); 2113 } 2114 2115 static irqreturn_t igc_msix_ring(int irq, void *data) 2116 { 2117 struct igc_q_vector *q_vector = data; 2118 2119 /* Write the ITR value calculated from the previous interrupt. */ 2120 igc_write_itr(q_vector); 2121 2122 napi_schedule(&q_vector->napi); 2123 2124 return IRQ_HANDLED; 2125 } 2126 2127 /** 2128 * igc_request_msix - Initialize MSI-X interrupts 2129 * @adapter: Pointer to adapter structure 2130 * 2131 * igc_request_msix allocates MSI-X vectors and requests interrupts from the 2132 * kernel. 
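 * Vector 0 is requested first for the "other" causes (link status
 * changes and the like, serviced by igc_msix_other); vectors
 * 1..num_q_vectors are then tied to the per-queue igc_msix_ring
 * handlers.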
2133 */ 2134 static int igc_request_msix(struct igc_adapter *adapter) 2135 { 2136 int i = 0, err = 0, vector = 0, free_vector = 0; 2137 struct net_device *netdev = adapter->netdev; 2138 2139 err = request_irq(adapter->msix_entries[vector].vector, 2140 &igc_msix_other, 0, netdev->name, adapter); 2141 if (err) 2142 goto err_out; 2143 2144 for (i = 0; i < adapter->num_q_vectors; i++) { 2145 struct igc_q_vector *q_vector = adapter->q_vector[i]; 2146 2147 vector++; 2148 2149 q_vector->itr_register = adapter->io_addr + IGC_EITR(vector); 2150 2151 if (q_vector->rx.ring && q_vector->tx.ring) 2152 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, 2153 q_vector->rx.ring->queue_index); 2154 else if (q_vector->tx.ring) 2155 sprintf(q_vector->name, "%s-tx-%u", netdev->name, 2156 q_vector->tx.ring->queue_index); 2157 else if (q_vector->rx.ring) 2158 sprintf(q_vector->name, "%s-rx-%u", netdev->name, 2159 q_vector->rx.ring->queue_index); 2160 else 2161 sprintf(q_vector->name, "%s-unused", netdev->name); 2162 2163 err = request_irq(adapter->msix_entries[vector].vector, 2164 igc_msix_ring, 0, q_vector->name, 2165 q_vector); 2166 if (err) 2167 goto err_free; 2168 } 2169 2170 igc_configure_msix(adapter); 2171 return 0; 2172 2173 err_free: 2174 /* free already assigned IRQs */ 2175 free_irq(adapter->msix_entries[free_vector++].vector, adapter); 2176 2177 vector--; 2178 for (i = 0; i < vector; i++) { 2179 free_irq(adapter->msix_entries[free_vector++].vector, 2180 adapter->q_vector[i]); 2181 } 2182 err_out: 2183 return err; 2184 } 2185 2186 /** 2187 * igc_reset_q_vector - Reset config for interrupt vector 2188 * @adapter: board private structure to initialize 2189 * @v_idx: Index of vector to be reset 2190 * 2191 * If NAPI is enabled it will delete any references to the 2192 * NAPI struct. This is preparation for igc_free_q_vector. 2193 */ 2194 static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx) 2195 { 2196 struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; 2197 2198 /* if we're coming from igc_set_interrupt_capability, the vectors are 2199 * not yet allocated 2200 */ 2201 if (!q_vector) 2202 return; 2203 2204 if (q_vector->tx.ring) 2205 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; 2206 2207 if (q_vector->rx.ring) 2208 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; 2209 2210 netif_napi_del(&q_vector->napi); 2211 } 2212 2213 static void igc_reset_interrupt_capability(struct igc_adapter *adapter) 2214 { 2215 int v_idx = adapter->num_q_vectors; 2216 2217 if (adapter->msix_entries) { 2218 pci_disable_msix(adapter->pdev); 2219 kfree(adapter->msix_entries); 2220 adapter->msix_entries = NULL; 2221 } else if (adapter->flags & IGC_FLAG_HAS_MSI) { 2222 pci_disable_msi(adapter->pdev); 2223 } 2224 2225 while (v_idx--) 2226 igc_reset_q_vector(adapter, v_idx); 2227 } 2228 2229 /** 2230 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts 2231 * @adapter: Pointer to adapter structure 2232 * 2233 * This function resets the device so that it has 0 rx queues, tx queues, and 2234 * MSI-X interrupts allocated. 2235 */ 2236 static void igc_clear_interrupt_scheme(struct igc_adapter *adapter) 2237 { 2238 igc_free_q_vectors(adapter); 2239 igc_reset_interrupt_capability(adapter); 2240 } 2241 2242 /** 2243 * igc_free_q_vectors - Free memory allocated for interrupt vectors 2244 * @adapter: board private structure to initialize 2245 * 2246 * This function frees the memory allocated to the q_vectors. 
In addition if 2247 * NAPI is enabled it will delete any references to the NAPI struct prior 2248 * to freeing the q_vector. 2249 */ 2250 static void igc_free_q_vectors(struct igc_adapter *adapter) 2251 { 2252 int v_idx = adapter->num_q_vectors; 2253 2254 adapter->num_tx_queues = 0; 2255 adapter->num_rx_queues = 0; 2256 adapter->num_q_vectors = 0; 2257 2258 while (v_idx--) { 2259 igc_reset_q_vector(adapter, v_idx); 2260 igc_free_q_vector(adapter, v_idx); 2261 } 2262 } 2263 2264 /** 2265 * igc_free_q_vector - Free memory allocated for specific interrupt vector 2266 * @adapter: board private structure to initialize 2267 * @v_idx: Index of vector to be freed 2268 * 2269 * This function frees the memory allocated to the q_vector. 2270 */ 2271 static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx) 2272 { 2273 struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; 2274 2275 adapter->q_vector[v_idx] = NULL; 2276 2277 /* igc_get_stats64() might access the rings on this vector, 2278 * we must wait a grace period before freeing it. 2279 */ 2280 if (q_vector) 2281 kfree_rcu(q_vector, rcu); 2282 } 2283 2284 /* Need to wait a few seconds after link up to get diagnostic information from 2285 * the phy 2286 */ 2287 static void igc_update_phy_info(struct timer_list *t) 2288 { 2289 struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer); 2290 2291 igc_get_phy_info(&adapter->hw); 2292 } 2293 2294 /** 2295 * igc_has_link - check shared code for link and determine up/down 2296 * @adapter: pointer to driver private info 2297 */ 2298 bool igc_has_link(struct igc_adapter *adapter) 2299 { 2300 struct igc_hw *hw = &adapter->hw; 2301 bool link_active = false; 2302 2303 /* get_link_status is set on LSC (link status) interrupt or 2304 * rx sequence error interrupt. 
get_link_status will stay
2305	 * true until igc_check_for_link establishes link
2306	 * for copper adapters ONLY
2307	 */
2308	switch (hw->phy.media_type) {
2309	case igc_media_type_copper:
2310		if (!hw->mac.get_link_status)
2311			return true;
2312		hw->mac.ops.check_for_link(hw);
2313		link_active = !hw->mac.get_link_status;
2314		break;
2315	default:
2316	case igc_media_type_unknown:
2317		break;
2318	}
2319
2320	if (hw->mac.type == igc_i225 &&
2321	    hw->phy.id == I225_I_PHY_ID) {
2322		if (!netif_carrier_ok(adapter->netdev)) {
2323			adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
2324		} else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) {
2325			adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE;
2326			adapter->link_check_timeout = jiffies;
2327		}
2328	}
2329
2330	return link_active;
2331 }
2332
2333 /**
2334  * igc_watchdog - Timer Call-back
2335  * @t: pointer to the watchdog timer_list
2336  */
2337 static void igc_watchdog(struct timer_list *t)
2338 {
2339	struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer);
2340	/* Do the rest outside of interrupt context */
2341	schedule_work(&adapter->watchdog_task);
2342 }
2343
2344 static void igc_watchdog_task(struct work_struct *work)
2345 {
2346	struct igc_adapter *adapter = container_of(work,
2347						   struct igc_adapter,
2348						   watchdog_task);
2349	struct net_device *netdev = adapter->netdev;
2350	struct igc_hw *hw = &adapter->hw;
2351	struct igc_phy_info *phy = &hw->phy;
2352	u16 phy_data, retry_count = 20;
2353	u32 connsw;
2354	u32 link;
2355	int i;
2356
2357	link = igc_has_link(adapter);
2358
2359	if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) {
2360		if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
2361			adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
2362		else
2363			link = false;
2364	}
2365
2366	/* Force link down if we have fiber to swap to */
2367	if (adapter->flags & IGC_FLAG_MAS_ENABLE) {
2368		if (hw->phy.media_type == igc_media_type_copper) {
2369			connsw = rd32(IGC_CONNSW);
2370			if (!(connsw & IGC_CONNSW_AUTOSENSE_EN))
2371				link = 0;
2372		}
2373	}
2374	if (link) {
2375		if (!netif_carrier_ok(netdev)) {
2376			u32 ctrl;
2377
2378			hw->mac.ops.get_speed_and_duplex(hw,
2379							 &adapter->link_speed,
2380							 &adapter->link_duplex);
2381
2382			ctrl = rd32(IGC_CTRL);
2383			/* Link status message must follow this format */
2384			netdev_info(netdev,
2385				    "igc: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
2386				    netdev->name,
2387				    adapter->link_speed,
2388				    adapter->link_duplex == FULL_DUPLEX ?
2389				    "Full" : "Half",
2390				    (ctrl & IGC_CTRL_TFCE) &&
2391				    (ctrl & IGC_CTRL_RFCE) ? "RX/TX" :
2392				    (ctrl & IGC_CTRL_RFCE) ? "RX" :
2393				    (ctrl & IGC_CTRL_TFCE) ? "TX" : "None");
2394
2395			/* check if SmartSpeed worked */
2396			igc_check_downshift(hw);
2397			if (phy->speed_downgraded)
2398				netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
2399
2400			/* adjust timeout factor according to speed/duplex */
2401			adapter->tx_timeout_factor = 1;
2402			switch (adapter->link_speed) {
2403			case SPEED_10:
2404				adapter->tx_timeout_factor = 14;
2405				break;
2406			case SPEED_100:
2407				/* maybe add some timeout factor? */
2408				break;
2409			}
2410
2411			if (adapter->link_speed != SPEED_1000)
2412				goto no_wait;
2413
2414			/* wait for Remote receiver status OK */
2415 retry_read_status:
2416			if (!igc_read_phy_reg(hw, PHY_1000T_STATUS,
2417					      &phy_data)) {
2418				if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
2419				    retry_count) {
2420					msleep(100);
2421					retry_count--;
2422					goto retry_read_status;
2423				} else if (!retry_count) {
2424					dev_err(&adapter->pdev->dev, "exceeded max 2 second wait\n");
2425				}
2426			} else {
2427				dev_err(&adapter->pdev->dev, "failed to read 1000Base-T Status Reg\n");
2428			}
2429 no_wait:
2430			netif_carrier_on(netdev);
2431
2432			/* link state has changed, schedule phy info update */
2433			if (!test_bit(__IGC_DOWN, &adapter->state))
2434				mod_timer(&adapter->phy_info_timer,
2435					  round_jiffies(jiffies + 2 * HZ));
2436		}
2437	} else {
2438		if (netif_carrier_ok(netdev)) {
2439			adapter->link_speed = 0;
2440			adapter->link_duplex = 0;
2441
2442			/* Link status message must follow this format */
2443			netdev_info(netdev, "igc: %s NIC Link is Down\n",
2444				    netdev->name);
2445			netif_carrier_off(netdev);
2446
2447			/* link state has changed, schedule phy info update */
2448			if (!test_bit(__IGC_DOWN, &adapter->state))
2449				mod_timer(&adapter->phy_info_timer,
2450					  round_jiffies(jiffies + 2 * HZ));
2451
2452			/* link is down, time to check for alternate media */
2453			if (adapter->flags & IGC_FLAG_MAS_ENABLE) {
2454				if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
2455					schedule_work(&adapter->reset_task);
2456					/* return immediately */
2457					return;
2458				}
2459			}
2460
2461		/* also check for alternate media here */
2462		} else if (!netif_carrier_ok(netdev) &&
2463			   (adapter->flags & IGC_FLAG_MAS_ENABLE)) {
2464			if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
2465				schedule_work(&adapter->reset_task);
2466				/* return immediately */
2467				return;
2468			}
2469		}
2470	}
2471
2472	spin_lock(&adapter->stats64_lock);
2473	igc_update_stats(adapter);
2474	spin_unlock(&adapter->stats64_lock);
2475
2476	for (i = 0; i < adapter->num_tx_queues; i++) {
2477		struct igc_ring *tx_ring = adapter->tx_ring[i];
2478
2479		if (!netif_carrier_ok(netdev)) {
2480			/* We've lost link, so the controller stops DMA,
2481			 * but we've got queued Tx work that's never going
2482			 * to get done, so reset controller to flush Tx.
2483			 * (Do the reset outside of interrupt context).
2484			 */
2485			if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) {
2486				adapter->tx_timeout_count++;
2487				schedule_work(&adapter->reset_task);
2488				/* return immediately since reset is imminent */
2489				return;
2490			}
2491		}
2492
2493		/* Force detection of hung controller every watchdog period */
2494		set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
2495	}
2496
2497	/* Cause software interrupt to ensure Rx ring is cleaned */
2498	if (adapter->flags & IGC_FLAG_HAS_MSIX) {
2499		u32 eics = 0;
2500
2501		for (i = 0; i < adapter->num_q_vectors; i++)
2502			eics |= adapter->q_vector[i]->eims_value;
2503		wr32(IGC_EICS, eics);
2504	} else {
2505		wr32(IGC_ICS, IGC_ICS_RXDMT0);
2506	}
2507
2508	/* Reset the timer */
2509	if (!test_bit(__IGC_DOWN, &adapter->state)) {
2510		if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)
2511			mod_timer(&adapter->watchdog_timer,
2512				  round_jiffies(jiffies + HZ));
2513		else
2514			mod_timer(&adapter->watchdog_timer,
2515				  round_jiffies(jiffies + 2 * HZ));
2516	}
2517 }
2518
2519 /**
2520  * igc_update_ring_itr - update the dynamic ITR value based on packet size
2521  * @q_vector: pointer to q_vector
2522  *
2523  * Stores a new ITR value based strictly on packet size.
This 2524 * algorithm is less sophisticated than that used in igc_update_itr, 2525 * due to the difficulty of synchronizing statistics across multiple 2526 * receive rings. The divisors and thresholds used by this function 2527 * were determined based on theoretical maximum wire speed and testing 2528 * data, in order to minimize response time while increasing bulk 2529 * throughput. 2530 * NOTE: This function is called only when operating in a multiqueue 2531 * receive environment. 2532 */ 2533 static void igc_update_ring_itr(struct igc_q_vector *q_vector) 2534 { 2535 struct igc_adapter *adapter = q_vector->adapter; 2536 int new_val = q_vector->itr_val; 2537 int avg_wire_size = 0; 2538 unsigned int packets; 2539 2540 /* For non-gigabit speeds, just fix the interrupt rate at 4000 2541 * ints/sec - ITR timer value of 120 ticks. 2542 */ 2543 switch (adapter->link_speed) { 2544 case SPEED_10: 2545 case SPEED_100: 2546 new_val = IGC_4K_ITR; 2547 goto set_itr_val; 2548 default: 2549 break; 2550 } 2551 2552 packets = q_vector->rx.total_packets; 2553 if (packets) 2554 avg_wire_size = q_vector->rx.total_bytes / packets; 2555 2556 packets = q_vector->tx.total_packets; 2557 if (packets) 2558 avg_wire_size = max_t(u32, avg_wire_size, 2559 q_vector->tx.total_bytes / packets); 2560 2561 /* if avg_wire_size isn't set no work was done */ 2562 if (!avg_wire_size) 2563 goto clear_counts; 2564 2565 /* Add 24 bytes to size to account for CRC, preamble, and gap */ 2566 avg_wire_size += 24; 2567 2568 /* Don't starve jumbo frames */ 2569 avg_wire_size = min(avg_wire_size, 3000); 2570 2571 /* Give a little boost to mid-size frames */ 2572 if (avg_wire_size > 300 && avg_wire_size < 1200) 2573 new_val = avg_wire_size / 3; 2574 else 2575 new_val = avg_wire_size / 2; 2576 2577 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2578 if (new_val < IGC_20K_ITR && 2579 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || 2580 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) 2581 new_val = IGC_20K_ITR; 2582 2583 set_itr_val: 2584 if (new_val != q_vector->itr_val) { 2585 q_vector->itr_val = new_val; 2586 q_vector->set_itr = 1; 2587 } 2588 clear_counts: 2589 q_vector->rx.total_bytes = 0; 2590 q_vector->rx.total_packets = 0; 2591 q_vector->tx.total_bytes = 0; 2592 q_vector->tx.total_packets = 0; 2593 } 2594 2595 /** 2596 * igc_update_itr - update the dynamic ITR value based on statistics 2597 * @q_vector: pointer to q_vector 2598 * @ring_container: ring info to update the itr for 2599 * 2600 * Stores a new ITR value based on packets and byte 2601 * counts during the last interrupt. The advantage of per interrupt 2602 * computation is faster updates and more accurate ITR for the current 2603 * traffic pattern. Constants in this function were computed 2604 * based on theoretical maximum wire speed and thresholds were set based 2605 * on testing data as well as attempting to minimize response time 2606 * while increasing bulk throughput. 2607 * NOTE: These calculations are only valid when operating in a single- 2608 * queue environment. 
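 * As a worked example of the thresholds below: 40 packets totalling
 * 20000 bytes seen in the low_latency state clear both the 10000-byte
 * and 35-packet bars and move the ring to lowest_latency, while 5
 * packets totalling 45000 bytes (9000 bytes/packet) look like TSO
 * traffic and demote it to bulk_latency.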
2609  */
2610 static void igc_update_itr(struct igc_q_vector *q_vector,
2611			    struct igc_ring_container *ring_container)
2612 {
2613	unsigned int packets = ring_container->total_packets;
2614	unsigned int bytes = ring_container->total_bytes;
2615	u8 itrval = ring_container->itr;
2616
2617	/* no packets, exit with status unchanged */
2618	if (packets == 0)
2619		return;
2620
2621	switch (itrval) {
2622	case lowest_latency:
2623		/* handle TSO and jumbo frames */
2624		if (bytes / packets > 8000)
2625			itrval = bulk_latency;
2626		else if ((packets < 5) && (bytes > 512))
2627			itrval = low_latency;
2628		break;
2629	case low_latency:  /* 50 usec aka 20000 ints/s */
2630		if (bytes > 10000) {
2631			/* this if handles the TSO accounting */
2632			if (bytes / packets > 8000)
2633				itrval = bulk_latency;
2634			else if ((packets < 10) || ((bytes / packets) > 1200))
2635				itrval = bulk_latency;
2636			else if (packets > 35)
2637				itrval = lowest_latency;
2638		} else if (bytes / packets > 2000) {
2639			itrval = bulk_latency;
2640		} else if (packets <= 2 && bytes < 512) {
2641			itrval = lowest_latency;
2642		}
2643		break;
2644	case bulk_latency: /* 250 usec aka 4000 ints/s */
2645		if (bytes > 25000) {
2646			if (packets > 35)
2647				itrval = low_latency;
2648		} else if (bytes < 1500) {
2649			itrval = low_latency;
2650		}
2651		break;
2652	}
2653
2654	/* clear work counters since we have the values we need */
2655	ring_container->total_bytes = 0;
2656	ring_container->total_packets = 0;
2657
2658	/* write updated itr to ring container */
2659	ring_container->itr = itrval;
2660 }
2661
2662 /**
2663  * igc_intr_msi - Interrupt Handler
2664  * @irq: interrupt number
2665  * @data: pointer to our adapter structure
2666  */
2667 static irqreturn_t igc_intr_msi(int irq, void *data)
2668 {
2669	struct igc_adapter *adapter = data;
2670	struct igc_q_vector *q_vector = adapter->q_vector[0];
2671	struct igc_hw *hw = &adapter->hw;
2672	/* read ICR disables interrupts using IAM */
2673	u32 icr = rd32(IGC_ICR);
2674
2675	igc_write_itr(q_vector);
2676
2677	if (icr & IGC_ICR_DRSTA)
2678		schedule_work(&adapter->reset_task);
2679
2680	if (icr & IGC_ICR_DOUTSYNC) {
2681		/* HW is reporting DMA is out of sync */
2682		adapter->stats.doosync++;
2683	}
2684
2685	if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
2686		hw->mac.get_link_status = 1;
2687		if (!test_bit(__IGC_DOWN, &adapter->state))
2688			mod_timer(&adapter->watchdog_timer, jiffies + 1);
2689	}
2690
2691	napi_schedule(&q_vector->napi);
2692
2693	return IRQ_HANDLED;
2694 }
2695
2696 /**
2697  * igc_intr - Legacy Interrupt Handler
2698  * @irq: interrupt number
2699  * @data: pointer to our adapter structure
2700  */
2701 static irqreturn_t igc_intr(int irq, void *data)
2702 {
2703	struct igc_adapter *adapter = data;
2704	struct igc_q_vector *q_vector = adapter->q_vector[0];
2705	struct igc_hw *hw = &adapter->hw;
2706	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.
No 2707 * need for the IMC write 2708 */ 2709 u32 icr = rd32(IGC_ICR); 2710 2711 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is 2712 * not set, then the adapter didn't send an interrupt 2713 */ 2714 if (!(icr & IGC_ICR_INT_ASSERTED)) 2715 return IRQ_NONE; 2716 2717 igc_write_itr(q_vector); 2718 2719 if (icr & IGC_ICR_DRSTA) 2720 schedule_work(&adapter->reset_task); 2721 2722 if (icr & IGC_ICR_DOUTSYNC) { 2723 /* HW is reporting DMA is out of sync */ 2724 adapter->stats.doosync++; 2725 } 2726 2727 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { 2728 hw->mac.get_link_status = 1; 2729 /* guard against interrupt when we're going down */ 2730 if (!test_bit(__IGC_DOWN, &adapter->state)) 2731 mod_timer(&adapter->watchdog_timer, jiffies + 1); 2732 } 2733 2734 napi_schedule(&q_vector->napi); 2735 2736 return IRQ_HANDLED; 2737 } 2738 2739 static void igc_set_itr(struct igc_q_vector *q_vector) 2740 { 2741 struct igc_adapter *adapter = q_vector->adapter; 2742 u32 new_itr = q_vector->itr_val; 2743 u8 current_itr = 0; 2744 2745 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 2746 switch (adapter->link_speed) { 2747 case SPEED_10: 2748 case SPEED_100: 2749 current_itr = 0; 2750 new_itr = IGC_4K_ITR; 2751 goto set_itr_now; 2752 default: 2753 break; 2754 } 2755 2756 igc_update_itr(q_vector, &q_vector->tx); 2757 igc_update_itr(q_vector, &q_vector->rx); 2758 2759 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); 2760 2761 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2762 if (current_itr == lowest_latency && 2763 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || 2764 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) 2765 current_itr = low_latency; 2766 2767 switch (current_itr) { 2768 /* counts and packets in update_itr are dependent on these numbers */ 2769 case lowest_latency: 2770 new_itr = IGC_70K_ITR; /* 70,000 ints/sec */ 2771 break; 2772 case low_latency: 2773 new_itr = IGC_20K_ITR; /* 20,000 ints/sec */ 2774 break; 2775 case bulk_latency: 2776 new_itr = IGC_4K_ITR; /* 4,000 ints/sec */ 2777 break; 2778 default: 2779 break; 2780 } 2781 2782 set_itr_now: 2783 if (new_itr != q_vector->itr_val) { 2784 /* this attempts to bias the interrupt rate towards Bulk 2785 * by adding intermediate steps when interrupt rate is 2786 * increasing 2787 */ 2788 new_itr = new_itr > q_vector->itr_val ? 2789 max((new_itr * q_vector->itr_val) / 2790 (new_itr + (q_vector->itr_val >> 2)), 2791 new_itr) : new_itr; 2792 /* Don't write the value here; it resets the adapter's 2793 * internal timer, and causes us to delay far longer than 2794 * we should between interrupts. Instead, we write the ITR 2795 * value at the beginning of the next interrupt so the timing 2796 * ends up being correct. 
2797 */ 2798 q_vector->itr_val = new_itr; 2799 q_vector->set_itr = 1; 2800 } 2801 } 2802 2803 static void igc_ring_irq_enable(struct igc_q_vector *q_vector) 2804 { 2805 struct igc_adapter *adapter = q_vector->adapter; 2806 struct igc_hw *hw = &adapter->hw; 2807 2808 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || 2809 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { 2810 if (adapter->num_q_vectors == 1) 2811 igc_set_itr(q_vector); 2812 else 2813 igc_update_ring_itr(q_vector); 2814 } 2815 2816 if (!test_bit(__IGC_DOWN, &adapter->state)) { 2817 if (adapter->msix_entries) 2818 wr32(IGC_EIMS, q_vector->eims_value); 2819 else 2820 igc_irq_enable(adapter); 2821 } 2822 } 2823 2824 /** 2825 * igc_poll - NAPI Rx polling callback 2826 * @napi: napi polling structure 2827 * @budget: count of how many packets we should handle 2828 */ 2829 static int igc_poll(struct napi_struct *napi, int budget) 2830 { 2831 struct igc_q_vector *q_vector = container_of(napi, 2832 struct igc_q_vector, 2833 napi); 2834 bool clean_complete = true; 2835 int work_done = 0; 2836 2837 if (q_vector->tx.ring) 2838 clean_complete = igc_clean_tx_irq(q_vector, budget); 2839 2840 if (q_vector->rx.ring) { 2841 int cleaned = igc_clean_rx_irq(q_vector, budget); 2842 2843 work_done += cleaned; 2844 if (cleaned >= budget) 2845 clean_complete = false; 2846 } 2847 2848 /* If all work not completed, return budget and keep polling */ 2849 if (!clean_complete) 2850 return budget; 2851 2852 /* Exit the polling mode, but don't re-enable interrupts if stack might 2853 * poll us due to busy-polling 2854 */ 2855 if (likely(napi_complete_done(napi, work_done))) 2856 igc_ring_irq_enable(q_vector); 2857 2858 return min(work_done, budget - 1); 2859 } 2860 2861 /** 2862 * igc_set_interrupt_capability - set MSI or MSI-X if supported 2863 * @adapter: Pointer to adapter structure 2864 * 2865 * Attempt to configure interrupts using the best available 2866 * capabilities of the hardware and kernel. 2867 */ 2868 static void igc_set_interrupt_capability(struct igc_adapter *adapter, 2869 bool msix) 2870 { 2871 int numvecs, i; 2872 int err; 2873 2874 if (!msix) 2875 goto msi_only; 2876 adapter->flags |= IGC_FLAG_HAS_MSIX; 2877 2878 /* Number of supported queues. 
*/ 2879 adapter->num_rx_queues = adapter->rss_queues; 2880 2881 adapter->num_tx_queues = adapter->rss_queues; 2882 2883 /* start with one vector for every Rx queue */ 2884 numvecs = adapter->num_rx_queues; 2885 2886 /* if Tx handler is separate add 1 for every Tx queue */ 2887 if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) 2888 numvecs += adapter->num_tx_queues; 2889 2890 /* store the number of vectors reserved for queues */ 2891 adapter->num_q_vectors = numvecs; 2892 2893 /* add 1 vector for link status interrupts */ 2894 numvecs++; 2895 2896 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), 2897 GFP_KERNEL); 2898 2899 if (!adapter->msix_entries) 2900 return; 2901 2902 /* populate entry values */ 2903 for (i = 0; i < numvecs; i++) 2904 adapter->msix_entries[i].entry = i; 2905 2906 err = pci_enable_msix_range(adapter->pdev, 2907 adapter->msix_entries, 2908 numvecs, 2909 numvecs); 2910 if (err > 0) 2911 return; 2912 2913 kfree(adapter->msix_entries); 2914 adapter->msix_entries = NULL; 2915 2916 igc_reset_interrupt_capability(adapter); 2917 2918 msi_only: 2919 adapter->flags &= ~IGC_FLAG_HAS_MSIX; 2920 2921 adapter->rss_queues = 1; 2922 adapter->flags |= IGC_FLAG_QUEUE_PAIRS; 2923 adapter->num_rx_queues = 1; 2924 adapter->num_tx_queues = 1; 2925 adapter->num_q_vectors = 1; 2926 if (!pci_enable_msi(adapter->pdev)) 2927 adapter->flags |= IGC_FLAG_HAS_MSI; 2928 } 2929 2930 static void igc_add_ring(struct igc_ring *ring, 2931 struct igc_ring_container *head) 2932 { 2933 head->ring = ring; 2934 head->count++; 2935 } 2936 2937 /** 2938 * igc_alloc_q_vector - Allocate memory for a single interrupt vector 2939 * @adapter: board private structure to initialize 2940 * @v_count: q_vectors allocated on adapter, used for ring interleaving 2941 * @v_idx: index of vector in adapter struct 2942 * @txr_count: total number of Tx rings to allocate 2943 * @txr_idx: index of first Tx ring to allocate 2944 * @rxr_count: total number of Rx rings to allocate 2945 * @rxr_idx: index of first Rx ring to allocate 2946 * 2947 * We allocate one q_vector. If allocation fails we return -ENOMEM. 
2948 */ 2949 static int igc_alloc_q_vector(struct igc_adapter *adapter, 2950 unsigned int v_count, unsigned int v_idx, 2951 unsigned int txr_count, unsigned int txr_idx, 2952 unsigned int rxr_count, unsigned int rxr_idx) 2953 { 2954 struct igc_q_vector *q_vector; 2955 struct igc_ring *ring; 2956 int ring_count; 2957 2958 /* igc only supports 1 Tx and/or 1 Rx queue per vector */ 2959 if (txr_count > 1 || rxr_count > 1) 2960 return -ENOMEM; 2961 2962 ring_count = txr_count + rxr_count; 2963 2964 /* allocate q_vector and rings */ 2965 q_vector = adapter->q_vector[v_idx]; 2966 if (!q_vector) 2967 q_vector = kzalloc(struct_size(q_vector, ring, ring_count), 2968 GFP_KERNEL); 2969 else 2970 memset(q_vector, 0, struct_size(q_vector, ring, ring_count)); 2971 if (!q_vector) 2972 return -ENOMEM; 2973 2974 /* initialize NAPI */ 2975 netif_napi_add(adapter->netdev, &q_vector->napi, 2976 igc_poll, 64); 2977 2978 /* tie q_vector and adapter together */ 2979 adapter->q_vector[v_idx] = q_vector; 2980 q_vector->adapter = adapter; 2981 2982 /* initialize work limits */ 2983 q_vector->tx.work_limit = adapter->tx_work_limit; 2984 2985 /* initialize ITR configuration */ 2986 q_vector->itr_register = adapter->io_addr + IGC_EITR(0); 2987 q_vector->itr_val = IGC_START_ITR; 2988 2989 /* initialize pointer to rings */ 2990 ring = q_vector->ring; 2991 2992 /* initialize ITR */ 2993 if (rxr_count) { 2994 /* rx or rx/tx vector */ 2995 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) 2996 q_vector->itr_val = adapter->rx_itr_setting; 2997 } else { 2998 /* tx only vector */ 2999 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) 3000 q_vector->itr_val = adapter->tx_itr_setting; 3001 } 3002 3003 if (txr_count) { 3004 /* assign generic ring traits */ 3005 ring->dev = &adapter->pdev->dev; 3006 ring->netdev = adapter->netdev; 3007 3008 /* configure backlink on ring */ 3009 ring->q_vector = q_vector; 3010 3011 /* update q_vector Tx values */ 3012 igc_add_ring(ring, &q_vector->tx); 3013 3014 /* apply Tx specific ring traits */ 3015 ring->count = adapter->tx_ring_count; 3016 ring->queue_index = txr_idx; 3017 3018 /* assign ring to adapter */ 3019 adapter->tx_ring[txr_idx] = ring; 3020 3021 /* push pointer to next ring */ 3022 ring++; 3023 } 3024 3025 if (rxr_count) { 3026 /* assign generic ring traits */ 3027 ring->dev = &adapter->pdev->dev; 3028 ring->netdev = adapter->netdev; 3029 3030 /* configure backlink on ring */ 3031 ring->q_vector = q_vector; 3032 3033 /* update q_vector Rx values */ 3034 igc_add_ring(ring, &q_vector->rx); 3035 3036 /* apply Rx specific ring traits */ 3037 ring->count = adapter->rx_ring_count; 3038 ring->queue_index = rxr_idx; 3039 3040 /* assign ring to adapter */ 3041 adapter->rx_ring[rxr_idx] = ring; 3042 } 3043 3044 return 0; 3045 } 3046 3047 /** 3048 * igc_alloc_q_vectors - Allocate memory for interrupt vectors 3049 * @adapter: board private structure to initialize 3050 * 3051 * We allocate one q_vector per queue interrupt. If allocation fails we 3052 * return -ENOMEM. 
3053 */ 3054 static int igc_alloc_q_vectors(struct igc_adapter *adapter) 3055 { 3056 int rxr_remaining = adapter->num_rx_queues; 3057 int txr_remaining = adapter->num_tx_queues; 3058 int rxr_idx = 0, txr_idx = 0, v_idx = 0; 3059 int q_vectors = adapter->num_q_vectors; 3060 int err; 3061 3062 if (q_vectors >= (rxr_remaining + txr_remaining)) { 3063 for (; rxr_remaining; v_idx++) { 3064 err = igc_alloc_q_vector(adapter, q_vectors, v_idx, 3065 0, 0, 1, rxr_idx); 3066 3067 if (err) 3068 goto err_out; 3069 3070 /* update counts and index */ 3071 rxr_remaining--; 3072 rxr_idx++; 3073 } 3074 } 3075 3076 for (; v_idx < q_vectors; v_idx++) { 3077 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); 3078 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); 3079 3080 err = igc_alloc_q_vector(adapter, q_vectors, v_idx, 3081 tqpv, txr_idx, rqpv, rxr_idx); 3082 3083 if (err) 3084 goto err_out; 3085 3086 /* update counts and index */ 3087 rxr_remaining -= rqpv; 3088 txr_remaining -= tqpv; 3089 rxr_idx++; 3090 txr_idx++; 3091 } 3092 3093 return 0; 3094 3095 err_out: 3096 adapter->num_tx_queues = 0; 3097 adapter->num_rx_queues = 0; 3098 adapter->num_q_vectors = 0; 3099 3100 while (v_idx--) 3101 igc_free_q_vector(adapter, v_idx); 3102 3103 return -ENOMEM; 3104 } 3105 3106 /** 3107 * igc_cache_ring_register - Descriptor ring to register mapping 3108 * @adapter: board private structure to initialize 3109 * 3110 * Once we know the feature-set enabled for the device, we'll cache 3111 * the register offset the descriptor ring is assigned to. 3112 */ 3113 static void igc_cache_ring_register(struct igc_adapter *adapter) 3114 { 3115 int i = 0, j = 0; 3116 3117 switch (adapter->hw.mac.type) { 3118 case igc_i225: 3119 /* Fall through */ 3120 default: 3121 for (; i < adapter->num_rx_queues; i++) 3122 adapter->rx_ring[i]->reg_idx = i; 3123 for (; j < adapter->num_tx_queues; j++) 3124 adapter->tx_ring[j]->reg_idx = j; 3125 break; 3126 } 3127 } 3128 3129 /** 3130 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors 3131 * @adapter: Pointer to adapter structure 3132 * 3133 * This function initializes the interrupts and allocates all of the queues. 
3134 */ 3135 static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix) 3136 { 3137 struct pci_dev *pdev = adapter->pdev; 3138 int err = 0; 3139 3140 igc_set_interrupt_capability(adapter, msix); 3141 3142 err = igc_alloc_q_vectors(adapter); 3143 if (err) { 3144 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n"); 3145 goto err_alloc_q_vectors; 3146 } 3147 3148 igc_cache_ring_register(adapter); 3149 3150 return 0; 3151 3152 err_alloc_q_vectors: 3153 igc_reset_interrupt_capability(adapter); 3154 return err; 3155 } 3156 3157 static void igc_free_irq(struct igc_adapter *adapter) 3158 { 3159 if (adapter->msix_entries) { 3160 int vector = 0, i; 3161 3162 free_irq(adapter->msix_entries[vector++].vector, adapter); 3163 3164 for (i = 0; i < adapter->num_q_vectors; i++) 3165 free_irq(adapter->msix_entries[vector++].vector, 3166 adapter->q_vector[i]); 3167 } else { 3168 free_irq(adapter->pdev->irq, adapter); 3169 } 3170 } 3171 3172 /** 3173 * igc_irq_disable - Mask off interrupt generation on the NIC 3174 * @adapter: board private structure 3175 */ 3176 static void igc_irq_disable(struct igc_adapter *adapter) 3177 { 3178 struct igc_hw *hw = &adapter->hw; 3179 3180 if (adapter->msix_entries) { 3181 u32 regval = rd32(IGC_EIAM); 3182 3183 wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask); 3184 wr32(IGC_EIMC, adapter->eims_enable_mask); 3185 regval = rd32(IGC_EIAC); 3186 wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask); 3187 } 3188 3189 wr32(IGC_IAM, 0); 3190 wr32(IGC_IMC, ~0); 3191 wrfl(); 3192 3193 if (adapter->msix_entries) { 3194 int vector = 0, i; 3195 3196 synchronize_irq(adapter->msix_entries[vector++].vector); 3197 3198 for (i = 0; i < adapter->num_q_vectors; i++) 3199 synchronize_irq(adapter->msix_entries[vector++].vector); 3200 } else { 3201 synchronize_irq(adapter->pdev->irq); 3202 } 3203 } 3204 3205 /** 3206 * igc_irq_enable - Enable default interrupt generation settings 3207 * @adapter: board private structure 3208 */ 3209 static void igc_irq_enable(struct igc_adapter *adapter) 3210 { 3211 struct igc_hw *hw = &adapter->hw; 3212 3213 if (adapter->msix_entries) { 3214 u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA; 3215 u32 regval = rd32(IGC_EIAC); 3216 3217 wr32(IGC_EIAC, regval | adapter->eims_enable_mask); 3218 regval = rd32(IGC_EIAM); 3219 wr32(IGC_EIAM, regval | adapter->eims_enable_mask); 3220 wr32(IGC_EIMS, adapter->eims_enable_mask); 3221 wr32(IGC_IMS, ims); 3222 } else { 3223 wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA); 3224 wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA); 3225 } 3226 } 3227 3228 /** 3229 * igc_request_irq - initialize interrupts 3230 * @adapter: Pointer to adapter structure 3231 * 3232 * Attempts to configure interrupts using the best available 3233 * capabilities of the hardware and kernel. 
3234 */ 3235 static int igc_request_irq(struct igc_adapter *adapter) 3236 { 3237 struct net_device *netdev = adapter->netdev; 3238 struct pci_dev *pdev = adapter->pdev; 3239 int err = 0; 3240 3241 if (adapter->flags & IGC_FLAG_HAS_MSIX) { 3242 err = igc_request_msix(adapter); 3243 if (!err) 3244 goto request_done; 3245 /* fall back to MSI */ 3246 igc_free_all_tx_resources(adapter); 3247 igc_free_all_rx_resources(adapter); 3248 3249 igc_clear_interrupt_scheme(adapter); 3250 err = igc_init_interrupt_scheme(adapter, false); 3251 if (err) 3252 goto request_done; 3253 igc_setup_all_tx_resources(adapter); 3254 igc_setup_all_rx_resources(adapter); 3255 igc_configure(adapter); 3256 } 3257 3258 igc_assign_vector(adapter->q_vector[0], 0); 3259 3260 if (adapter->flags & IGC_FLAG_HAS_MSI) { 3261 err = request_irq(pdev->irq, &igc_intr_msi, 0, 3262 netdev->name, adapter); 3263 if (!err) 3264 goto request_done; 3265 3266 /* fall back to legacy interrupts */ 3267 igc_reset_interrupt_capability(adapter); 3268 adapter->flags &= ~IGC_FLAG_HAS_MSI; 3269 } 3270 3271 err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, 3272 netdev->name, adapter); 3273 3274 if (err) 3275 dev_err(&pdev->dev, "Error %d getting interrupt\n", 3276 err); 3277 3278 request_done: 3279 return err; 3280 } 3281 3282 static void igc_write_itr(struct igc_q_vector *q_vector) 3283 { 3284 u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK; 3285 3286 if (!q_vector->set_itr) 3287 return; 3288 3289 if (!itr_val) 3290 itr_val = IGC_ITR_VAL_MASK; 3291 3292 itr_val |= IGC_EITR_CNT_IGNR; 3293 3294 writel(itr_val, q_vector->itr_register); 3295 q_vector->set_itr = 0; 3296 } 3297 3298 /** 3299 * igc_open - Called when a network interface is made active 3300 * @netdev: network interface device structure 3301 * 3302 * Returns 0 on success, negative value on failure 3303 * 3304 * The open entry point is called when a network interface is made 3305 * active by the system (IFF_UP). At this point all resources needed 3306 * for transmit and receive operations are allocated, the interrupt 3307 * handler is registered with the OS, the watchdog timer is started, 3308 * and the stack is notified that the interface is ready. 3309 */ 3310 static int __igc_open(struct net_device *netdev, bool resuming) 3311 { 3312 struct igc_adapter *adapter = netdev_priv(netdev); 3313 struct igc_hw *hw = &adapter->hw; 3314 int err = 0; 3315 int i = 0; 3316 3317 /* disallow open during test */ 3318 3319 if (test_bit(__IGC_TESTING, &adapter->state)) { 3320 WARN_ON(resuming); 3321 return -EBUSY; 3322 } 3323 3324 netif_carrier_off(netdev); 3325 3326 /* allocate transmit descriptors */ 3327 err = igc_setup_all_tx_resources(adapter); 3328 if (err) 3329 goto err_setup_tx; 3330 3331 /* allocate receive descriptors */ 3332 err = igc_setup_all_rx_resources(adapter); 3333 if (err) 3334 goto err_setup_rx; 3335 3336 igc_power_up_link(adapter); 3337 3338 igc_configure(adapter); 3339 3340 err = igc_request_irq(adapter); 3341 if (err) 3342 goto err_req_irq; 3343 3344 /* Notify the stack of the actual queue counts. */ 3345 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); 3346 if (err) 3347 goto err_set_queues; 3348 3349 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); 3350 if (err) 3351 goto err_set_queues; 3352 3353 clear_bit(__IGC_DOWN, &adapter->state); 3354 3355 for (i = 0; i < adapter->num_q_vectors; i++) 3356 napi_enable(&adapter->q_vector[i]->napi); 3357 3358 /* Clear any pending interrupts. 
*/ 3359 rd32(IGC_ICR); 3360 igc_irq_enable(adapter); 3361 3362 netif_tx_start_all_queues(netdev); 3363 3364 /* start the watchdog. */ 3365 hw->mac.get_link_status = 1; 3366 schedule_work(&adapter->watchdog_task); 3367 3368 return IGC_SUCCESS; 3369 3370 err_set_queues: 3371 igc_free_irq(adapter); 3372 err_req_irq: 3373 igc_release_hw_control(adapter); 3374 igc_power_down_link(adapter); 3375 igc_free_all_rx_resources(adapter); 3376 err_setup_rx: 3377 igc_free_all_tx_resources(adapter); 3378 err_setup_tx: 3379 igc_reset(adapter); 3380 3381 return err; 3382 } 3383 3384 static int igc_open(struct net_device *netdev) 3385 { 3386 return __igc_open(netdev, false); 3387 } 3388 3389 /** 3390 * igc_close - Disables a network interface 3391 * @netdev: network interface device structure 3392 * 3393 * Returns 0, this is not allowed to fail 3394 * 3395 * The close entry point is called when an interface is de-activated 3396 * by the OS. The hardware is still under the driver's control, but 3397 * needs to be disabled. A global MAC reset is issued to stop the 3398 * hardware, and all transmit and receive resources are freed. 3399 */ 3400 static int __igc_close(struct net_device *netdev, bool suspending) 3401 { 3402 struct igc_adapter *adapter = netdev_priv(netdev); 3403 3404 WARN_ON(test_bit(__IGC_RESETTING, &adapter->state)); 3405 3406 igc_down(adapter); 3407 3408 igc_release_hw_control(adapter); 3409 3410 igc_free_irq(adapter); 3411 3412 igc_free_all_tx_resources(adapter); 3413 igc_free_all_rx_resources(adapter); 3414 3415 return 0; 3416 } 3417 3418 static int igc_close(struct net_device *netdev) 3419 { 3420 if (netif_device_present(netdev) || netdev->dismantle) 3421 return __igc_close(netdev, false); 3422 return 0; 3423 } 3424 3425 static const struct net_device_ops igc_netdev_ops = { 3426 .ndo_open = igc_open, 3427 .ndo_stop = igc_close, 3428 .ndo_start_xmit = igc_xmit_frame, 3429 .ndo_set_mac_address = igc_set_mac, 3430 .ndo_change_mtu = igc_change_mtu, 3431 .ndo_get_stats = igc_get_stats, 3432 }; 3433 3434 /* PCIe configuration access */ 3435 void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) 3436 { 3437 struct igc_adapter *adapter = hw->back; 3438 3439 pci_read_config_word(adapter->pdev, reg, value); 3440 } 3441 3442 void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) 3443 { 3444 struct igc_adapter *adapter = hw->back; 3445 3446 pci_write_config_word(adapter->pdev, reg, *value); 3447 } 3448 3449 s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) 3450 { 3451 struct igc_adapter *adapter = hw->back; 3452 u16 cap_offset; 3453 3454 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); 3455 if (!cap_offset) 3456 return -IGC_ERR_CONFIG; 3457 3458 pci_read_config_word(adapter->pdev, cap_offset + reg, value); 3459 3460 return IGC_SUCCESS; 3461 } 3462 3463 s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) 3464 { 3465 struct igc_adapter *adapter = hw->back; 3466 u16 cap_offset; 3467 3468 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); 3469 if (!cap_offset) 3470 return -IGC_ERR_CONFIG; 3471 3472 pci_write_config_word(adapter->pdev, cap_offset + reg, *value); 3473 3474 return IGC_SUCCESS; 3475 } 3476 3477 u32 igc_rd32(struct igc_hw *hw, u32 reg) 3478 { 3479 struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw); 3480 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); 3481 u32 value = 0; 3482 3483 if (IGC_REMOVED(hw_addr)) 3484 return ~value; 3485 3486 value = readl(&hw_addr[reg]); 3487 3488 /* reads should not return all 
F's */ 3489 if (!(~value) && (!reg || !(~readl(hw_addr)))) { 3490 struct net_device *netdev = igc->netdev; 3491 3492 hw->hw_addr = NULL; 3493 netif_device_detach(netdev); 3494 netdev_err(netdev, "PCIe link lost, device now detached\n"); 3495 } 3496 3497 return value; 3498 } 3499 3500 int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx) 3501 { 3502 struct pci_dev *pdev = adapter->pdev; 3503 struct igc_mac_info *mac = &adapter->hw.mac; 3504 3505 mac->autoneg = 0; 3506 3507 /* Make sure dplx is at most 1 bit and lsb of speed is not set 3508 * for the switch() below to work 3509 */ 3510 if ((spd & 1) || (dplx & ~1)) 3511 goto err_inval; 3512 3513 switch (spd + dplx) { 3514 case SPEED_10 + DUPLEX_HALF: 3515 mac->forced_speed_duplex = ADVERTISE_10_HALF; 3516 break; 3517 case SPEED_10 + DUPLEX_FULL: 3518 mac->forced_speed_duplex = ADVERTISE_10_FULL; 3519 break; 3520 case SPEED_100 + DUPLEX_HALF: 3521 mac->forced_speed_duplex = ADVERTISE_100_HALF; 3522 break; 3523 case SPEED_100 + DUPLEX_FULL: 3524 mac->forced_speed_duplex = ADVERTISE_100_FULL; 3525 break; 3526 case SPEED_1000 + DUPLEX_FULL: 3527 mac->autoneg = 1; 3528 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; 3529 break; 3530 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 3531 goto err_inval; 3532 case SPEED_2500 + DUPLEX_FULL: 3533 mac->autoneg = 1; 3534 adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL; 3535 break; 3536 case SPEED_2500 + DUPLEX_HALF: /* not supported */ 3537 default: 3538 goto err_inval; 3539 } 3540 3541 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ 3542 adapter->hw.phy.mdix = AUTO_ALL_MODES; 3543 3544 return 0; 3545 3546 err_inval: 3547 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); 3548 return -EINVAL; 3549 } 3550 3551 /** 3552 * igc_probe - Device Initialization Routine 3553 * @pdev: PCI device information struct 3554 * @ent: entry in igc_pci_tbl 3555 * 3556 * Returns 0 on success, negative on failure 3557 * 3558 * igc_probe initializes an adapter identified by a pci_dev structure. 3559 * The OS initialization, configuring the adapter private structure, 3560 * and a hardware reset occur. 
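 * Roughly in order: PCI/DMA setup, BAR 0 ioremap, wiring of the MAC
 * and PHY ops from the igc_info table, software init via igc_sw_init(),
 * MAC address retrieval from platform data or the NVM, and finally
 * netdev registration.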
3561 */ 3562 static int igc_probe(struct pci_dev *pdev, 3563 const struct pci_device_id *ent) 3564 { 3565 struct igc_adapter *adapter; 3566 struct net_device *netdev; 3567 struct igc_hw *hw; 3568 const struct igc_info *ei = igc_info_tbl[ent->driver_data]; 3569 int err; 3570 3571 err = pci_enable_device_mem(pdev); 3572 if (err) 3573 return err; 3574 3575 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 3576 if (!err) { 3577 err = dma_set_coherent_mask(&pdev->dev, 3578 DMA_BIT_MASK(64)); 3579 } else { 3580 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 3581 if (err) { 3582 err = dma_set_coherent_mask(&pdev->dev, 3583 DMA_BIT_MASK(32)); 3584 if (err) { 3585 dev_err(&pdev->dev, "igc: Wrong DMA config\n"); 3586 goto err_dma; 3587 } 3588 } 3589 } 3590 3591 err = pci_request_selected_regions(pdev, 3592 pci_select_bars(pdev, 3593 IORESOURCE_MEM), 3594 igc_driver_name); 3595 if (err) 3596 goto err_pci_reg; 3597 3598 pci_enable_pcie_error_reporting(pdev); 3599 3600 pci_set_master(pdev); 3601 3602 err = -ENOMEM; 3603 netdev = alloc_etherdev_mq(sizeof(struct igc_adapter), 3604 IGC_MAX_TX_QUEUES); 3605 3606 if (!netdev) 3607 goto err_alloc_etherdev; 3608 3609 SET_NETDEV_DEV(netdev, &pdev->dev); 3610 3611 pci_set_drvdata(pdev, netdev); 3612 adapter = netdev_priv(netdev); 3613 adapter->netdev = netdev; 3614 adapter->pdev = pdev; 3615 hw = &adapter->hw; 3616 hw->back = adapter; 3617 adapter->port_num = hw->bus.func; 3618 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 3619 3620 err = pci_save_state(pdev); 3621 if (err) 3622 goto err_ioremap; 3623 3624 err = -EIO; 3625 adapter->io_addr = ioremap(pci_resource_start(pdev, 0), 3626 pci_resource_len(pdev, 0)); 3627 if (!adapter->io_addr) 3628 goto err_ioremap; 3629 3630 /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */ 3631 hw->hw_addr = adapter->io_addr; 3632 3633 netdev->netdev_ops = &igc_netdev_ops; 3634 igc_set_ethtool_ops(netdev); 3635 netdev->watchdog_timeo = 5 * HZ; 3636 3637 netdev->mem_start = pci_resource_start(pdev, 0); 3638 netdev->mem_end = pci_resource_end(pdev, 0); 3639 3640 /* PCI config space info */ 3641 hw->vendor_id = pdev->vendor; 3642 hw->device_id = pdev->device; 3643 hw->revision_id = pdev->revision; 3644 hw->subsystem_vendor_id = pdev->subsystem_vendor; 3645 hw->subsystem_device_id = pdev->subsystem_device; 3646 3647 /* Copy the default MAC and PHY function pointers */ 3648 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); 3649 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); 3650 3651 /* Initialize skew-specific constants */ 3652 err = ei->get_invariants(hw); 3653 if (err) 3654 goto err_sw_init; 3655 3656 /* setup the private structure */ 3657 err = igc_sw_init(adapter); 3658 if (err) 3659 goto err_sw_init; 3660 3661 /* MTU range: 68 - 9216 */ 3662 netdev->min_mtu = ETH_MIN_MTU; 3663 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; 3664 3665 /* before reading the NVM, reset the controller to put the device in a 3666 * known good starting state 3667 */ 3668 hw->mac.ops.reset_hw(hw); 3669 3670 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { 3671 /* copy the MAC address out of the NVM */ 3672 if (hw->mac.ops.read_mac_addr(hw)) 3673 dev_err(&pdev->dev, "NVM Read Error\n"); 3674 } 3675 3676 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); 3677 3678 if (!is_valid_ether_addr(netdev->dev_addr)) { 3679 dev_err(&pdev->dev, "Invalid MAC Address\n"); 3680 err = -EIO; 3681 goto err_eeprom; 3682 } 3683 3684 /* configure RXPBSIZE and TXPBSIZE */ 3685 wr32(IGC_RXPBS, 
I225_RXPBSIZE_DEFAULT);
3686	wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
3687
3688	timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
3689	timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);
3690
3691	INIT_WORK(&adapter->reset_task, igc_reset_task);
3692	INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);
3693
3694	/* Initialize link properties that are user-changeable */
3695	adapter->fc_autoneg = true;
3696	hw->mac.autoneg = true;
3697	hw->phy.autoneg_advertised = 0xaf;
3698
3699	hw->fc.requested_mode = igc_fc_default;
3700	hw->fc.current_mode = igc_fc_default;
3701
3702	/* reset the hardware with the new settings */
3703	igc_reset(adapter);
3704
3705	/* let the f/w know that the h/w is now under the control of the
3706	 * driver.
3707	 */
3708	igc_get_hw_control(adapter);
3709
3710	strncpy(netdev->name, "eth%d", IFNAMSIZ);
3711	err = register_netdev(netdev);
3712	if (err)
3713		goto err_register;
3714
3715	/* carrier off reporting is important to ethtool even BEFORE open */
3716	netif_carrier_off(netdev);
3717
3718	/* keep a copy of the board info in the adapter */
3719	adapter->ei = *ei;
3720
3721	/* print pcie link status and MAC address */
3722	pcie_print_link_status(pdev);
3723	netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
3724
3725	return 0;
3726
3727 err_register:
3728	igc_release_hw_control(adapter);
3729 err_eeprom:
3730	if (!igc_check_reset_block(hw))
3731		igc_reset_phy(hw);
3732 err_sw_init:
3733	igc_clear_interrupt_scheme(adapter);
3734	iounmap(adapter->io_addr);
3735 err_ioremap:
3736	free_netdev(netdev);
3737 err_alloc_etherdev:
3738	pci_release_selected_regions(pdev,
3739				     pci_select_bars(pdev, IORESOURCE_MEM));
3740 err_pci_reg:
3741 err_dma:
3742	pci_disable_device(pdev);
3743	return err;
3744 }
3745
3746 /**
3747  * igc_remove - Device Removal Routine
3748  * @pdev: PCI device information struct
3749  *
3750  * igc_remove is called by the PCI subsystem to alert the driver
3751  * that it should release a PCI device. This could be caused by a
3752  * Hot-Plug event, or because the driver is going to be removed from
3753  * memory.
3754  */
3755 static void igc_remove(struct pci_dev *pdev)
3756 {
3757	struct net_device *netdev = pci_get_drvdata(pdev);
3758	struct igc_adapter *adapter = netdev_priv(netdev);
3759
3760	set_bit(__IGC_DOWN, &adapter->state);
3761
3762	del_timer_sync(&adapter->watchdog_timer);
3763	del_timer_sync(&adapter->phy_info_timer);
3764
3765	cancel_work_sync(&adapter->reset_task);
3766	cancel_work_sync(&adapter->watchdog_task);
3767
3768	/* Release control of h/w to f/w. If f/w is AMT enabled, this
3769	 * would have already happened in close and is redundant.
3770	 */
3771	igc_release_hw_control(adapter);
3772	unregister_netdev(netdev);
3773
3774	igc_clear_interrupt_scheme(adapter);
3775	pci_iounmap(pdev, adapter->io_addr);
3776	pci_release_mem_regions(pdev);
3777
3778	kfree(adapter->mac_table);
3779	kfree(adapter->shadow_vfta);
3780	free_netdev(netdev);
3781
3782	pci_disable_pcie_error_reporting(pdev);
3783
3784	pci_disable_device(pdev);
3785 }
3786
3787 static struct pci_driver igc_driver = {
3788	.name = igc_driver_name,
3789	.id_table = igc_pci_tbl,
3790	.probe = igc_probe,
3791	.remove = igc_remove,
3792 };
3793
3794 void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
3795			      const u32 max_rss_queues)
3796 {
3797	/* Determine if we need to pair queues: if rss_queues > half of
3798	 * max_rss_queues, pair the queues in order to conserve interrupts
3799	 * due to limited supply.
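	 * e.g. with a max of 4 RSS queues, asking for 3 or 4 queues
	 * enables pairing (one shared vector per Tx/Rx pair), while 1 or
	 * 2 leaves each ring on its own vector.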
3800 */ 3801 if (adapter->rss_queues > (max_rss_queues / 2)) 3802 adapter->flags |= IGC_FLAG_QUEUE_PAIRS; 3803 else 3804 adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS; 3805 } 3806 3807 unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter) 3808 { 3809 unsigned int max_rss_queues; 3810 3811 /* Determine the maximum number of RSS queues supported. */ 3812 max_rss_queues = IGC_MAX_RX_QUEUES; 3813 3814 return max_rss_queues; 3815 } 3816 3817 static void igc_init_queue_configuration(struct igc_adapter *adapter) 3818 { 3819 u32 max_rss_queues; 3820 3821 max_rss_queues = igc_get_max_rss_queues(adapter); 3822 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); 3823 3824 igc_set_flag_queue_pairs(adapter, max_rss_queues); 3825 } 3826 3827 /** 3828 * igc_sw_init - Initialize general software structures (struct igc_adapter) 3829 * @adapter: board private structure to initialize 3830 * 3831 * igc_sw_init initializes the Adapter private data structure. 3832 * Fields are initialized based on PCI device information and 3833 * OS network device settings (MTU size). 3834 */ 3835 static int igc_sw_init(struct igc_adapter *adapter) 3836 { 3837 struct net_device *netdev = adapter->netdev; 3838 struct pci_dev *pdev = adapter->pdev; 3839 struct igc_hw *hw = &adapter->hw; 3840 3841 int size = sizeof(struct igc_mac_addr) * hw->mac.rar_entry_count; 3842 3843 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); 3844 3845 /* set default ring sizes */ 3846 adapter->tx_ring_count = IGC_DEFAULT_TXD; 3847 adapter->rx_ring_count = IGC_DEFAULT_RXD; 3848 3849 /* set default ITR values */ 3850 adapter->rx_itr_setting = IGC_DEFAULT_ITR; 3851 adapter->tx_itr_setting = IGC_DEFAULT_ITR; 3852 3853 /* set default work limits */ 3854 adapter->tx_work_limit = IGC_DEFAULT_TX_WORK; 3855 3856 /* adjust max frame to be at least the size of a standard frame */ 3857 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + 3858 VLAN_HLEN; 3859 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 3860 3861 spin_lock_init(&adapter->nfc_lock); 3862 spin_lock_init(&adapter->stats64_lock); 3863 /* Assume MSI-X interrupts, will be checked during IRQ allocation */ 3864 adapter->flags |= IGC_FLAG_HAS_MSIX; 3865 3866 adapter->mac_table = kzalloc(size, GFP_ATOMIC); 3867 if (!adapter->mac_table) 3868 return -ENOMEM; 3869 3870 igc_init_queue_configuration(adapter); 3871 3872 /* This call may decrease the number of queues */ 3873 if (igc_init_interrupt_scheme(adapter, true)) { 3874 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 3875 return -ENOMEM; 3876 } 3877 3878 /* Explicitly disable IRQ since the NIC can be in any state. 
*/
3879	igc_irq_disable(adapter);
3880
3881	set_bit(__IGC_DOWN, &adapter->state);
3882
3883	return 0;
3884 }
3885
3886 /**
3887  * igc_reinit_queues - reinitialize the adapter's queues and interrupt scheme
3888  * @adapter: pointer to adapter structure
3889  */
3890 int igc_reinit_queues(struct igc_adapter *adapter)
3891 {
3892	struct net_device *netdev = adapter->netdev;
3893	struct pci_dev *pdev = adapter->pdev;
3894	int err = 0;
3895
3896	if (netif_running(netdev))
3897		igc_close(netdev);
3898
3899	igc_reset_interrupt_capability(adapter);
3900
3901	if (igc_init_interrupt_scheme(adapter, true)) {
3902		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
3903		return -ENOMEM;
3904	}
3905
3906	if (netif_running(netdev))
3907		err = igc_open(netdev);
3908
3909	return err;
3910 }
3911
3912 /**
3913  * igc_get_hw_dev - return the network device associated with a hw instance
3914  * @hw: pointer to hardware structure
3915  *
3916  * Used by the hardware layer to print debugging information.
3917  */
3918 struct net_device *igc_get_hw_dev(struct igc_hw *hw)
3919 {
3920	struct igc_adapter *adapter = hw->back;
3921
3922	return adapter->netdev;
3923 }
3924
3925 /**
3926  * igc_init_module - Driver Registration Routine
3927  *
3928  * igc_init_module is the first routine called when the driver is
3929  * loaded. All it does is register with the PCI subsystem.
3930  */
3931 static int __init igc_init_module(void)
3932 {
3933	int ret;
3934
3935	pr_info("%s - version %s\n",
3936		igc_driver_string, igc_driver_version);
3937
3938	pr_info("%s\n", igc_copyright);
3939
3940	ret = pci_register_driver(&igc_driver);
3941	return ret;
3942 }
3943
3944 module_init(igc_init_module);
3945
3946 /**
3947  * igc_exit_module - Driver Exit Cleanup Routine
3948  *
3949  * igc_exit_module is called just before the driver is removed
3950  * from memory.
3951  */
3952 static void __exit igc_exit_module(void)
3953 {
3954	pci_unregister_driver(&igc_driver);
3955 }
3956
3957 module_exit(igc_exit_module);
3958 /* igc_main.c */
3959