1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2018 Intel Corporation */ 3 4 #include <linux/module.h> 5 #include <linux/types.h> 6 #include <linux/if_vlan.h> 7 #include <linux/tcp.h> 8 #include <linux/udp.h> 9 #include <linux/ip.h> 10 #include <linux/pm_runtime.h> 11 #include <net/pkt_sched.h> 12 #include <linux/bpf_trace.h> 13 #include <net/xdp_sock_drv.h> 14 #include <linux/pci.h> 15 16 #include <net/ipv6.h> 17 18 #include "igc.h" 19 #include "igc_hw.h" 20 #include "igc_tsn.h" 21 #include "igc_xdp.h" 22 23 #define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver" 24 25 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 26 27 #define IGC_XDP_PASS 0 28 #define IGC_XDP_CONSUMED BIT(0) 29 #define IGC_XDP_TX BIT(1) 30 #define IGC_XDP_REDIRECT BIT(2) 31 32 static int debug = -1; 33 34 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 35 MODULE_DESCRIPTION(DRV_SUMMARY); 36 MODULE_LICENSE("GPL v2"); 37 module_param(debug, int, 0); 38 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 39 40 char igc_driver_name[] = "igc"; 41 static const char igc_driver_string[] = DRV_SUMMARY; 42 static const char igc_copyright[] = 43 "Copyright(c) 2018 Intel Corporation."; 44 45 static const struct igc_info *igc_info_tbl[] = { 46 [board_base] = &igc_base_info, 47 }; 48 49 static const struct pci_device_id igc_pci_tbl[] = { 50 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base }, 51 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base }, 52 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base }, 53 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base }, 54 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base }, 55 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base }, 56 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base }, 57 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base }, 58 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LMVP), board_base }, 59 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base }, 60 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base }, 61 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base }, 62 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base }, 63 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base }, 64 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base }, 65 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base }, 66 /* required last entry */ 67 {0, } 68 }; 69 70 MODULE_DEVICE_TABLE(pci, igc_pci_tbl); 71 72 enum latency_range { 73 lowest_latency = 0, 74 low_latency = 1, 75 bulk_latency = 2, 76 latency_invalid = 255 77 }; 78 79 void igc_reset(struct igc_adapter *adapter) 80 { 81 struct net_device *dev = adapter->netdev; 82 struct igc_hw *hw = &adapter->hw; 83 struct igc_fc_info *fc = &hw->fc; 84 u32 pba, hwm; 85 86 /* Repartition PBA for greater than 9k MTU if required */ 87 pba = IGC_PBA_34K; 88 89 /* flow control settings 90 * The high water mark must be low enough to fit one full frame 91 * after transmitting the pause frame. As such we must have enough 92 * space to allow for us to complete our current transmit and then 93 * receive the frame that is in progress from the link partner. 
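	 * In other words, once the pause frame goes out there must still be
	 * room for the frame we are finishing on the wire plus one more
	 * maximum-size frame the link partner may already have in flight.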
94 * Set it to: 95 * - the full Rx FIFO size minus one full Tx plus one full Rx frame 96 */ 97 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); 98 99 fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ 100 fc->low_water = fc->high_water - 16; 101 fc->pause_time = 0xFFFF; 102 fc->send_xon = 1; 103 fc->current_mode = fc->requested_mode; 104 105 hw->mac.ops.reset_hw(hw); 106 107 if (hw->mac.ops.init_hw(hw)) 108 netdev_err(dev, "Error on hardware initialization\n"); 109 110 /* Re-establish EEE setting */ 111 igc_set_eee_i225(hw, true, true, true); 112 113 if (!netif_running(adapter->netdev)) 114 igc_power_down_phy_copper_base(&adapter->hw); 115 116 /* Enable HW to recognize an 802.1Q VLAN Ethernet packet */ 117 wr32(IGC_VET, ETH_P_8021Q); 118 119 /* Re-enable PTP, where applicable. */ 120 igc_ptp_reset(adapter); 121 122 /* Re-enable TSN offloading, where applicable. */ 123 igc_tsn_reset(adapter); 124 125 igc_get_phy_info(hw); 126 } 127 128 /** 129 * igc_power_up_link - Power up the phy link 130 * @adapter: address of board private structure 131 */ 132 static void igc_power_up_link(struct igc_adapter *adapter) 133 { 134 igc_reset_phy(&adapter->hw); 135 136 igc_power_up_phy_copper(&adapter->hw); 137 138 igc_setup_link(&adapter->hw); 139 } 140 141 /** 142 * igc_release_hw_control - release control of the h/w to f/w 143 * @adapter: address of board private structure 144 * 145 * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit. 146 * For ASF and Pass Through versions of f/w this means that the 147 * driver is no longer loaded. 148 */ 149 static void igc_release_hw_control(struct igc_adapter *adapter) 150 { 151 struct igc_hw *hw = &adapter->hw; 152 u32 ctrl_ext; 153 154 if (!pci_device_is_present(adapter->pdev)) 155 return; 156 157 /* Let firmware take over control of h/w */ 158 ctrl_ext = rd32(IGC_CTRL_EXT); 159 wr32(IGC_CTRL_EXT, 160 ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD); 161 } 162 163 /** 164 * igc_get_hw_control - get control of the h/w from f/w 165 * @adapter: address of board private structure 166 * 167 * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit. 168 * For ASF and Pass Through versions of f/w this means that 169 * the driver is loaded. 
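 * While DRV_LOAD is set the management firmware is expected to defer to
 * the driver; the bit is cleared again in igc_release_hw_control().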
170 */ 171 static void igc_get_hw_control(struct igc_adapter *adapter) 172 { 173 struct igc_hw *hw = &adapter->hw; 174 u32 ctrl_ext; 175 176 /* Let firmware know the driver has taken over */ 177 ctrl_ext = rd32(IGC_CTRL_EXT); 178 wr32(IGC_CTRL_EXT, 179 ctrl_ext | IGC_CTRL_EXT_DRV_LOAD); 180 } 181 182 static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf) 183 { 184 dma_unmap_single(dev, dma_unmap_addr(buf, dma), 185 dma_unmap_len(buf, len), DMA_TO_DEVICE); 186 187 dma_unmap_len_set(buf, len, 0); 188 } 189 190 /** 191 * igc_clean_tx_ring - Free Tx Buffers 192 * @tx_ring: ring to be cleaned 193 */ 194 static void igc_clean_tx_ring(struct igc_ring *tx_ring) 195 { 196 u16 i = tx_ring->next_to_clean; 197 struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; 198 u32 xsk_frames = 0; 199 200 while (i != tx_ring->next_to_use) { 201 union igc_adv_tx_desc *eop_desc, *tx_desc; 202 203 switch (tx_buffer->type) { 204 case IGC_TX_BUFFER_TYPE_XSK: 205 xsk_frames++; 206 break; 207 case IGC_TX_BUFFER_TYPE_XDP: 208 xdp_return_frame(tx_buffer->xdpf); 209 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 210 break; 211 case IGC_TX_BUFFER_TYPE_SKB: 212 dev_kfree_skb_any(tx_buffer->skb); 213 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 214 break; 215 default: 216 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); 217 break; 218 } 219 220 /* check for eop_desc to determine the end of the packet */ 221 eop_desc = tx_buffer->next_to_watch; 222 tx_desc = IGC_TX_DESC(tx_ring, i); 223 224 /* unmap remaining buffers */ 225 while (tx_desc != eop_desc) { 226 tx_buffer++; 227 tx_desc++; 228 i++; 229 if (unlikely(i == tx_ring->count)) { 230 i = 0; 231 tx_buffer = tx_ring->tx_buffer_info; 232 tx_desc = IGC_TX_DESC(tx_ring, 0); 233 } 234 235 /* unmap any remaining paged data */ 236 if (dma_unmap_len(tx_buffer, len)) 237 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 238 } 239 240 tx_buffer->next_to_watch = NULL; 241 242 /* move us one more past the eop_desc for start of next pkt */ 243 tx_buffer++; 244 i++; 245 if (unlikely(i == tx_ring->count)) { 246 i = 0; 247 tx_buffer = tx_ring->tx_buffer_info; 248 } 249 } 250 251 if (tx_ring->xsk_pool && xsk_frames) 252 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); 253 254 /* reset BQL for queue */ 255 netdev_tx_reset_queue(txring_txq(tx_ring)); 256 257 /* Zero out the buffer ring */ 258 memset(tx_ring->tx_buffer_info, 0, 259 sizeof(*tx_ring->tx_buffer_info) * tx_ring->count); 260 261 /* Zero out the descriptor ring */ 262 memset(tx_ring->desc, 0, tx_ring->size); 263 264 /* reset next_to_use and next_to_clean */ 265 tx_ring->next_to_use = 0; 266 tx_ring->next_to_clean = 0; 267 } 268 269 /** 270 * igc_free_tx_resources - Free Tx Resources per Queue 271 * @tx_ring: Tx descriptor ring for a specific queue 272 * 273 * Free all transmit software resources 274 */ 275 void igc_free_tx_resources(struct igc_ring *tx_ring) 276 { 277 igc_disable_tx_ring(tx_ring); 278 279 vfree(tx_ring->tx_buffer_info); 280 tx_ring->tx_buffer_info = NULL; 281 282 /* if not set, then don't free */ 283 if (!tx_ring->desc) 284 return; 285 286 dma_free_coherent(tx_ring->dev, tx_ring->size, 287 tx_ring->desc, tx_ring->dma); 288 289 tx_ring->desc = NULL; 290 } 291 292 /** 293 * igc_free_all_tx_resources - Free Tx Resources for All Queues 294 * @adapter: board private structure 295 * 296 * Free all transmit software resources 297 */ 298 static void igc_free_all_tx_resources(struct igc_adapter *adapter) 299 { 300 int i; 301 302 for (i = 0; i < adapter->num_tx_queues; i++) 303 
igc_free_tx_resources(adapter->tx_ring[i]); 304 } 305 306 /** 307 * igc_clean_all_tx_rings - Free Tx Buffers for all queues 308 * @adapter: board private structure 309 */ 310 static void igc_clean_all_tx_rings(struct igc_adapter *adapter) 311 { 312 int i; 313 314 for (i = 0; i < adapter->num_tx_queues; i++) 315 if (adapter->tx_ring[i]) 316 igc_clean_tx_ring(adapter->tx_ring[i]); 317 } 318 319 static void igc_disable_tx_ring_hw(struct igc_ring *ring) 320 { 321 struct igc_hw *hw = &ring->q_vector->adapter->hw; 322 u8 idx = ring->reg_idx; 323 u32 txdctl; 324 325 txdctl = rd32(IGC_TXDCTL(idx)); 326 txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE; 327 txdctl |= IGC_TXDCTL_SWFLUSH; 328 wr32(IGC_TXDCTL(idx), txdctl); 329 } 330 331 /** 332 * igc_disable_all_tx_rings_hw - Disable all transmit queue operation 333 * @adapter: board private structure 334 */ 335 static void igc_disable_all_tx_rings_hw(struct igc_adapter *adapter) 336 { 337 int i; 338 339 for (i = 0; i < adapter->num_tx_queues; i++) { 340 struct igc_ring *tx_ring = adapter->tx_ring[i]; 341 342 igc_disable_tx_ring_hw(tx_ring); 343 } 344 } 345 346 /** 347 * igc_setup_tx_resources - allocate Tx resources (Descriptors) 348 * @tx_ring: tx descriptor ring (for a specific queue) to setup 349 * 350 * Return 0 on success, negative on failure 351 */ 352 int igc_setup_tx_resources(struct igc_ring *tx_ring) 353 { 354 struct net_device *ndev = tx_ring->netdev; 355 struct device *dev = tx_ring->dev; 356 int size = 0; 357 358 size = sizeof(struct igc_tx_buffer) * tx_ring->count; 359 tx_ring->tx_buffer_info = vzalloc(size); 360 if (!tx_ring->tx_buffer_info) 361 goto err; 362 363 /* round up to nearest 4K */ 364 tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc); 365 tx_ring->size = ALIGN(tx_ring->size, 4096); 366 367 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, 368 &tx_ring->dma, GFP_KERNEL); 369 370 if (!tx_ring->desc) 371 goto err; 372 373 tx_ring->next_to_use = 0; 374 tx_ring->next_to_clean = 0; 375 376 return 0; 377 378 err: 379 vfree(tx_ring->tx_buffer_info); 380 netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n"); 381 return -ENOMEM; 382 } 383 384 /** 385 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues 386 * @adapter: board private structure 387 * 388 * Return 0 on success, negative on failure 389 */ 390 static int igc_setup_all_tx_resources(struct igc_adapter *adapter) 391 { 392 struct net_device *dev = adapter->netdev; 393 int i, err = 0; 394 395 for (i = 0; i < adapter->num_tx_queues; i++) { 396 err = igc_setup_tx_resources(adapter->tx_ring[i]); 397 if (err) { 398 netdev_err(dev, "Error on Tx queue %u setup\n", i); 399 for (i--; i >= 0; i--) 400 igc_free_tx_resources(adapter->tx_ring[i]); 401 break; 402 } 403 } 404 405 return err; 406 } 407 408 static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring) 409 { 410 u16 i = rx_ring->next_to_clean; 411 412 dev_kfree_skb(rx_ring->skb); 413 rx_ring->skb = NULL; 414 415 /* Free all the Rx ring sk_buffs */ 416 while (i != rx_ring->next_to_alloc) { 417 struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; 418 419 /* Invalidate cache lines that may have been written to by 420 * device so that we avoid corrupting memory. 
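		 * The CPU sync below happens before the page is unmapped and
		 * its remaining references are dropped back to the allocator.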
421 */ 422 dma_sync_single_range_for_cpu(rx_ring->dev, 423 buffer_info->dma, 424 buffer_info->page_offset, 425 igc_rx_bufsz(rx_ring), 426 DMA_FROM_DEVICE); 427 428 /* free resources associated with mapping */ 429 dma_unmap_page_attrs(rx_ring->dev, 430 buffer_info->dma, 431 igc_rx_pg_size(rx_ring), 432 DMA_FROM_DEVICE, 433 IGC_RX_DMA_ATTR); 434 __page_frag_cache_drain(buffer_info->page, 435 buffer_info->pagecnt_bias); 436 437 i++; 438 if (i == rx_ring->count) 439 i = 0; 440 } 441 } 442 443 static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring) 444 { 445 struct igc_rx_buffer *bi; 446 u16 i; 447 448 for (i = 0; i < ring->count; i++) { 449 bi = &ring->rx_buffer_info[i]; 450 if (!bi->xdp) 451 continue; 452 453 xsk_buff_free(bi->xdp); 454 bi->xdp = NULL; 455 } 456 } 457 458 /** 459 * igc_clean_rx_ring - Free Rx Buffers per Queue 460 * @ring: ring to free buffers from 461 */ 462 static void igc_clean_rx_ring(struct igc_ring *ring) 463 { 464 if (ring->xsk_pool) 465 igc_clean_rx_ring_xsk_pool(ring); 466 else 467 igc_clean_rx_ring_page_shared(ring); 468 469 clear_ring_uses_large_buffer(ring); 470 471 ring->next_to_alloc = 0; 472 ring->next_to_clean = 0; 473 ring->next_to_use = 0; 474 } 475 476 /** 477 * igc_clean_all_rx_rings - Free Rx Buffers for all queues 478 * @adapter: board private structure 479 */ 480 static void igc_clean_all_rx_rings(struct igc_adapter *adapter) 481 { 482 int i; 483 484 for (i = 0; i < adapter->num_rx_queues; i++) 485 if (adapter->rx_ring[i]) 486 igc_clean_rx_ring(adapter->rx_ring[i]); 487 } 488 489 /** 490 * igc_free_rx_resources - Free Rx Resources 491 * @rx_ring: ring to clean the resources from 492 * 493 * Free all receive software resources 494 */ 495 void igc_free_rx_resources(struct igc_ring *rx_ring) 496 { 497 igc_clean_rx_ring(rx_ring); 498 499 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); 500 501 vfree(rx_ring->rx_buffer_info); 502 rx_ring->rx_buffer_info = NULL; 503 504 /* if not set, then don't free */ 505 if (!rx_ring->desc) 506 return; 507 508 dma_free_coherent(rx_ring->dev, rx_ring->size, 509 rx_ring->desc, rx_ring->dma); 510 511 rx_ring->desc = NULL; 512 } 513 514 /** 515 * igc_free_all_rx_resources - Free Rx Resources for All Queues 516 * @adapter: board private structure 517 * 518 * Free all receive software resources 519 */ 520 static void igc_free_all_rx_resources(struct igc_adapter *adapter) 521 { 522 int i; 523 524 for (i = 0; i < adapter->num_rx_queues; i++) 525 igc_free_rx_resources(adapter->rx_ring[i]); 526 } 527 528 /** 529 * igc_setup_rx_resources - allocate Rx resources (Descriptors) 530 * @rx_ring: rx descriptor ring (for a specific queue) to setup 531 * 532 * Returns 0 on success, negative on failure 533 */ 534 int igc_setup_rx_resources(struct igc_ring *rx_ring) 535 { 536 struct net_device *ndev = rx_ring->netdev; 537 struct device *dev = rx_ring->dev; 538 u8 index = rx_ring->queue_index; 539 int size, desc_len, res; 540 541 /* XDP RX-queue info */ 542 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) 543 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); 544 res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index, 545 rx_ring->q_vector->napi.napi_id); 546 if (res < 0) { 547 netdev_err(ndev, "Failed to register xdp_rxq index %u\n", 548 index); 549 return res; 550 } 551 552 size = sizeof(struct igc_rx_buffer) * rx_ring->count; 553 rx_ring->rx_buffer_info = vzalloc(size); 554 if (!rx_ring->rx_buffer_info) 555 goto err; 556 557 desc_len = sizeof(union igc_adv_rx_desc); 558 559 /* Round up to nearest 4K */ 560 rx_ring->size = rx_ring->count * desc_len; 561 
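	/* e.g. a hypothetical count of 256 descriptors, at 16 bytes per
	 * union igc_adv_rx_desc, gives a 4096-byte ring; other counts are
	 * simply rounded up to the next 4 KiB boundary below.
	 */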
rx_ring->size = ALIGN(rx_ring->size, 4096); 562 563 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, 564 &rx_ring->dma, GFP_KERNEL); 565 566 if (!rx_ring->desc) 567 goto err; 568 569 rx_ring->next_to_alloc = 0; 570 rx_ring->next_to_clean = 0; 571 rx_ring->next_to_use = 0; 572 573 return 0; 574 575 err: 576 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); 577 vfree(rx_ring->rx_buffer_info); 578 rx_ring->rx_buffer_info = NULL; 579 netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n"); 580 return -ENOMEM; 581 } 582 583 /** 584 * igc_setup_all_rx_resources - wrapper to allocate Rx resources 585 * (Descriptors) for all queues 586 * @adapter: board private structure 587 * 588 * Return 0 on success, negative on failure 589 */ 590 static int igc_setup_all_rx_resources(struct igc_adapter *adapter) 591 { 592 struct net_device *dev = adapter->netdev; 593 int i, err = 0; 594 595 for (i = 0; i < adapter->num_rx_queues; i++) { 596 err = igc_setup_rx_resources(adapter->rx_ring[i]); 597 if (err) { 598 netdev_err(dev, "Error on Rx queue %u setup\n", i); 599 for (i--; i >= 0; i--) 600 igc_free_rx_resources(adapter->rx_ring[i]); 601 break; 602 } 603 } 604 605 return err; 606 } 607 608 static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter, 609 struct igc_ring *ring) 610 { 611 if (!igc_xdp_is_enabled(adapter) || 612 !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags)) 613 return NULL; 614 615 return xsk_get_pool_from_qid(ring->netdev, ring->queue_index); 616 } 617 618 /** 619 * igc_configure_rx_ring - Configure a receive ring after Reset 620 * @adapter: board private structure 621 * @ring: receive ring to be configured 622 * 623 * Configure the Rx unit of the MAC after a reset. 624 */ 625 static void igc_configure_rx_ring(struct igc_adapter *adapter, 626 struct igc_ring *ring) 627 { 628 struct igc_hw *hw = &adapter->hw; 629 union igc_adv_rx_desc *rx_desc; 630 int reg_idx = ring->reg_idx; 631 u32 srrctl = 0, rxdctl = 0; 632 u64 rdba = ring->dma; 633 u32 buf_size; 634 635 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); 636 ring->xsk_pool = igc_get_xsk_pool(adapter, ring); 637 if (ring->xsk_pool) { 638 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, 639 MEM_TYPE_XSK_BUFF_POOL, 640 NULL)); 641 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); 642 } else { 643 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, 644 MEM_TYPE_PAGE_SHARED, 645 NULL)); 646 } 647 648 if (igc_xdp_is_enabled(adapter)) 649 set_ring_uses_large_buffer(ring); 650 651 /* disable the queue */ 652 wr32(IGC_RXDCTL(reg_idx), 0); 653 654 /* Set DMA base address registers */ 655 wr32(IGC_RDBAL(reg_idx), 656 rdba & 0x00000000ffffffffULL); 657 wr32(IGC_RDBAH(reg_idx), rdba >> 32); 658 wr32(IGC_RDLEN(reg_idx), 659 ring->count * sizeof(union igc_adv_rx_desc)); 660 661 /* initialize head and tail */ 662 ring->tail = adapter->io_addr + IGC_RDT(reg_idx); 663 wr32(IGC_RDH(reg_idx), 0); 664 writel(0, ring->tail); 665 666 /* reset next-to- use/clean to place SW in sync with hardware */ 667 ring->next_to_clean = 0; 668 ring->next_to_use = 0; 669 670 if (ring->xsk_pool) 671 buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool); 672 else if (ring_uses_large_buffer(ring)) 673 buf_size = IGC_RXBUFFER_3072; 674 else 675 buf_size = IGC_RXBUFFER_2048; 676 677 srrctl = rd32(IGC_SRRCTL(reg_idx)); 678 srrctl &= ~(IGC_SRRCTL_BSIZEPKT_MASK | IGC_SRRCTL_BSIZEHDR_MASK | 679 IGC_SRRCTL_DESCTYPE_MASK); 680 srrctl |= IGC_SRRCTL_BSIZEHDR(IGC_RX_HDR_LEN); 681 srrctl |= IGC_SRRCTL_BSIZEPKT(buf_size); 682 srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF; 
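	/* SRRCTL now carries the Rx header size, the data buffer size chosen
	 * above (2K, 3K, or the XSK pool frame size) and the one-buffer
	 * advanced descriptor type before being written to the hardware.
	 */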
683 684 wr32(IGC_SRRCTL(reg_idx), srrctl); 685 686 rxdctl |= IGC_RX_PTHRESH; 687 rxdctl |= IGC_RX_HTHRESH << 8; 688 rxdctl |= IGC_RX_WTHRESH << 16; 689 690 /* initialize rx_buffer_info */ 691 memset(ring->rx_buffer_info, 0, 692 sizeof(struct igc_rx_buffer) * ring->count); 693 694 /* initialize Rx descriptor 0 */ 695 rx_desc = IGC_RX_DESC(ring, 0); 696 rx_desc->wb.upper.length = 0; 697 698 /* enable receive descriptor fetching */ 699 rxdctl |= IGC_RXDCTL_QUEUE_ENABLE; 700 701 wr32(IGC_RXDCTL(reg_idx), rxdctl); 702 } 703 704 /** 705 * igc_configure_rx - Configure receive Unit after Reset 706 * @adapter: board private structure 707 * 708 * Configure the Rx unit of the MAC after a reset. 709 */ 710 static void igc_configure_rx(struct igc_adapter *adapter) 711 { 712 int i; 713 714 /* Setup the HW Rx Head and Tail Descriptor Pointers and 715 * the Base and Length of the Rx Descriptor Ring 716 */ 717 for (i = 0; i < adapter->num_rx_queues; i++) 718 igc_configure_rx_ring(adapter, adapter->rx_ring[i]); 719 } 720 721 /** 722 * igc_configure_tx_ring - Configure transmit ring after Reset 723 * @adapter: board private structure 724 * @ring: tx ring to configure 725 * 726 * Configure a transmit ring after a reset. 727 */ 728 static void igc_configure_tx_ring(struct igc_adapter *adapter, 729 struct igc_ring *ring) 730 { 731 struct igc_hw *hw = &adapter->hw; 732 int reg_idx = ring->reg_idx; 733 u64 tdba = ring->dma; 734 u32 txdctl = 0; 735 736 ring->xsk_pool = igc_get_xsk_pool(adapter, ring); 737 738 /* disable the queue */ 739 wr32(IGC_TXDCTL(reg_idx), 0); 740 wrfl(); 741 742 wr32(IGC_TDLEN(reg_idx), 743 ring->count * sizeof(union igc_adv_tx_desc)); 744 wr32(IGC_TDBAL(reg_idx), 745 tdba & 0x00000000ffffffffULL); 746 wr32(IGC_TDBAH(reg_idx), tdba >> 32); 747 748 ring->tail = adapter->io_addr + IGC_TDT(reg_idx); 749 wr32(IGC_TDH(reg_idx), 0); 750 writel(0, ring->tail); 751 752 txdctl |= IGC_TX_PTHRESH; 753 txdctl |= IGC_TX_HTHRESH << 8; 754 txdctl |= IGC_TX_WTHRESH << 16; 755 756 txdctl |= IGC_TXDCTL_QUEUE_ENABLE; 757 wr32(IGC_TXDCTL(reg_idx), txdctl); 758 } 759 760 /** 761 * igc_configure_tx - Configure transmit Unit after Reset 762 * @adapter: board private structure 763 * 764 * Configure the Tx unit of the MAC after a reset. 765 */ 766 static void igc_configure_tx(struct igc_adapter *adapter) 767 { 768 int i; 769 770 for (i = 0; i < adapter->num_tx_queues; i++) 771 igc_configure_tx_ring(adapter, adapter->tx_ring[i]); 772 } 773 774 /** 775 * igc_setup_mrqc - configure the multiple receive queue control registers 776 * @adapter: Board private structure 777 */ 778 static void igc_setup_mrqc(struct igc_adapter *adapter) 779 { 780 struct igc_hw *hw = &adapter->hw; 781 u32 j, num_rx_queues; 782 u32 mrqc, rxcsum; 783 u32 rss_key[10]; 784 785 netdev_rss_key_fill(rss_key, sizeof(rss_key)); 786 for (j = 0; j < 10; j++) 787 wr32(IGC_RSSRK(j), rss_key[j]); 788 789 num_rx_queues = adapter->rss_queues; 790 791 if (adapter->rss_indir_tbl_init != num_rx_queues) { 792 for (j = 0; j < IGC_RETA_SIZE; j++) 793 adapter->rss_indir_tbl[j] = 794 (j * num_rx_queues) / IGC_RETA_SIZE; 795 adapter->rss_indir_tbl_init = num_rx_queues; 796 } 797 igc_write_rss_indir_tbl(adapter); 798 799 /* Disable raw packet checksumming so that RSS hash is placed in 800 * descriptor on writeback. 
No need to enable TCP/UDP/IP checksum 801 * offloads as they are enabled by default 802 */ 803 rxcsum = rd32(IGC_RXCSUM); 804 rxcsum |= IGC_RXCSUM_PCSD; 805 806 /* Enable Receive Checksum Offload for SCTP */ 807 rxcsum |= IGC_RXCSUM_CRCOFL; 808 809 /* Don't need to set TUOFL or IPOFL, they default to 1 */ 810 wr32(IGC_RXCSUM, rxcsum); 811 812 /* Generate RSS hash based on packet types, TCP/UDP 813 * port numbers and/or IPv4/v6 src and dst addresses 814 */ 815 mrqc = IGC_MRQC_RSS_FIELD_IPV4 | 816 IGC_MRQC_RSS_FIELD_IPV4_TCP | 817 IGC_MRQC_RSS_FIELD_IPV6 | 818 IGC_MRQC_RSS_FIELD_IPV6_TCP | 819 IGC_MRQC_RSS_FIELD_IPV6_TCP_EX; 820 821 if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) 822 mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; 823 if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) 824 mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; 825 826 mrqc |= IGC_MRQC_ENABLE_RSS_MQ; 827 828 wr32(IGC_MRQC, mrqc); 829 } 830 831 /** 832 * igc_setup_rctl - configure the receive control registers 833 * @adapter: Board private structure 834 */ 835 static void igc_setup_rctl(struct igc_adapter *adapter) 836 { 837 struct igc_hw *hw = &adapter->hw; 838 u32 rctl; 839 840 rctl = rd32(IGC_RCTL); 841 842 rctl &= ~(3 << IGC_RCTL_MO_SHIFT); 843 rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC); 844 845 rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF | 846 (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT); 847 848 /* enable stripping of CRC. Newer features require 849 * that the HW strips the CRC. 850 */ 851 rctl |= IGC_RCTL_SECRC; 852 853 /* disable store bad packets and clear size bits. */ 854 rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256); 855 856 /* enable LPE to allow for reception of jumbo frames */ 857 rctl |= IGC_RCTL_LPE; 858 859 /* disable queue 0 to prevent tail write w/o re-config */ 860 wr32(IGC_RXDCTL(0), 0); 861 862 /* This is useful for sniffing bad packets. */ 863 if (adapter->netdev->features & NETIF_F_RXALL) { 864 /* UPE and MPE will be handled by normal PROMISC logic 865 * in set_rx_mode 866 */ 867 rctl |= (IGC_RCTL_SBP | /* Receive bad packets */ 868 IGC_RCTL_BAM | /* RX All Bcast Pkts */ 869 IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ 870 871 rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */ 872 IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */ 873 } 874 875 wr32(IGC_RCTL, rctl); 876 } 877 878 /** 879 * igc_setup_tctl - configure the transmit control registers 880 * @adapter: Board private structure 881 */ 882 static void igc_setup_tctl(struct igc_adapter *adapter) 883 { 884 struct igc_hw *hw = &adapter->hw; 885 u32 tctl; 886 887 /* disable queue 0 which icould be enabled by default */ 888 wr32(IGC_TXDCTL(0), 0); 889 890 /* Program the Transmit Control Register */ 891 tctl = rd32(IGC_TCTL); 892 tctl &= ~IGC_TCTL_CT; 893 tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC | 894 (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT); 895 896 /* Enable transmits */ 897 tctl |= IGC_TCTL_EN; 898 899 wr32(IGC_TCTL, tctl); 900 } 901 902 /** 903 * igc_set_mac_filter_hw() - Set MAC address filter in hardware 904 * @adapter: Pointer to adapter where the filter should be set 905 * @index: Filter index 906 * @type: MAC address filter type (source or destination) 907 * @addr: MAC address 908 * @queue: If non-negative, queue assignment feature is enabled and frames 909 * matching the filter are enqueued onto 'queue'. Otherwise, queue 910 * assignment is disabled. 
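 *
 * The filter is programmed into the RAL/RAH register pair at @index; RAH
 * additionally carries the address-select (source vs. destination) and
 * queue-select bits.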
911 */ 912 static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index, 913 enum igc_mac_filter_type type, 914 const u8 *addr, int queue) 915 { 916 struct net_device *dev = adapter->netdev; 917 struct igc_hw *hw = &adapter->hw; 918 u32 ral, rah; 919 920 if (WARN_ON(index >= hw->mac.rar_entry_count)) 921 return; 922 923 ral = le32_to_cpup((__le32 *)(addr)); 924 rah = le16_to_cpup((__le16 *)(addr + 4)); 925 926 if (type == IGC_MAC_FILTER_TYPE_SRC) { 927 rah &= ~IGC_RAH_ASEL_MASK; 928 rah |= IGC_RAH_ASEL_SRC_ADDR; 929 } 930 931 if (queue >= 0) { 932 rah &= ~IGC_RAH_QSEL_MASK; 933 rah |= (queue << IGC_RAH_QSEL_SHIFT); 934 rah |= IGC_RAH_QSEL_ENABLE; 935 } 936 937 rah |= IGC_RAH_AV; 938 939 wr32(IGC_RAL(index), ral); 940 wr32(IGC_RAH(index), rah); 941 942 netdev_dbg(dev, "MAC address filter set in HW: index %d", index); 943 } 944 945 /** 946 * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware 947 * @adapter: Pointer to adapter where the filter should be cleared 948 * @index: Filter index 949 */ 950 static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index) 951 { 952 struct net_device *dev = adapter->netdev; 953 struct igc_hw *hw = &adapter->hw; 954 955 if (WARN_ON(index >= hw->mac.rar_entry_count)) 956 return; 957 958 wr32(IGC_RAL(index), 0); 959 wr32(IGC_RAH(index), 0); 960 961 netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index); 962 } 963 964 /* Set default MAC address for the PF in the first RAR entry */ 965 static void igc_set_default_mac_filter(struct igc_adapter *adapter) 966 { 967 struct net_device *dev = adapter->netdev; 968 u8 *addr = adapter->hw.mac.addr; 969 970 netdev_dbg(dev, "Set default MAC address filter: address %pM", addr); 971 972 igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1); 973 } 974 975 /** 976 * igc_set_mac - Change the Ethernet Address of the NIC 977 * @netdev: network interface device structure 978 * @p: pointer to an address structure 979 * 980 * Returns 0 on success, negative on failure 981 */ 982 static int igc_set_mac(struct net_device *netdev, void *p) 983 { 984 struct igc_adapter *adapter = netdev_priv(netdev); 985 struct igc_hw *hw = &adapter->hw; 986 struct sockaddr *addr = p; 987 988 if (!is_valid_ether_addr(addr->sa_data)) 989 return -EADDRNOTAVAIL; 990 991 eth_hw_addr_set(netdev, addr->sa_data); 992 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 993 994 /* set the correct pool for the new PF MAC address in entry 0 */ 995 igc_set_default_mac_filter(adapter); 996 997 return 0; 998 } 999 1000 /** 1001 * igc_write_mc_addr_list - write multicast addresses to MTA 1002 * @netdev: network interface device structure 1003 * 1004 * Writes multicast address list to the MTA hash table. 1005 * Returns: -ENOMEM on failure 1006 * 0 on no addresses written 1007 * X on writing X addresses to MTA 1008 **/ 1009 static int igc_write_mc_addr_list(struct net_device *netdev) 1010 { 1011 struct igc_adapter *adapter = netdev_priv(netdev); 1012 struct igc_hw *hw = &adapter->hw; 1013 struct netdev_hw_addr *ha; 1014 u8 *mta_list; 1015 int i; 1016 1017 if (netdev_mc_empty(netdev)) { 1018 /* nothing to program, so clear mc list */ 1019 igc_update_mc_addr_list(hw, NULL, 0); 1020 return 0; 1021 } 1022 1023 mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC); 1024 if (!mta_list) 1025 return -ENOMEM; 1026 1027 /* The shared function expects a packed array of only addresses. 
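	 * Each entry is ETH_ALEN bytes with no VLAN or metadata, so the
	 * netdev multicast list is flattened into mta_list first.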
*/ 1028 i = 0; 1029 netdev_for_each_mc_addr(ha, netdev) 1030 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); 1031 1032 igc_update_mc_addr_list(hw, mta_list, i); 1033 kfree(mta_list); 1034 1035 return netdev_mc_count(netdev); 1036 } 1037 1038 static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime, 1039 bool *first_flag, bool *insert_empty) 1040 { 1041 struct igc_adapter *adapter = netdev_priv(ring->netdev); 1042 ktime_t cycle_time = adapter->cycle_time; 1043 ktime_t base_time = adapter->base_time; 1044 ktime_t now = ktime_get_clocktai(); 1045 ktime_t baset_est, end_of_cycle; 1046 s32 launchtime; 1047 s64 n; 1048 1049 n = div64_s64(ktime_sub_ns(now, base_time), cycle_time); 1050 1051 baset_est = ktime_add_ns(base_time, cycle_time * (n)); 1052 end_of_cycle = ktime_add_ns(baset_est, cycle_time); 1053 1054 if (ktime_compare(txtime, end_of_cycle) >= 0) { 1055 if (baset_est != ring->last_ff_cycle) { 1056 *first_flag = true; 1057 ring->last_ff_cycle = baset_est; 1058 1059 if (ktime_compare(end_of_cycle, ring->last_tx_cycle) > 0) 1060 *insert_empty = true; 1061 } 1062 } 1063 1064 /* Introducing a window at end of cycle on which packets 1065 * potentially not honor launchtime. Window of 5us chosen 1066 * considering software update the tail pointer and packets 1067 * are dma'ed to packet buffer. 1068 */ 1069 if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC)) 1070 netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n", 1071 txtime); 1072 1073 ring->last_tx_cycle = end_of_cycle; 1074 1075 launchtime = ktime_sub_ns(txtime, baset_est); 1076 if (launchtime > 0) 1077 div_s64_rem(launchtime, cycle_time, &launchtime); 1078 else 1079 launchtime = 0; 1080 1081 return cpu_to_le32(launchtime); 1082 } 1083 1084 static int igc_init_empty_frame(struct igc_ring *ring, 1085 struct igc_tx_buffer *buffer, 1086 struct sk_buff *skb) 1087 { 1088 unsigned int size; 1089 dma_addr_t dma; 1090 1091 size = skb_headlen(skb); 1092 1093 dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE); 1094 if (dma_mapping_error(ring->dev, dma)) { 1095 netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); 1096 return -ENOMEM; 1097 } 1098 1099 buffer->skb = skb; 1100 buffer->protocol = 0; 1101 buffer->bytecount = skb->len; 1102 buffer->gso_segs = 1; 1103 buffer->time_stamp = jiffies; 1104 dma_unmap_len_set(buffer, len, skb->len); 1105 dma_unmap_addr_set(buffer, dma, dma); 1106 1107 return 0; 1108 } 1109 1110 static int igc_init_tx_empty_descriptor(struct igc_ring *ring, 1111 struct sk_buff *skb, 1112 struct igc_tx_buffer *first) 1113 { 1114 union igc_adv_tx_desc *desc; 1115 u32 cmd_type, olinfo_status; 1116 int err; 1117 1118 if (!igc_desc_unused(ring)) 1119 return -EBUSY; 1120 1121 err = igc_init_empty_frame(ring, first, skb); 1122 if (err) 1123 return err; 1124 1125 cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | 1126 IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | 1127 first->bytecount; 1128 olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; 1129 1130 desc = IGC_TX_DESC(ring, ring->next_to_use); 1131 desc->read.cmd_type_len = cpu_to_le32(cmd_type); 1132 desc->read.olinfo_status = cpu_to_le32(olinfo_status); 1133 desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma)); 1134 1135 netdev_tx_sent_queue(txring_txq(ring), skb->len); 1136 1137 first->next_to_watch = desc; 1138 1139 ring->next_to_use++; 1140 if (ring->next_to_use == ring->count) 1141 ring->next_to_use = 0; 1142 1143 return 0; 1144 } 1145 1146 #define IGC_EMPTY_FRAME_SIZE 60 1147 1148 static void 
igc_tx_ctxtdesc(struct igc_ring *tx_ring, 1149 __le32 launch_time, bool first_flag, 1150 u32 vlan_macip_lens, u32 type_tucmd, 1151 u32 mss_l4len_idx) 1152 { 1153 struct igc_adv_tx_context_desc *context_desc; 1154 u16 i = tx_ring->next_to_use; 1155 1156 context_desc = IGC_TX_CTXTDESC(tx_ring, i); 1157 1158 i++; 1159 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 1160 1161 /* set bits to identify this as an advanced context descriptor */ 1162 type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT; 1163 1164 /* For i225, context index must be unique per ring. */ 1165 if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) 1166 mss_l4len_idx |= tx_ring->reg_idx << 4; 1167 1168 if (first_flag) 1169 mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST; 1170 1171 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 1172 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); 1173 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 1174 context_desc->launch_time = launch_time; 1175 } 1176 1177 static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first, 1178 __le32 launch_time, bool first_flag) 1179 { 1180 struct sk_buff *skb = first->skb; 1181 u32 vlan_macip_lens = 0; 1182 u32 type_tucmd = 0; 1183 1184 if (skb->ip_summed != CHECKSUM_PARTIAL) { 1185 csum_failed: 1186 if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) && 1187 !tx_ring->launchtime_enable) 1188 return; 1189 goto no_csum; 1190 } 1191 1192 switch (skb->csum_offset) { 1193 case offsetof(struct tcphdr, check): 1194 type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; 1195 fallthrough; 1196 case offsetof(struct udphdr, check): 1197 break; 1198 case offsetof(struct sctphdr, checksum): 1199 /* validate that this is actually an SCTP request */ 1200 if (skb_csum_is_sctp(skb)) { 1201 type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP; 1202 break; 1203 } 1204 fallthrough; 1205 default: 1206 skb_checksum_help(skb); 1207 goto csum_failed; 1208 } 1209 1210 /* update TX checksum flag */ 1211 first->tx_flags |= IGC_TX_FLAGS_CSUM; 1212 vlan_macip_lens = skb_checksum_start_offset(skb) - 1213 skb_network_offset(skb); 1214 no_csum: 1215 vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT; 1216 vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; 1217 1218 igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, 1219 vlan_macip_lens, type_tucmd, 0); 1220 } 1221 1222 static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) 1223 { 1224 struct net_device *netdev = tx_ring->netdev; 1225 1226 netif_stop_subqueue(netdev, tx_ring->queue_index); 1227 1228 /* memory barriier comment */ 1229 smp_mb(); 1230 1231 /* We need to check again in a case another CPU has just 1232 * made room available. 1233 */ 1234 if (igc_desc_unused(tx_ring) < size) 1235 return -EBUSY; 1236 1237 /* A reprieve! */ 1238 netif_wake_subqueue(netdev, tx_ring->queue_index); 1239 1240 u64_stats_update_begin(&tx_ring->tx_syncp2); 1241 tx_ring->tx_stats.restart_queue2++; 1242 u64_stats_update_end(&tx_ring->tx_syncp2); 1243 1244 return 0; 1245 } 1246 1247 static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) 1248 { 1249 if (igc_desc_unused(tx_ring) >= size) 1250 return 0; 1251 return __igc_maybe_stop_tx(tx_ring, size); 1252 } 1253 1254 #define IGC_SET_FLAG(_input, _flag, _result) \ 1255 (((_flag) <= (_result)) ? 
\ 1256 ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \ 1257 ((u32)((_input) & (_flag)) / ((_flag) / (_result)))) 1258 1259 static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) 1260 { 1261 /* set type for advanced descriptor with frame checksum insertion */ 1262 u32 cmd_type = IGC_ADVTXD_DTYP_DATA | 1263 IGC_ADVTXD_DCMD_DEXT | 1264 IGC_ADVTXD_DCMD_IFCS; 1265 1266 /* set HW vlan bit if vlan is present */ 1267 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN, 1268 IGC_ADVTXD_DCMD_VLE); 1269 1270 /* set segmentation bits for TSO */ 1271 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO, 1272 (IGC_ADVTXD_DCMD_TSE)); 1273 1274 /* set timestamp bit if present, will select the register set 1275 * based on the _TSTAMP(_X) bit. 1276 */ 1277 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP, 1278 (IGC_ADVTXD_MAC_TSTAMP)); 1279 1280 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_1, 1281 (IGC_ADVTXD_TSTAMP_REG_1)); 1282 1283 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_2, 1284 (IGC_ADVTXD_TSTAMP_REG_2)); 1285 1286 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_3, 1287 (IGC_ADVTXD_TSTAMP_REG_3)); 1288 1289 /* insert frame checksum */ 1290 cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS); 1291 1292 return cmd_type; 1293 } 1294 1295 static void igc_tx_olinfo_status(struct igc_ring *tx_ring, 1296 union igc_adv_tx_desc *tx_desc, 1297 u32 tx_flags, unsigned int paylen) 1298 { 1299 u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT; 1300 1301 /* insert L4 checksum */ 1302 olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_CSUM, 1303 (IGC_TXD_POPTS_TXSM << 8)); 1304 1305 /* insert IPv4 checksum */ 1306 olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_IPV4, 1307 (IGC_TXD_POPTS_IXSM << 8)); 1308 1309 /* Use the second timer (free running, in general) for the timestamp */ 1310 olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_TIMER_1, 1311 IGC_TXD_PTP2_TIMER_1); 1312 1313 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 1314 } 1315 1316 static int igc_tx_map(struct igc_ring *tx_ring, 1317 struct igc_tx_buffer *first, 1318 const u8 hdr_len) 1319 { 1320 struct sk_buff *skb = first->skb; 1321 struct igc_tx_buffer *tx_buffer; 1322 union igc_adv_tx_desc *tx_desc; 1323 u32 tx_flags = first->tx_flags; 1324 skb_frag_t *frag; 1325 u16 i = tx_ring->next_to_use; 1326 unsigned int data_len, size; 1327 dma_addr_t dma; 1328 u32 cmd_type; 1329 1330 cmd_type = igc_tx_cmd_type(skb, tx_flags); 1331 tx_desc = IGC_TX_DESC(tx_ring, i); 1332 1333 igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); 1334 1335 size = skb_headlen(skb); 1336 data_len = skb->data_len; 1337 1338 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 1339 1340 tx_buffer = first; 1341 1342 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 1343 if (dma_mapping_error(tx_ring->dev, dma)) 1344 goto dma_error; 1345 1346 /* record length, and DMA address */ 1347 dma_unmap_len_set(tx_buffer, len, size); 1348 dma_unmap_addr_set(tx_buffer, dma, dma); 1349 1350 tx_desc->read.buffer_addr = cpu_to_le64(dma); 1351 1352 while (unlikely(size > IGC_MAX_DATA_PER_TXD)) { 1353 tx_desc->read.cmd_type_len = 1354 cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD); 1355 1356 i++; 1357 tx_desc++; 1358 if (i == tx_ring->count) { 1359 tx_desc = IGC_TX_DESC(tx_ring, 0); 1360 i = 0; 1361 } 1362 tx_desc->read.olinfo_status = 0; 1363 1364 dma += IGC_MAX_DATA_PER_TXD; 1365 size -= IGC_MAX_DATA_PER_TXD; 1366 1367 tx_desc->read.buffer_addr = cpu_to_le64(dma); 1368 } 1369 
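		/* The current buffer is now fully described; once no paged
		 * data remains, the descriptor just written is the last one
		 * of the frame and receives the RS/EOP bits below.
		 */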
1370 if (likely(!data_len)) 1371 break; 1372 1373 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); 1374 1375 i++; 1376 tx_desc++; 1377 if (i == tx_ring->count) { 1378 tx_desc = IGC_TX_DESC(tx_ring, 0); 1379 i = 0; 1380 } 1381 tx_desc->read.olinfo_status = 0; 1382 1383 size = skb_frag_size(frag); 1384 data_len -= size; 1385 1386 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, 1387 size, DMA_TO_DEVICE); 1388 1389 tx_buffer = &tx_ring->tx_buffer_info[i]; 1390 } 1391 1392 /* write last descriptor with RS and EOP bits */ 1393 cmd_type |= size | IGC_TXD_DCMD; 1394 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); 1395 1396 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); 1397 1398 /* set the timestamp */ 1399 first->time_stamp = jiffies; 1400 1401 skb_tx_timestamp(skb); 1402 1403 /* Force memory writes to complete before letting h/w know there 1404 * are new descriptors to fetch. (Only applicable for weak-ordered 1405 * memory model archs, such as IA-64). 1406 * 1407 * We also need this memory barrier to make certain all of the 1408 * status bits have been updated before next_to_watch is written. 1409 */ 1410 wmb(); 1411 1412 /* set next_to_watch value indicating a packet is present */ 1413 first->next_to_watch = tx_desc; 1414 1415 i++; 1416 if (i == tx_ring->count) 1417 i = 0; 1418 1419 tx_ring->next_to_use = i; 1420 1421 /* Make sure there is space in the ring for the next send. */ 1422 igc_maybe_stop_tx(tx_ring, DESC_NEEDED); 1423 1424 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { 1425 writel(i, tx_ring->tail); 1426 } 1427 1428 return 0; 1429 dma_error: 1430 netdev_err(tx_ring->netdev, "TX DMA map failed\n"); 1431 tx_buffer = &tx_ring->tx_buffer_info[i]; 1432 1433 /* clear dma mappings for failed tx_buffer_info map */ 1434 while (tx_buffer != first) { 1435 if (dma_unmap_len(tx_buffer, len)) 1436 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 1437 1438 if (i-- == 0) 1439 i += tx_ring->count; 1440 tx_buffer = &tx_ring->tx_buffer_info[i]; 1441 } 1442 1443 if (dma_unmap_len(tx_buffer, len)) 1444 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 1445 1446 dev_kfree_skb_any(tx_buffer->skb); 1447 tx_buffer->skb = NULL; 1448 1449 tx_ring->next_to_use = i; 1450 1451 return -1; 1452 } 1453 1454 static int igc_tso(struct igc_ring *tx_ring, 1455 struct igc_tx_buffer *first, 1456 __le32 launch_time, bool first_flag, 1457 u8 *hdr_len) 1458 { 1459 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; 1460 struct sk_buff *skb = first->skb; 1461 union { 1462 struct iphdr *v4; 1463 struct ipv6hdr *v6; 1464 unsigned char *hdr; 1465 } ip; 1466 union { 1467 struct tcphdr *tcp; 1468 struct udphdr *udp; 1469 unsigned char *hdr; 1470 } l4; 1471 u32 paylen, l4_offset; 1472 int err; 1473 1474 if (skb->ip_summed != CHECKSUM_PARTIAL) 1475 return 0; 1476 1477 if (!skb_is_gso(skb)) 1478 return 0; 1479 1480 err = skb_cow_head(skb, 0); 1481 if (err < 0) 1482 return err; 1483 1484 ip.hdr = skb_network_header(skb); 1485 l4.hdr = skb_checksum_start(skb); 1486 1487 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 1488 type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; 1489 1490 /* initialize outer IP header fields */ 1491 if (ip.v4->version == 4) { 1492 unsigned char *csum_start = skb_checksum_start(skb); 1493 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); 1494 1495 /* IP header will have to cancel out any data that 1496 * is not a part of the outer IP header 1497 */ 1498 ip.v4->check = csum_fold(csum_partial(trans_start, 1499 csum_start - trans_start, 1500 0)); 1501 type_tucmd |= IGC_ADVTXD_TUCMD_IPV4; 
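		/* tot_len is cleared below; the hardware fills in the
		 * per-segment length (and final IP checksum) when it
		 * replicates this header for every TSO segment.
		 */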
1502 1503 ip.v4->tot_len = 0; 1504 first->tx_flags |= IGC_TX_FLAGS_TSO | 1505 IGC_TX_FLAGS_CSUM | 1506 IGC_TX_FLAGS_IPV4; 1507 } else { 1508 ip.v6->payload_len = 0; 1509 first->tx_flags |= IGC_TX_FLAGS_TSO | 1510 IGC_TX_FLAGS_CSUM; 1511 } 1512 1513 /* determine offset of inner transport header */ 1514 l4_offset = l4.hdr - skb->data; 1515 1516 /* remove payload length from inner checksum */ 1517 paylen = skb->len - l4_offset; 1518 if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) { 1519 /* compute length of segmentation header */ 1520 *hdr_len = (l4.tcp->doff * 4) + l4_offset; 1521 csum_replace_by_diff(&l4.tcp->check, 1522 (__force __wsum)htonl(paylen)); 1523 } else { 1524 /* compute length of segmentation header */ 1525 *hdr_len = sizeof(*l4.udp) + l4_offset; 1526 csum_replace_by_diff(&l4.udp->check, 1527 (__force __wsum)htonl(paylen)); 1528 } 1529 1530 /* update gso size and bytecount with header size */ 1531 first->gso_segs = skb_shinfo(skb)->gso_segs; 1532 first->bytecount += (first->gso_segs - 1) * *hdr_len; 1533 1534 /* MSS L4LEN IDX */ 1535 mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT; 1536 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT; 1537 1538 /* VLAN MACLEN IPLEN */ 1539 vlan_macip_lens = l4.hdr - ip.hdr; 1540 vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT; 1541 vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; 1542 1543 igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, 1544 vlan_macip_lens, type_tucmd, mss_l4len_idx); 1545 1546 return 1; 1547 } 1548 1549 static bool igc_request_tx_tstamp(struct igc_adapter *adapter, struct sk_buff *skb, u32 *flags) 1550 { 1551 int i; 1552 1553 for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) { 1554 struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i]; 1555 1556 if (tstamp->skb) 1557 continue; 1558 1559 tstamp->skb = skb_get(skb); 1560 tstamp->start = jiffies; 1561 *flags = tstamp->flags; 1562 1563 return true; 1564 } 1565 1566 return false; 1567 } 1568 1569 static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, 1570 struct igc_ring *tx_ring) 1571 { 1572 struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); 1573 bool first_flag = false, insert_empty = false; 1574 u16 count = TXD_USE_COUNT(skb_headlen(skb)); 1575 __be16 protocol = vlan_get_protocol(skb); 1576 struct igc_tx_buffer *first; 1577 __le32 launch_time = 0; 1578 u32 tx_flags = 0; 1579 unsigned short f; 1580 ktime_t txtime; 1581 u8 hdr_len = 0; 1582 int tso = 0; 1583 1584 /* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD, 1585 * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD, 1586 * + 2 desc gap to keep tail from touching head, 1587 * + 1 desc for context descriptor, 1588 * otherwise try next time 1589 */ 1590 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 1591 count += TXD_USE_COUNT(skb_frag_size( 1592 &skb_shinfo(skb)->frags[f])); 1593 1594 if (igc_maybe_stop_tx(tx_ring, count + 5)) { 1595 /* this is a hard error */ 1596 return NETDEV_TX_BUSY; 1597 } 1598 1599 if (!tx_ring->launchtime_enable) 1600 goto done; 1601 1602 txtime = skb->tstamp; 1603 skb->tstamp = ktime_set(0, 0); 1604 launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty); 1605 1606 if (insert_empty) { 1607 struct igc_tx_buffer *empty_info; 1608 struct sk_buff *empty; 1609 void *data; 1610 1611 empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; 1612 empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC); 1613 if (!empty) 1614 goto done; 1615 1616 data = skb_put(empty, IGC_EMPTY_FRAME_SIZE); 1617 
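		/* The dummy frame is zero-filled below and carries no real
		 * data; it is only queued ahead of this packet when
		 * igc_tx_launchtime() flags that a new launch-time cycle
		 * has started.
		 */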
memset(data, 0, IGC_EMPTY_FRAME_SIZE); 1618 1619 igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0); 1620 1621 if (igc_init_tx_empty_descriptor(tx_ring, 1622 empty, 1623 empty_info) < 0) 1624 dev_kfree_skb_any(empty); 1625 } 1626 1627 done: 1628 /* record the location of the first descriptor for this packet */ 1629 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; 1630 first->type = IGC_TX_BUFFER_TYPE_SKB; 1631 first->skb = skb; 1632 first->bytecount = skb->len; 1633 first->gso_segs = 1; 1634 1635 if (adapter->qbv_transition || tx_ring->oper_gate_closed) 1636 goto out_drop; 1637 1638 if (tx_ring->max_sdu > 0 && first->bytecount > tx_ring->max_sdu) { 1639 adapter->stats.txdrop++; 1640 goto out_drop; 1641 } 1642 1643 if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) && 1644 skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 1645 /* FIXME: add support for retrieving timestamps from 1646 * the other timer registers before skipping the 1647 * timestamping request. 1648 */ 1649 unsigned long flags; 1650 u32 tstamp_flags; 1651 1652 spin_lock_irqsave(&adapter->ptp_tx_lock, flags); 1653 if (igc_request_tx_tstamp(adapter, skb, &tstamp_flags)) { 1654 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1655 tx_flags |= IGC_TX_FLAGS_TSTAMP | tstamp_flags; 1656 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_USE_CYCLES) 1657 tx_flags |= IGC_TX_FLAGS_TSTAMP_TIMER_1; 1658 } else { 1659 adapter->tx_hwtstamp_skipped++; 1660 } 1661 1662 spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); 1663 } 1664 1665 if (skb_vlan_tag_present(skb)) { 1666 tx_flags |= IGC_TX_FLAGS_VLAN; 1667 tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT); 1668 } 1669 1670 /* record initial flags and protocol */ 1671 first->tx_flags = tx_flags; 1672 first->protocol = protocol; 1673 1674 tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len); 1675 if (tso < 0) 1676 goto out_drop; 1677 else if (!tso) 1678 igc_tx_csum(tx_ring, first, launch_time, first_flag); 1679 1680 igc_tx_map(tx_ring, first, hdr_len); 1681 1682 return NETDEV_TX_OK; 1683 1684 out_drop: 1685 dev_kfree_skb_any(first->skb); 1686 first->skb = NULL; 1687 1688 return NETDEV_TX_OK; 1689 } 1690 1691 static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter, 1692 struct sk_buff *skb) 1693 { 1694 unsigned int r_idx = skb->queue_mapping; 1695 1696 if (r_idx >= adapter->num_tx_queues) 1697 r_idx = r_idx % adapter->num_tx_queues; 1698 1699 return adapter->tx_ring[r_idx]; 1700 } 1701 1702 static netdev_tx_t igc_xmit_frame(struct sk_buff *skb, 1703 struct net_device *netdev) 1704 { 1705 struct igc_adapter *adapter = netdev_priv(netdev); 1706 1707 /* The minimum packet size with TCTL.PSP set is 17 so pad the skb 1708 * in order to meet this minimum size requirement. 
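	 * skb_padto() zero-fills the tail and frees the skb on failure,
	 * which is why that error path still returns NETDEV_TX_OK.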
1709 */ 1710 if (skb->len < 17) { 1711 if (skb_padto(skb, 17)) 1712 return NETDEV_TX_OK; 1713 skb->len = 17; 1714 } 1715 1716 return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb)); 1717 } 1718 1719 static void igc_rx_checksum(struct igc_ring *ring, 1720 union igc_adv_rx_desc *rx_desc, 1721 struct sk_buff *skb) 1722 { 1723 skb_checksum_none_assert(skb); 1724 1725 /* Ignore Checksum bit is set */ 1726 if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM)) 1727 return; 1728 1729 /* Rx checksum disabled via ethtool */ 1730 if (!(ring->netdev->features & NETIF_F_RXCSUM)) 1731 return; 1732 1733 /* TCP/UDP checksum error bit is set */ 1734 if (igc_test_staterr(rx_desc, 1735 IGC_RXDEXT_STATERR_L4E | 1736 IGC_RXDEXT_STATERR_IPE)) { 1737 /* work around errata with sctp packets where the TCPE aka 1738 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) 1739 * packets (aka let the stack check the crc32c) 1740 */ 1741 if (!(skb->len == 60 && 1742 test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { 1743 u64_stats_update_begin(&ring->rx_syncp); 1744 ring->rx_stats.csum_err++; 1745 u64_stats_update_end(&ring->rx_syncp); 1746 } 1747 /* let the stack verify checksum errors */ 1748 return; 1749 } 1750 /* It must be a TCP or UDP packet with a valid checksum */ 1751 if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS | 1752 IGC_RXD_STAT_UDPCS)) 1753 skb->ip_summed = CHECKSUM_UNNECESSARY; 1754 1755 netdev_dbg(ring->netdev, "cksum success: bits %08X\n", 1756 le32_to_cpu(rx_desc->wb.upper.status_error)); 1757 } 1758 1759 /* Mapping HW RSS Type to enum pkt_hash_types */ 1760 static const enum pkt_hash_types igc_rss_type_table[IGC_RSS_TYPE_MAX_TABLE] = { 1761 [IGC_RSS_TYPE_NO_HASH] = PKT_HASH_TYPE_L2, 1762 [IGC_RSS_TYPE_HASH_TCP_IPV4] = PKT_HASH_TYPE_L4, 1763 [IGC_RSS_TYPE_HASH_IPV4] = PKT_HASH_TYPE_L3, 1764 [IGC_RSS_TYPE_HASH_TCP_IPV6] = PKT_HASH_TYPE_L4, 1765 [IGC_RSS_TYPE_HASH_IPV6_EX] = PKT_HASH_TYPE_L3, 1766 [IGC_RSS_TYPE_HASH_IPV6] = PKT_HASH_TYPE_L3, 1767 [IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = PKT_HASH_TYPE_L4, 1768 [IGC_RSS_TYPE_HASH_UDP_IPV4] = PKT_HASH_TYPE_L4, 1769 [IGC_RSS_TYPE_HASH_UDP_IPV6] = PKT_HASH_TYPE_L4, 1770 [IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = PKT_HASH_TYPE_L4, 1771 [10] = PKT_HASH_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW */ 1772 [11] = PKT_HASH_TYPE_NONE, /* keep array sized for SW bit-mask */ 1773 [12] = PKT_HASH_TYPE_NONE, /* to handle future HW revisons */ 1774 [13] = PKT_HASH_TYPE_NONE, 1775 [14] = PKT_HASH_TYPE_NONE, 1776 [15] = PKT_HASH_TYPE_NONE, 1777 }; 1778 1779 static inline void igc_rx_hash(struct igc_ring *ring, 1780 union igc_adv_rx_desc *rx_desc, 1781 struct sk_buff *skb) 1782 { 1783 if (ring->netdev->features & NETIF_F_RXHASH) { 1784 u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); 1785 u32 rss_type = igc_rss_type(rx_desc); 1786 1787 skb_set_hash(skb, rss_hash, igc_rss_type_table[rss_type]); 1788 } 1789 } 1790 1791 static void igc_rx_vlan(struct igc_ring *rx_ring, 1792 union igc_adv_rx_desc *rx_desc, 1793 struct sk_buff *skb) 1794 { 1795 struct net_device *dev = rx_ring->netdev; 1796 u16 vid; 1797 1798 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && 1799 igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) { 1800 if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) && 1801 test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) 1802 vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan); 1803 else 1804 vid = le16_to_cpu(rx_desc->wb.upper.vlan); 1805 1806 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); 1807 } 1808 } 1809 1810 /** 1811 * 
igc_process_skb_fields - Populate skb header fields from Rx descriptor 1812 * @rx_ring: rx descriptor ring packet is being transacted on 1813 * @rx_desc: pointer to the EOP Rx descriptor 1814 * @skb: pointer to current skb being populated 1815 * 1816 * This function checks the ring, descriptor, and packet information in order 1817 * to populate the hash, checksum, VLAN, protocol, and other fields within the 1818 * skb. 1819 */ 1820 static void igc_process_skb_fields(struct igc_ring *rx_ring, 1821 union igc_adv_rx_desc *rx_desc, 1822 struct sk_buff *skb) 1823 { 1824 igc_rx_hash(rx_ring, rx_desc, skb); 1825 1826 igc_rx_checksum(rx_ring, rx_desc, skb); 1827 1828 igc_rx_vlan(rx_ring, rx_desc, skb); 1829 1830 skb_record_rx_queue(skb, rx_ring->queue_index); 1831 1832 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 1833 } 1834 1835 static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features) 1836 { 1837 bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); 1838 struct igc_adapter *adapter = netdev_priv(netdev); 1839 struct igc_hw *hw = &adapter->hw; 1840 u32 ctrl; 1841 1842 ctrl = rd32(IGC_CTRL); 1843 1844 if (enable) { 1845 /* enable VLAN tag insert/strip */ 1846 ctrl |= IGC_CTRL_VME; 1847 } else { 1848 /* disable VLAN tag insert/strip */ 1849 ctrl &= ~IGC_CTRL_VME; 1850 } 1851 wr32(IGC_CTRL, ctrl); 1852 } 1853 1854 static void igc_restore_vlan(struct igc_adapter *adapter) 1855 { 1856 igc_vlan_mode(adapter->netdev, adapter->netdev->features); 1857 } 1858 1859 static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring, 1860 const unsigned int size, 1861 int *rx_buffer_pgcnt) 1862 { 1863 struct igc_rx_buffer *rx_buffer; 1864 1865 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; 1866 *rx_buffer_pgcnt = 1867 #if (PAGE_SIZE < 8192) 1868 page_count(rx_buffer->page); 1869 #else 1870 0; 1871 #endif 1872 prefetchw(rx_buffer->page); 1873 1874 /* we are reusing so sync this buffer for CPU use */ 1875 dma_sync_single_range_for_cpu(rx_ring->dev, 1876 rx_buffer->dma, 1877 rx_buffer->page_offset, 1878 size, 1879 DMA_FROM_DEVICE); 1880 1881 rx_buffer->pagecnt_bias--; 1882 1883 return rx_buffer; 1884 } 1885 1886 static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer, 1887 unsigned int truesize) 1888 { 1889 #if (PAGE_SIZE < 8192) 1890 buffer->page_offset ^= truesize; 1891 #else 1892 buffer->page_offset += truesize; 1893 #endif 1894 } 1895 1896 static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring, 1897 unsigned int size) 1898 { 1899 unsigned int truesize; 1900 1901 #if (PAGE_SIZE < 8192) 1902 truesize = igc_rx_pg_size(ring) / 2; 1903 #else 1904 truesize = ring_uses_build_skb(ring) ? 1905 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + 1906 SKB_DATA_ALIGN(IGC_SKB_PAD + size) : 1907 SKB_DATA_ALIGN(size); 1908 #endif 1909 return truesize; 1910 } 1911 1912 /** 1913 * igc_add_rx_frag - Add contents of Rx buffer to sk_buff 1914 * @rx_ring: rx descriptor ring to transact packets on 1915 * @rx_buffer: buffer containing page to add 1916 * @skb: sk_buff to place the data into 1917 * @size: size of buffer to be added 1918 * 1919 * This function will add the data contained in rx_buffer->page to the skb. 1920 */ 1921 static void igc_add_rx_frag(struct igc_ring *rx_ring, 1922 struct igc_rx_buffer *rx_buffer, 1923 struct sk_buff *skb, 1924 unsigned int size) 1925 { 1926 unsigned int truesize; 1927 1928 #if (PAGE_SIZE < 8192) 1929 truesize = igc_rx_pg_size(rx_ring) / 2; 1930 #else 1931 truesize = ring_uses_build_skb(rx_ring) ? 
1932 SKB_DATA_ALIGN(IGC_SKB_PAD + size) : 1933 SKB_DATA_ALIGN(size); 1934 #endif 1935 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, 1936 rx_buffer->page_offset, size, truesize); 1937 1938 igc_rx_buffer_flip(rx_buffer, truesize); 1939 } 1940 1941 static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, 1942 struct igc_rx_buffer *rx_buffer, 1943 struct xdp_buff *xdp) 1944 { 1945 unsigned int size = xdp->data_end - xdp->data; 1946 unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); 1947 unsigned int metasize = xdp->data - xdp->data_meta; 1948 struct sk_buff *skb; 1949 1950 /* prefetch first cache line of first page */ 1951 net_prefetch(xdp->data_meta); 1952 1953 /* build an skb around the page buffer */ 1954 skb = napi_build_skb(xdp->data_hard_start, truesize); 1955 if (unlikely(!skb)) 1956 return NULL; 1957 1958 /* update pointers within the skb to store the data */ 1959 skb_reserve(skb, xdp->data - xdp->data_hard_start); 1960 __skb_put(skb, size); 1961 if (metasize) 1962 skb_metadata_set(skb, metasize); 1963 1964 igc_rx_buffer_flip(rx_buffer, truesize); 1965 return skb; 1966 } 1967 1968 static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, 1969 struct igc_rx_buffer *rx_buffer, 1970 struct igc_xdp_buff *ctx) 1971 { 1972 struct xdp_buff *xdp = &ctx->xdp; 1973 unsigned int metasize = xdp->data - xdp->data_meta; 1974 unsigned int size = xdp->data_end - xdp->data; 1975 unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); 1976 void *va = xdp->data; 1977 unsigned int headlen; 1978 struct sk_buff *skb; 1979 1980 /* prefetch first cache line of first page */ 1981 net_prefetch(xdp->data_meta); 1982 1983 /* allocate a skb to store the frags */ 1984 skb = napi_alloc_skb(&rx_ring->q_vector->napi, 1985 IGC_RX_HDR_LEN + metasize); 1986 if (unlikely(!skb)) 1987 return NULL; 1988 1989 if (ctx->rx_ts) { 1990 skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV; 1991 skb_hwtstamps(skb)->netdev_data = ctx->rx_ts; 1992 } 1993 1994 /* Determine available headroom for copy */ 1995 headlen = size; 1996 if (headlen > IGC_RX_HDR_LEN) 1997 headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN); 1998 1999 /* align pull length to size of long to optimize memcpy performance */ 2000 memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta, 2001 ALIGN(headlen + metasize, sizeof(long))); 2002 2003 if (metasize) { 2004 skb_metadata_set(skb, metasize); 2005 __skb_pull(skb, metasize); 2006 } 2007 2008 /* update all of the pointers */ 2009 size -= headlen; 2010 if (size) { 2011 skb_add_rx_frag(skb, 0, rx_buffer->page, 2012 (va + headlen) - page_address(rx_buffer->page), 2013 size, truesize); 2014 igc_rx_buffer_flip(rx_buffer, truesize); 2015 } else { 2016 rx_buffer->pagecnt_bias++; 2017 } 2018 2019 return skb; 2020 } 2021 2022 /** 2023 * igc_reuse_rx_page - page flip buffer and store it back on the ring 2024 * @rx_ring: rx descriptor ring to store buffers on 2025 * @old_buff: donor buffer to have page reused 2026 * 2027 * Synchronizes page for reuse by the adapter 2028 */ 2029 static void igc_reuse_rx_page(struct igc_ring *rx_ring, 2030 struct igc_rx_buffer *old_buff) 2031 { 2032 u16 nta = rx_ring->next_to_alloc; 2033 struct igc_rx_buffer *new_buff; 2034 2035 new_buff = &rx_ring->rx_buffer_info[nta]; 2036 2037 /* update, and store next to alloc */ 2038 nta++; 2039 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; 2040 2041 /* Transfer page from old buffer to new buffer. 
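	 * The DMA mapping, page offset and pagecnt_bias travel with the
	 * page, so nothing has to be re-mapped.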
2042 * Move each member individually to avoid possible store 2043 * forwarding stalls. 2044 */ 2045 new_buff->dma = old_buff->dma; 2046 new_buff->page = old_buff->page; 2047 new_buff->page_offset = old_buff->page_offset; 2048 new_buff->pagecnt_bias = old_buff->pagecnt_bias; 2049 } 2050 2051 static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer, 2052 int rx_buffer_pgcnt) 2053 { 2054 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; 2055 struct page *page = rx_buffer->page; 2056 2057 /* avoid re-using remote and pfmemalloc pages */ 2058 if (!dev_page_is_reusable(page)) 2059 return false; 2060 2061 #if (PAGE_SIZE < 8192) 2062 /* if we are only owner of page we can reuse it */ 2063 if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) 2064 return false; 2065 #else 2066 #define IGC_LAST_OFFSET \ 2067 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048) 2068 2069 if (rx_buffer->page_offset > IGC_LAST_OFFSET) 2070 return false; 2071 #endif 2072 2073 /* If we have drained the page fragment pool we need to update 2074 * the pagecnt_bias and page count so that we fully restock the 2075 * number of references the driver holds. 2076 */ 2077 if (unlikely(pagecnt_bias == 1)) { 2078 page_ref_add(page, USHRT_MAX - 1); 2079 rx_buffer->pagecnt_bias = USHRT_MAX; 2080 } 2081 2082 return true; 2083 } 2084 2085 /** 2086 * igc_is_non_eop - process handling of non-EOP buffers 2087 * @rx_ring: Rx ring being processed 2088 * @rx_desc: Rx descriptor for current buffer 2089 * 2090 * This function updates next to clean. If the buffer is an EOP buffer 2091 * this function exits returning false, otherwise it will place the 2092 * sk_buff in the next buffer to be chained and return true indicating 2093 * that this is in fact a non-EOP buffer. 2094 */ 2095 static bool igc_is_non_eop(struct igc_ring *rx_ring, 2096 union igc_adv_rx_desc *rx_desc) 2097 { 2098 u32 ntc = rx_ring->next_to_clean + 1; 2099 2100 /* fetch, update, and store next to clean */ 2101 ntc = (ntc < rx_ring->count) ? ntc : 0; 2102 rx_ring->next_to_clean = ntc; 2103 2104 prefetch(IGC_RX_DESC(rx_ring, ntc)); 2105 2106 if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP))) 2107 return false; 2108 2109 return true; 2110 } 2111 2112 /** 2113 * igc_cleanup_headers - Correct corrupted or empty headers 2114 * @rx_ring: rx descriptor ring packet is being transacted on 2115 * @rx_desc: pointer to the EOP Rx descriptor 2116 * @skb: pointer to current skb being fixed 2117 * 2118 * Address the case where we are pulling data in on pages only 2119 * and as such no data is present in the skb header. 2120 * 2121 * In addition if skb is not at least 60 bytes we need to pad it so that 2122 * it is large enough to qualify as a valid Ethernet frame. 2123 * 2124 * Returns true if an error was encountered and skb was freed. 
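 *
 * A true return means the caller must not touch @skb again: the pointer
 * was either an XDP verdict encoded as an error pointer or the skb has
 * already been freed here.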
2125 */ 2126 static bool igc_cleanup_headers(struct igc_ring *rx_ring, 2127 union igc_adv_rx_desc *rx_desc, 2128 struct sk_buff *skb) 2129 { 2130 /* XDP packets use error pointer so abort at this point */ 2131 if (IS_ERR(skb)) 2132 return true; 2133 2134 if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) { 2135 struct net_device *netdev = rx_ring->netdev; 2136 2137 if (!(netdev->features & NETIF_F_RXALL)) { 2138 dev_kfree_skb_any(skb); 2139 return true; 2140 } 2141 } 2142 2143 /* if eth_skb_pad returns an error the skb was freed */ 2144 if (eth_skb_pad(skb)) 2145 return true; 2146 2147 return false; 2148 } 2149 2150 static void igc_put_rx_buffer(struct igc_ring *rx_ring, 2151 struct igc_rx_buffer *rx_buffer, 2152 int rx_buffer_pgcnt) 2153 { 2154 if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { 2155 /* hand second half of page back to the ring */ 2156 igc_reuse_rx_page(rx_ring, rx_buffer); 2157 } else { 2158 /* We are not reusing the buffer so unmap it and free 2159 * any references we are holding to it 2160 */ 2161 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, 2162 igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE, 2163 IGC_RX_DMA_ATTR); 2164 __page_frag_cache_drain(rx_buffer->page, 2165 rx_buffer->pagecnt_bias); 2166 } 2167 2168 /* clear contents of rx_buffer */ 2169 rx_buffer->page = NULL; 2170 } 2171 2172 static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring) 2173 { 2174 struct igc_adapter *adapter = rx_ring->q_vector->adapter; 2175 2176 if (ring_uses_build_skb(rx_ring)) 2177 return IGC_SKB_PAD; 2178 if (igc_xdp_is_enabled(adapter)) 2179 return XDP_PACKET_HEADROOM; 2180 2181 return 0; 2182 } 2183 2184 static bool igc_alloc_mapped_page(struct igc_ring *rx_ring, 2185 struct igc_rx_buffer *bi) 2186 { 2187 struct page *page = bi->page; 2188 dma_addr_t dma; 2189 2190 /* since we are recycling buffers we should seldom need to alloc */ 2191 if (likely(page)) 2192 return true; 2193 2194 /* alloc new page for storage */ 2195 page = dev_alloc_pages(igc_rx_pg_order(rx_ring)); 2196 if (unlikely(!page)) { 2197 rx_ring->rx_stats.alloc_failed++; 2198 return false; 2199 } 2200 2201 /* map page for use */ 2202 dma = dma_map_page_attrs(rx_ring->dev, page, 0, 2203 igc_rx_pg_size(rx_ring), 2204 DMA_FROM_DEVICE, 2205 IGC_RX_DMA_ATTR); 2206 2207 /* if mapping failed free memory back to system since 2208 * there isn't much point in holding memory we can't use 2209 */ 2210 if (dma_mapping_error(rx_ring->dev, dma)) { 2211 __free_page(page); 2212 2213 rx_ring->rx_stats.alloc_failed++; 2214 return false; 2215 } 2216 2217 bi->dma = dma; 2218 bi->page = page; 2219 bi->page_offset = igc_rx_offset(rx_ring); 2220 page_ref_add(page, USHRT_MAX - 1); 2221 bi->pagecnt_bias = USHRT_MAX; 2222 2223 return true; 2224 } 2225 2226 /** 2227 * igc_alloc_rx_buffers - Replace used receive buffers; packet split 2228 * @rx_ring: rx descriptor ring 2229 * @cleaned_count: number of buffers to clean 2230 */ 2231 static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count) 2232 { 2233 union igc_adv_rx_desc *rx_desc; 2234 u16 i = rx_ring->next_to_use; 2235 struct igc_rx_buffer *bi; 2236 u16 bufsz; 2237 2238 /* nothing to do */ 2239 if (!cleaned_count) 2240 return; 2241 2242 rx_desc = IGC_RX_DESC(rx_ring, i); 2243 bi = &rx_ring->rx_buffer_info[i]; 2244 i -= rx_ring->count; 2245 2246 bufsz = igc_rx_bufsz(rx_ring); 2247 2248 do { 2249 if (!igc_alloc_mapped_page(rx_ring, bi)) 2250 break; 2251 2252 /* sync the buffer for use by the device */ 2253 dma_sync_single_range_for_device(rx_ring->dev, 
bi->dma, 2254 bi->page_offset, bufsz, 2255 DMA_FROM_DEVICE); 2256 2257 /* Refresh the desc even if buffer_addrs didn't change 2258 * because each write-back erases this info. 2259 */ 2260 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); 2261 2262 rx_desc++; 2263 bi++; 2264 i++; 2265 if (unlikely(!i)) { 2266 rx_desc = IGC_RX_DESC(rx_ring, 0); 2267 bi = rx_ring->rx_buffer_info; 2268 i -= rx_ring->count; 2269 } 2270 2271 /* clear the length for the next_to_use descriptor */ 2272 rx_desc->wb.upper.length = 0; 2273 2274 cleaned_count--; 2275 } while (cleaned_count); 2276 2277 i += rx_ring->count; 2278 2279 if (rx_ring->next_to_use != i) { 2280 /* record the next descriptor to use */ 2281 rx_ring->next_to_use = i; 2282 2283 /* update next to alloc since we have filled the ring */ 2284 rx_ring->next_to_alloc = i; 2285 2286 /* Force memory writes to complete before letting h/w 2287 * know there are new descriptors to fetch. (Only 2288 * applicable for weak-ordered memory model archs, 2289 * such as IA-64). 2290 */ 2291 wmb(); 2292 writel(i, rx_ring->tail); 2293 } 2294 } 2295 2296 static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count) 2297 { 2298 union igc_adv_rx_desc *desc; 2299 u16 i = ring->next_to_use; 2300 struct igc_rx_buffer *bi; 2301 dma_addr_t dma; 2302 bool ok = true; 2303 2304 if (!count) 2305 return ok; 2306 2307 XSK_CHECK_PRIV_TYPE(struct igc_xdp_buff); 2308 2309 desc = IGC_RX_DESC(ring, i); 2310 bi = &ring->rx_buffer_info[i]; 2311 i -= ring->count; 2312 2313 do { 2314 bi->xdp = xsk_buff_alloc(ring->xsk_pool); 2315 if (!bi->xdp) { 2316 ok = false; 2317 break; 2318 } 2319 2320 dma = xsk_buff_xdp_get_dma(bi->xdp); 2321 desc->read.pkt_addr = cpu_to_le64(dma); 2322 2323 desc++; 2324 bi++; 2325 i++; 2326 if (unlikely(!i)) { 2327 desc = IGC_RX_DESC(ring, 0); 2328 bi = ring->rx_buffer_info; 2329 i -= ring->count; 2330 } 2331 2332 /* Clear the length for the next_to_use descriptor. */ 2333 desc->wb.upper.length = 0; 2334 2335 count--; 2336 } while (count); 2337 2338 i += ring->count; 2339 2340 if (ring->next_to_use != i) { 2341 ring->next_to_use = i; 2342 2343 /* Force memory writes to complete before letting h/w 2344 * know there are new descriptors to fetch. (Only 2345 * applicable for weak-ordered memory model archs, 2346 * such as IA-64). 2347 */ 2348 wmb(); 2349 writel(i, ring->tail); 2350 } 2351 2352 return ok; 2353 } 2354 2355 /* This function requires __netif_tx_lock is held by the caller. */ 2356 static int igc_xdp_init_tx_descriptor(struct igc_ring *ring, 2357 struct xdp_frame *xdpf) 2358 { 2359 struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); 2360 u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? 
sinfo->nr_frags : 0; 2361 u16 count, index = ring->next_to_use; 2362 struct igc_tx_buffer *head = &ring->tx_buffer_info[index]; 2363 struct igc_tx_buffer *buffer = head; 2364 union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index); 2365 u32 olinfo_status, len = xdpf->len, cmd_type; 2366 void *data = xdpf->data; 2367 u16 i; 2368 2369 count = TXD_USE_COUNT(len); 2370 for (i = 0; i < nr_frags; i++) 2371 count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i])); 2372 2373 if (igc_maybe_stop_tx(ring, count + 3)) { 2374 /* this is a hard error */ 2375 return -EBUSY; 2376 } 2377 2378 i = 0; 2379 head->bytecount = xdp_get_frame_len(xdpf); 2380 head->type = IGC_TX_BUFFER_TYPE_XDP; 2381 head->gso_segs = 1; 2382 head->xdpf = xdpf; 2383 2384 olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; 2385 desc->read.olinfo_status = cpu_to_le32(olinfo_status); 2386 2387 for (;;) { 2388 dma_addr_t dma; 2389 2390 dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE); 2391 if (dma_mapping_error(ring->dev, dma)) { 2392 netdev_err_once(ring->netdev, 2393 "Failed to map DMA for TX\n"); 2394 goto unmap; 2395 } 2396 2397 dma_unmap_len_set(buffer, len, len); 2398 dma_unmap_addr_set(buffer, dma, dma); 2399 2400 cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | 2401 IGC_ADVTXD_DCMD_IFCS | len; 2402 2403 desc->read.cmd_type_len = cpu_to_le32(cmd_type); 2404 desc->read.buffer_addr = cpu_to_le64(dma); 2405 2406 buffer->protocol = 0; 2407 2408 if (++index == ring->count) 2409 index = 0; 2410 2411 if (i == nr_frags) 2412 break; 2413 2414 buffer = &ring->tx_buffer_info[index]; 2415 desc = IGC_TX_DESC(ring, index); 2416 desc->read.olinfo_status = 0; 2417 2418 data = skb_frag_address(&sinfo->frags[i]); 2419 len = skb_frag_size(&sinfo->frags[i]); 2420 i++; 2421 } 2422 desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD); 2423 2424 netdev_tx_sent_queue(txring_txq(ring), head->bytecount); 2425 /* set the timestamp */ 2426 head->time_stamp = jiffies; 2427 /* set next_to_watch value indicating a packet is present */ 2428 head->next_to_watch = desc; 2429 ring->next_to_use = index; 2430 2431 return 0; 2432 2433 unmap: 2434 for (;;) { 2435 buffer = &ring->tx_buffer_info[index]; 2436 if (dma_unmap_len(buffer, len)) 2437 dma_unmap_page(ring->dev, 2438 dma_unmap_addr(buffer, dma), 2439 dma_unmap_len(buffer, len), 2440 DMA_TO_DEVICE); 2441 dma_unmap_len_set(buffer, len, 0); 2442 if (buffer == head) 2443 break; 2444 2445 if (!index) 2446 index += ring->count; 2447 index--; 2448 } 2449 2450 return -ENOMEM; 2451 } 2452 2453 static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter, 2454 int cpu) 2455 { 2456 int index = cpu; 2457 2458 if (unlikely(index < 0)) 2459 index = 0; 2460 2461 while (index >= adapter->num_tx_queues) 2462 index -= adapter->num_tx_queues; 2463 2464 return adapter->tx_ring[index]; 2465 } 2466 2467 static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp) 2468 { 2469 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); 2470 int cpu = smp_processor_id(); 2471 struct netdev_queue *nq; 2472 struct igc_ring *ring; 2473 int res; 2474 2475 if (unlikely(!xdpf)) 2476 return -EFAULT; 2477 2478 ring = igc_xdp_get_tx_ring(adapter, cpu); 2479 nq = txring_txq(ring); 2480 2481 __netif_tx_lock(nq, cpu); 2482 /* Avoid transmit queue timeout since we share it with the slow path */ 2483 txq_trans_cond_update(nq); 2484 res = igc_xdp_init_tx_descriptor(ring, xdpf); 2485 __netif_tx_unlock(nq); 2486 return res; 2487 } 2488 2489 /* This function assumes rcu_read_lock() is held by the caller. 
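 * The lock keeps the BPF program fetched with READ_ONCE() from being
 * released while its verdict is still being handled.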
*/ 2490 static int __igc_xdp_run_prog(struct igc_adapter *adapter, 2491 struct bpf_prog *prog, 2492 struct xdp_buff *xdp) 2493 { 2494 u32 act = bpf_prog_run_xdp(prog, xdp); 2495 2496 switch (act) { 2497 case XDP_PASS: 2498 return IGC_XDP_PASS; 2499 case XDP_TX: 2500 if (igc_xdp_xmit_back(adapter, xdp) < 0) 2501 goto out_failure; 2502 return IGC_XDP_TX; 2503 case XDP_REDIRECT: 2504 if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0) 2505 goto out_failure; 2506 return IGC_XDP_REDIRECT; 2507 break; 2508 default: 2509 bpf_warn_invalid_xdp_action(adapter->netdev, prog, act); 2510 fallthrough; 2511 case XDP_ABORTED: 2512 out_failure: 2513 trace_xdp_exception(adapter->netdev, prog, act); 2514 fallthrough; 2515 case XDP_DROP: 2516 return IGC_XDP_CONSUMED; 2517 } 2518 } 2519 2520 static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter, 2521 struct xdp_buff *xdp) 2522 { 2523 struct bpf_prog *prog; 2524 int res; 2525 2526 prog = READ_ONCE(adapter->xdp_prog); 2527 if (!prog) { 2528 res = IGC_XDP_PASS; 2529 goto out; 2530 } 2531 2532 res = __igc_xdp_run_prog(adapter, prog, xdp); 2533 2534 out: 2535 return ERR_PTR(-res); 2536 } 2537 2538 /* This function assumes __netif_tx_lock is held by the caller. */ 2539 static void igc_flush_tx_descriptors(struct igc_ring *ring) 2540 { 2541 /* Once tail pointer is updated, hardware can fetch the descriptors 2542 * any time so we issue a write membar here to ensure all memory 2543 * writes are complete before the tail pointer is updated. 2544 */ 2545 wmb(); 2546 writel(ring->next_to_use, ring->tail); 2547 } 2548 2549 static void igc_finalize_xdp(struct igc_adapter *adapter, int status) 2550 { 2551 int cpu = smp_processor_id(); 2552 struct netdev_queue *nq; 2553 struct igc_ring *ring; 2554 2555 if (status & IGC_XDP_TX) { 2556 ring = igc_xdp_get_tx_ring(adapter, cpu); 2557 nq = txring_txq(ring); 2558 2559 __netif_tx_lock(nq, cpu); 2560 igc_flush_tx_descriptors(ring); 2561 __netif_tx_unlock(nq); 2562 } 2563 2564 if (status & IGC_XDP_REDIRECT) 2565 xdp_do_flush(); 2566 } 2567 2568 static void igc_update_rx_stats(struct igc_q_vector *q_vector, 2569 unsigned int packets, unsigned int bytes) 2570 { 2571 struct igc_ring *ring = q_vector->rx.ring; 2572 2573 u64_stats_update_begin(&ring->rx_syncp); 2574 ring->rx_stats.packets += packets; 2575 ring->rx_stats.bytes += bytes; 2576 u64_stats_update_end(&ring->rx_syncp); 2577 2578 q_vector->rx.total_packets += packets; 2579 q_vector->rx.total_bytes += bytes; 2580 } 2581 2582 static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) 2583 { 2584 unsigned int total_bytes = 0, total_packets = 0; 2585 struct igc_adapter *adapter = q_vector->adapter; 2586 struct igc_ring *rx_ring = q_vector->rx.ring; 2587 struct sk_buff *skb = rx_ring->skb; 2588 u16 cleaned_count = igc_desc_unused(rx_ring); 2589 int xdp_status = 0, rx_buffer_pgcnt; 2590 2591 while (likely(total_packets < budget)) { 2592 struct igc_xdp_buff ctx = { .rx_ts = NULL }; 2593 struct igc_rx_buffer *rx_buffer; 2594 union igc_adv_rx_desc *rx_desc; 2595 unsigned int size, truesize; 2596 int pkt_offset = 0; 2597 void *pktbuf; 2598 2599 /* return some buffers to hardware, one at a time is too slow */ 2600 if (cleaned_count >= IGC_RX_BUFFER_WRITE) { 2601 igc_alloc_rx_buffers(rx_ring, cleaned_count); 2602 cleaned_count = 0; 2603 } 2604 2605 rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean); 2606 size = le16_to_cpu(rx_desc->wb.upper.length); 2607 if (!size) 2608 break; 2609 2610 /* This memory barrier is needed to keep us from reading 2611 * any 
other fields out of the rx_desc until we know the 2612 * descriptor has been written back 2613 */ 2614 dma_rmb(); 2615 2616 rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt); 2617 truesize = igc_get_rx_frame_truesize(rx_ring, size); 2618 2619 pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; 2620 2621 if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) { 2622 ctx.rx_ts = pktbuf; 2623 pkt_offset = IGC_TS_HDR_LEN; 2624 size -= IGC_TS_HDR_LEN; 2625 } 2626 2627 if (!skb) { 2628 xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq); 2629 xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring), 2630 igc_rx_offset(rx_ring) + pkt_offset, 2631 size, true); 2632 xdp_buff_clear_frags_flag(&ctx.xdp); 2633 ctx.rx_desc = rx_desc; 2634 2635 skb = igc_xdp_run_prog(adapter, &ctx.xdp); 2636 } 2637 2638 if (IS_ERR(skb)) { 2639 unsigned int xdp_res = -PTR_ERR(skb); 2640 2641 switch (xdp_res) { 2642 case IGC_XDP_CONSUMED: 2643 rx_buffer->pagecnt_bias++; 2644 break; 2645 case IGC_XDP_TX: 2646 case IGC_XDP_REDIRECT: 2647 igc_rx_buffer_flip(rx_buffer, truesize); 2648 xdp_status |= xdp_res; 2649 break; 2650 } 2651 2652 total_packets++; 2653 total_bytes += size; 2654 } else if (skb) 2655 igc_add_rx_frag(rx_ring, rx_buffer, skb, size); 2656 else if (ring_uses_build_skb(rx_ring)) 2657 skb = igc_build_skb(rx_ring, rx_buffer, &ctx.xdp); 2658 else 2659 skb = igc_construct_skb(rx_ring, rx_buffer, &ctx); 2660 2661 /* exit if we failed to retrieve a buffer */ 2662 if (!skb) { 2663 rx_ring->rx_stats.alloc_failed++; 2664 rx_buffer->pagecnt_bias++; 2665 break; 2666 } 2667 2668 igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt); 2669 cleaned_count++; 2670 2671 /* fetch next buffer in frame if non-eop */ 2672 if (igc_is_non_eop(rx_ring, rx_desc)) 2673 continue; 2674 2675 /* verify the packet layout is correct */ 2676 if (igc_cleanup_headers(rx_ring, rx_desc, skb)) { 2677 skb = NULL; 2678 continue; 2679 } 2680 2681 /* probably a little skewed due to removing CRC */ 2682 total_bytes += skb->len; 2683 2684 /* populate checksum, VLAN, and protocol */ 2685 igc_process_skb_fields(rx_ring, rx_desc, skb); 2686 2687 napi_gro_receive(&q_vector->napi, skb); 2688 2689 /* reset skb pointer */ 2690 skb = NULL; 2691 2692 /* update budget accounting */ 2693 total_packets++; 2694 } 2695 2696 if (xdp_status) 2697 igc_finalize_xdp(adapter, xdp_status); 2698 2699 /* place incomplete frames back on ring for completion */ 2700 rx_ring->skb = skb; 2701 2702 igc_update_rx_stats(q_vector, total_packets, total_bytes); 2703 2704 if (cleaned_count) 2705 igc_alloc_rx_buffers(rx_ring, cleaned_count); 2706 2707 return total_packets; 2708 } 2709 2710 static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring, 2711 struct xdp_buff *xdp) 2712 { 2713 unsigned int totalsize = xdp->data_end - xdp->data_meta; 2714 unsigned int metasize = xdp->data - xdp->data_meta; 2715 struct sk_buff *skb; 2716 2717 net_prefetch(xdp->data_meta); 2718 2719 skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize, 2720 GFP_ATOMIC | __GFP_NOWARN); 2721 if (unlikely(!skb)) 2722 return NULL; 2723 2724 memcpy(__skb_put(skb, totalsize), xdp->data_meta, 2725 ALIGN(totalsize, sizeof(long))); 2726 2727 if (metasize) { 2728 skb_metadata_set(skb, metasize); 2729 __skb_pull(skb, metasize); 2730 } 2731 2732 return skb; 2733 } 2734 2735 static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector, 2736 union igc_adv_rx_desc *desc, 2737 struct xdp_buff *xdp, 2738 ktime_t timestamp) 2739 { 2740 struct igc_ring *ring = q_vector->rx.ring; 2741 struct sk_buff 
*skb; 2742 2743 skb = igc_construct_skb_zc(ring, xdp); 2744 if (!skb) { 2745 ring->rx_stats.alloc_failed++; 2746 return; 2747 } 2748 2749 if (timestamp) 2750 skb_hwtstamps(skb)->hwtstamp = timestamp; 2751 2752 if (igc_cleanup_headers(ring, desc, skb)) 2753 return; 2754 2755 igc_process_skb_fields(ring, desc, skb); 2756 napi_gro_receive(&q_vector->napi, skb); 2757 } 2758 2759 static struct igc_xdp_buff *xsk_buff_to_igc_ctx(struct xdp_buff *xdp) 2760 { 2761 /* xdp_buff pointer used by ZC code path is alloc as xdp_buff_xsk. The 2762 * igc_xdp_buff shares its layout with xdp_buff_xsk and private 2763 * igc_xdp_buff fields fall into xdp_buff_xsk->cb 2764 */ 2765 return (struct igc_xdp_buff *)xdp; 2766 } 2767 2768 static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) 2769 { 2770 struct igc_adapter *adapter = q_vector->adapter; 2771 struct igc_ring *ring = q_vector->rx.ring; 2772 u16 cleaned_count = igc_desc_unused(ring); 2773 int total_bytes = 0, total_packets = 0; 2774 u16 ntc = ring->next_to_clean; 2775 struct bpf_prog *prog; 2776 bool failure = false; 2777 int xdp_status = 0; 2778 2779 rcu_read_lock(); 2780 2781 prog = READ_ONCE(adapter->xdp_prog); 2782 2783 while (likely(total_packets < budget)) { 2784 union igc_adv_rx_desc *desc; 2785 struct igc_rx_buffer *bi; 2786 struct igc_xdp_buff *ctx; 2787 ktime_t timestamp = 0; 2788 unsigned int size; 2789 int res; 2790 2791 desc = IGC_RX_DESC(ring, ntc); 2792 size = le16_to_cpu(desc->wb.upper.length); 2793 if (!size) 2794 break; 2795 2796 /* This memory barrier is needed to keep us from reading 2797 * any other fields out of the rx_desc until we know the 2798 * descriptor has been written back 2799 */ 2800 dma_rmb(); 2801 2802 bi = &ring->rx_buffer_info[ntc]; 2803 2804 ctx = xsk_buff_to_igc_ctx(bi->xdp); 2805 ctx->rx_desc = desc; 2806 2807 if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) { 2808 ctx->rx_ts = bi->xdp->data; 2809 2810 bi->xdp->data += IGC_TS_HDR_LEN; 2811 2812 /* HW timestamp has been copied into local variable. Metadata 2813 * length when XDP program is called should be 0. 2814 */ 2815 bi->xdp->data_meta += IGC_TS_HDR_LEN; 2816 size -= IGC_TS_HDR_LEN; 2817 } 2818 2819 bi->xdp->data_end = bi->xdp->data + size; 2820 xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool); 2821 2822 res = __igc_xdp_run_prog(adapter, prog, bi->xdp); 2823 switch (res) { 2824 case IGC_XDP_PASS: 2825 igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp); 2826 fallthrough; 2827 case IGC_XDP_CONSUMED: 2828 xsk_buff_free(bi->xdp); 2829 break; 2830 case IGC_XDP_TX: 2831 case IGC_XDP_REDIRECT: 2832 xdp_status |= res; 2833 break; 2834 } 2835 2836 bi->xdp = NULL; 2837 total_bytes += size; 2838 total_packets++; 2839 cleaned_count++; 2840 ntc++; 2841 if (ntc == ring->count) 2842 ntc = 0; 2843 } 2844 2845 ring->next_to_clean = ntc; 2846 rcu_read_unlock(); 2847 2848 if (cleaned_count >= IGC_RX_BUFFER_WRITE) 2849 failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count); 2850 2851 if (xdp_status) 2852 igc_finalize_xdp(adapter, xdp_status); 2853 2854 igc_update_rx_stats(q_vector, total_packets, total_bytes); 2855 2856 if (xsk_uses_need_wakeup(ring->xsk_pool)) { 2857 if (failure || ring->next_to_clean == ring->next_to_use) 2858 xsk_set_rx_need_wakeup(ring->xsk_pool); 2859 else 2860 xsk_clear_rx_need_wakeup(ring->xsk_pool); 2861 return total_packets; 2862 } 2863 2864 return failure ? 
budget : total_packets; 2865 } 2866 2867 static void igc_update_tx_stats(struct igc_q_vector *q_vector, 2868 unsigned int packets, unsigned int bytes) 2869 { 2870 struct igc_ring *ring = q_vector->tx.ring; 2871 2872 u64_stats_update_begin(&ring->tx_syncp); 2873 ring->tx_stats.bytes += bytes; 2874 ring->tx_stats.packets += packets; 2875 u64_stats_update_end(&ring->tx_syncp); 2876 2877 q_vector->tx.total_bytes += bytes; 2878 q_vector->tx.total_packets += packets; 2879 } 2880 2881 static void igc_xdp_xmit_zc(struct igc_ring *ring) 2882 { 2883 struct xsk_buff_pool *pool = ring->xsk_pool; 2884 struct netdev_queue *nq = txring_txq(ring); 2885 union igc_adv_tx_desc *tx_desc = NULL; 2886 int cpu = smp_processor_id(); 2887 struct xdp_desc xdp_desc; 2888 u16 budget, ntu; 2889 2890 if (!netif_carrier_ok(ring->netdev)) 2891 return; 2892 2893 __netif_tx_lock(nq, cpu); 2894 2895 /* Avoid transmit queue timeout since we share it with the slow path */ 2896 txq_trans_cond_update(nq); 2897 2898 ntu = ring->next_to_use; 2899 budget = igc_desc_unused(ring); 2900 2901 while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) { 2902 u32 cmd_type, olinfo_status; 2903 struct igc_tx_buffer *bi; 2904 dma_addr_t dma; 2905 2906 cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | 2907 IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | 2908 xdp_desc.len; 2909 olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT; 2910 2911 dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr); 2912 xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len); 2913 2914 tx_desc = IGC_TX_DESC(ring, ntu); 2915 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); 2916 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 2917 tx_desc->read.buffer_addr = cpu_to_le64(dma); 2918 2919 bi = &ring->tx_buffer_info[ntu]; 2920 bi->type = IGC_TX_BUFFER_TYPE_XSK; 2921 bi->protocol = 0; 2922 bi->bytecount = xdp_desc.len; 2923 bi->gso_segs = 1; 2924 bi->time_stamp = jiffies; 2925 bi->next_to_watch = tx_desc; 2926 2927 netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len); 2928 2929 ntu++; 2930 if (ntu == ring->count) 2931 ntu = 0; 2932 } 2933 2934 ring->next_to_use = ntu; 2935 if (tx_desc) { 2936 igc_flush_tx_descriptors(ring); 2937 xsk_tx_release(pool); 2938 } 2939 2940 __netif_tx_unlock(nq); 2941 } 2942 2943 /** 2944 * igc_clean_tx_irq - Reclaim resources after transmit completes 2945 * @q_vector: pointer to q_vector containing needed info 2946 * @napi_budget: Used to determine if we are in netpoll 2947 * 2948 * returns true if ring is completely cleaned 2949 */ 2950 static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) 2951 { 2952 struct igc_adapter *adapter = q_vector->adapter; 2953 unsigned int total_bytes = 0, total_packets = 0; 2954 unsigned int budget = q_vector->tx.work_limit; 2955 struct igc_ring *tx_ring = q_vector->tx.ring; 2956 unsigned int i = tx_ring->next_to_clean; 2957 struct igc_tx_buffer *tx_buffer; 2958 union igc_adv_tx_desc *tx_desc; 2959 u32 xsk_frames = 0; 2960 2961 if (test_bit(__IGC_DOWN, &adapter->state)) 2962 return true; 2963 2964 tx_buffer = &tx_ring->tx_buffer_info[i]; 2965 tx_desc = IGC_TX_DESC(tx_ring, i); 2966 i -= tx_ring->count; 2967 2968 do { 2969 union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; 2970 2971 /* if next_to_watch is not set then there is no work pending */ 2972 if (!eop_desc) 2973 break; 2974 2975 /* prevent any other reads prior to eop_desc */ 2976 smp_rmb(); 2977 2978 /* if DD is not set pending work has not been completed */ 2979 if (!(eop_desc->wb.status & 
cpu_to_le32(IGC_TXD_STAT_DD))) 2980 break; 2981 2982 /* clear next_to_watch to prevent false hangs */ 2983 tx_buffer->next_to_watch = NULL; 2984 2985 /* update the statistics for this packet */ 2986 total_bytes += tx_buffer->bytecount; 2987 total_packets += tx_buffer->gso_segs; 2988 2989 switch (tx_buffer->type) { 2990 case IGC_TX_BUFFER_TYPE_XSK: 2991 xsk_frames++; 2992 break; 2993 case IGC_TX_BUFFER_TYPE_XDP: 2994 xdp_return_frame(tx_buffer->xdpf); 2995 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 2996 break; 2997 case IGC_TX_BUFFER_TYPE_SKB: 2998 napi_consume_skb(tx_buffer->skb, napi_budget); 2999 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 3000 break; 3001 default: 3002 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); 3003 break; 3004 } 3005 3006 /* clear last DMA location and unmap remaining buffers */ 3007 while (tx_desc != eop_desc) { 3008 tx_buffer++; 3009 tx_desc++; 3010 i++; 3011 if (unlikely(!i)) { 3012 i -= tx_ring->count; 3013 tx_buffer = tx_ring->tx_buffer_info; 3014 tx_desc = IGC_TX_DESC(tx_ring, 0); 3015 } 3016 3017 /* unmap any remaining paged data */ 3018 if (dma_unmap_len(tx_buffer, len)) 3019 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 3020 } 3021 3022 /* move us one more past the eop_desc for start of next pkt */ 3023 tx_buffer++; 3024 tx_desc++; 3025 i++; 3026 if (unlikely(!i)) { 3027 i -= tx_ring->count; 3028 tx_buffer = tx_ring->tx_buffer_info; 3029 tx_desc = IGC_TX_DESC(tx_ring, 0); 3030 } 3031 3032 /* issue prefetch for next Tx descriptor */ 3033 prefetch(tx_desc); 3034 3035 /* update budget accounting */ 3036 budget--; 3037 } while (likely(budget)); 3038 3039 netdev_tx_completed_queue(txring_txq(tx_ring), 3040 total_packets, total_bytes); 3041 3042 i += tx_ring->count; 3043 tx_ring->next_to_clean = i; 3044 3045 igc_update_tx_stats(q_vector, total_packets, total_bytes); 3046 3047 if (tx_ring->xsk_pool) { 3048 if (xsk_frames) 3049 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); 3050 if (xsk_uses_need_wakeup(tx_ring->xsk_pool)) 3051 xsk_set_tx_need_wakeup(tx_ring->xsk_pool); 3052 igc_xdp_xmit_zc(tx_ring); 3053 } 3054 3055 if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { 3056 struct igc_hw *hw = &adapter->hw; 3057 3058 /* Detect a transmit hang in hardware, this serializes the 3059 * check with the clearing of time_stamp and movement of i 3060 */ 3061 clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 3062 if (tx_buffer->next_to_watch && 3063 time_after(jiffies, tx_buffer->time_stamp + 3064 (adapter->tx_timeout_factor * HZ)) && 3065 !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) && 3066 (rd32(IGC_TDH(tx_ring->reg_idx)) != readl(tx_ring->tail)) && 3067 !tx_ring->oper_gate_closed) { 3068 /* detected Tx unit hang */ 3069 netdev_err(tx_ring->netdev, 3070 "Detected Tx Unit Hang\n" 3071 " Tx Queue <%d>\n" 3072 " TDH <%x>\n" 3073 " TDT <%x>\n" 3074 " next_to_use <%x>\n" 3075 " next_to_clean <%x>\n" 3076 "buffer_info[next_to_clean]\n" 3077 " time_stamp <%lx>\n" 3078 " next_to_watch <%p>\n" 3079 " jiffies <%lx>\n" 3080 " desc.status <%x>\n", 3081 tx_ring->queue_index, 3082 rd32(IGC_TDH(tx_ring->reg_idx)), 3083 readl(tx_ring->tail), 3084 tx_ring->next_to_use, 3085 tx_ring->next_to_clean, 3086 tx_buffer->time_stamp, 3087 tx_buffer->next_to_watch, 3088 jiffies, 3089 tx_buffer->next_to_watch->wb.status); 3090 netif_stop_subqueue(tx_ring->netdev, 3091 tx_ring->queue_index); 3092 3093 /* we are about to reset, no point in enabling stuff */ 3094 return true; 3095 } 3096 } 3097 3098 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 3099 if 
(unlikely(total_packets && 3100 netif_carrier_ok(tx_ring->netdev) && 3101 igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { 3102 /* Make sure that anybody stopping the queue after this 3103 * sees the new next_to_clean. 3104 */ 3105 smp_mb(); 3106 if (__netif_subqueue_stopped(tx_ring->netdev, 3107 tx_ring->queue_index) && 3108 !(test_bit(__IGC_DOWN, &adapter->state))) { 3109 netif_wake_subqueue(tx_ring->netdev, 3110 tx_ring->queue_index); 3111 3112 u64_stats_update_begin(&tx_ring->tx_syncp); 3113 tx_ring->tx_stats.restart_queue++; 3114 u64_stats_update_end(&tx_ring->tx_syncp); 3115 } 3116 } 3117 3118 return !!budget; 3119 } 3120 3121 static int igc_find_mac_filter(struct igc_adapter *adapter, 3122 enum igc_mac_filter_type type, const u8 *addr) 3123 { 3124 struct igc_hw *hw = &adapter->hw; 3125 int max_entries = hw->mac.rar_entry_count; 3126 u32 ral, rah; 3127 int i; 3128 3129 for (i = 0; i < max_entries; i++) { 3130 ral = rd32(IGC_RAL(i)); 3131 rah = rd32(IGC_RAH(i)); 3132 3133 if (!(rah & IGC_RAH_AV)) 3134 continue; 3135 if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type) 3136 continue; 3137 if ((rah & IGC_RAH_RAH_MASK) != 3138 le16_to_cpup((__le16 *)(addr + 4))) 3139 continue; 3140 if (ral != le32_to_cpup((__le32 *)(addr))) 3141 continue; 3142 3143 return i; 3144 } 3145 3146 return -1; 3147 } 3148 3149 static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter) 3150 { 3151 struct igc_hw *hw = &adapter->hw; 3152 int max_entries = hw->mac.rar_entry_count; 3153 u32 rah; 3154 int i; 3155 3156 for (i = 0; i < max_entries; i++) { 3157 rah = rd32(IGC_RAH(i)); 3158 3159 if (!(rah & IGC_RAH_AV)) 3160 return i; 3161 } 3162 3163 return -1; 3164 } 3165 3166 /** 3167 * igc_add_mac_filter() - Add MAC address filter 3168 * @adapter: Pointer to adapter where the filter should be added 3169 * @type: MAC address filter type (source or destination) 3170 * @addr: MAC address 3171 * @queue: If non-negative, queue assignment feature is enabled and frames 3172 * matching the filter are enqueued onto 'queue'. Otherwise, queue 3173 * assignment is disabled. 3174 * 3175 * Return: 0 in case of success, negative errno code otherwise. 3176 */ 3177 static int igc_add_mac_filter(struct igc_adapter *adapter, 3178 enum igc_mac_filter_type type, const u8 *addr, 3179 int queue) 3180 { 3181 struct net_device *dev = adapter->netdev; 3182 int index; 3183 3184 index = igc_find_mac_filter(adapter, type, addr); 3185 if (index >= 0) 3186 goto update_filter; 3187 3188 index = igc_get_avail_mac_filter_slot(adapter); 3189 if (index < 0) 3190 return -ENOSPC; 3191 3192 netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n", 3193 index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src", 3194 addr, queue); 3195 3196 update_filter: 3197 igc_set_mac_filter_hw(adapter, index, type, addr, queue); 3198 return 0; 3199 } 3200 3201 /** 3202 * igc_del_mac_filter() - Delete MAC address filter 3203 * @adapter: Pointer to adapter where the filter should be deleted from 3204 * @type: MAC address filter type (source or destination) 3205 * @addr: MAC address 3206 */ 3207 static void igc_del_mac_filter(struct igc_adapter *adapter, 3208 enum igc_mac_filter_type type, const u8 *addr) 3209 { 3210 struct net_device *dev = adapter->netdev; 3211 int index; 3212 3213 index = igc_find_mac_filter(adapter, type, addr); 3214 if (index < 0) 3215 return; 3216 3217 if (index == 0) { 3218 /* If this is the default filter, we don't actually delete it. 3219 * We just reset to its default value i.e. disable queue 3220 * assignment. 
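 * Filter index 0 holds the adapter's own (default) MAC address, which
 * must stay active, so only its queue assignment is dropped.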
3221 */ 3222 netdev_dbg(dev, "Disable default MAC filter queue assignment"); 3223 3224 igc_set_mac_filter_hw(adapter, 0, type, addr, -1); 3225 } else { 3226 netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n", 3227 index, 3228 type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src", 3229 addr); 3230 3231 igc_clear_mac_filter_hw(adapter, index); 3232 } 3233 } 3234 3235 /** 3236 * igc_add_vlan_prio_filter() - Add VLAN priority filter 3237 * @adapter: Pointer to adapter where the filter should be added 3238 * @prio: VLAN priority value 3239 * @queue: Queue number which matching frames are assigned to 3240 * 3241 * Return: 0 in case of success, negative errno code otherwise. 3242 */ 3243 static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio, 3244 int queue) 3245 { 3246 struct net_device *dev = adapter->netdev; 3247 struct igc_hw *hw = &adapter->hw; 3248 u32 vlanpqf; 3249 3250 vlanpqf = rd32(IGC_VLANPQF); 3251 3252 if (vlanpqf & IGC_VLANPQF_VALID(prio)) { 3253 netdev_dbg(dev, "VLAN priority filter already in use\n"); 3254 return -EEXIST; 3255 } 3256 3257 vlanpqf |= IGC_VLANPQF_QSEL(prio, queue); 3258 vlanpqf |= IGC_VLANPQF_VALID(prio); 3259 3260 wr32(IGC_VLANPQF, vlanpqf); 3261 3262 netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n", 3263 prio, queue); 3264 return 0; 3265 } 3266 3267 /** 3268 * igc_del_vlan_prio_filter() - Delete VLAN priority filter 3269 * @adapter: Pointer to adapter where the filter should be deleted from 3270 * @prio: VLAN priority value 3271 */ 3272 static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio) 3273 { 3274 struct igc_hw *hw = &adapter->hw; 3275 u32 vlanpqf; 3276 3277 vlanpqf = rd32(IGC_VLANPQF); 3278 3279 vlanpqf &= ~IGC_VLANPQF_VALID(prio); 3280 vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK); 3281 3282 wr32(IGC_VLANPQF, vlanpqf); 3283 3284 netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n", 3285 prio); 3286 } 3287 3288 static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter) 3289 { 3290 struct igc_hw *hw = &adapter->hw; 3291 int i; 3292 3293 for (i = 0; i < MAX_ETYPE_FILTER; i++) { 3294 u32 etqf = rd32(IGC_ETQF(i)); 3295 3296 if (!(etqf & IGC_ETQF_FILTER_ENABLE)) 3297 return i; 3298 } 3299 3300 return -1; 3301 } 3302 3303 /** 3304 * igc_add_etype_filter() - Add ethertype filter 3305 * @adapter: Pointer to adapter where the filter should be added 3306 * @etype: Ethertype value 3307 * @queue: If non-negative, queue assignment feature is enabled and frames 3308 * matching the filter are enqueued onto 'queue'. Otherwise, queue 3309 * assignment is disabled. 3310 * 3311 * Return: 0 in case of success, negative errno code otherwise. 
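 *
 * Up to MAX_ETYPE_FILTER ethertype filters are supported; -ENOSPC is
 * returned once every ETQF register is already in use.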
3312 */ 3313 static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype, 3314 int queue) 3315 { 3316 struct igc_hw *hw = &adapter->hw; 3317 int index; 3318 u32 etqf; 3319 3320 index = igc_get_avail_etype_filter_slot(adapter); 3321 if (index < 0) 3322 return -ENOSPC; 3323 3324 etqf = rd32(IGC_ETQF(index)); 3325 3326 etqf &= ~IGC_ETQF_ETYPE_MASK; 3327 etqf |= etype; 3328 3329 if (queue >= 0) { 3330 etqf &= ~IGC_ETQF_QUEUE_MASK; 3331 etqf |= (queue << IGC_ETQF_QUEUE_SHIFT); 3332 etqf |= IGC_ETQF_QUEUE_ENABLE; 3333 } 3334 3335 etqf |= IGC_ETQF_FILTER_ENABLE; 3336 3337 wr32(IGC_ETQF(index), etqf); 3338 3339 netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n", 3340 etype, queue); 3341 return 0; 3342 } 3343 3344 static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype) 3345 { 3346 struct igc_hw *hw = &adapter->hw; 3347 int i; 3348 3349 for (i = 0; i < MAX_ETYPE_FILTER; i++) { 3350 u32 etqf = rd32(IGC_ETQF(i)); 3351 3352 if ((etqf & IGC_ETQF_ETYPE_MASK) == etype) 3353 return i; 3354 } 3355 3356 return -1; 3357 } 3358 3359 /** 3360 * igc_del_etype_filter() - Delete ethertype filter 3361 * @adapter: Pointer to adapter where the filter should be deleted from 3362 * @etype: Ethertype value 3363 */ 3364 static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype) 3365 { 3366 struct igc_hw *hw = &adapter->hw; 3367 int index; 3368 3369 index = igc_find_etype_filter(adapter, etype); 3370 if (index < 0) 3371 return; 3372 3373 wr32(IGC_ETQF(index), 0); 3374 3375 netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n", 3376 etype); 3377 } 3378 3379 static int igc_flex_filter_select(struct igc_adapter *adapter, 3380 struct igc_flex_filter *input, 3381 u32 *fhft) 3382 { 3383 struct igc_hw *hw = &adapter->hw; 3384 u8 fhft_index; 3385 u32 fhftsl; 3386 3387 if (input->index >= MAX_FLEX_FILTER) { 3388 dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n"); 3389 return -EINVAL; 3390 } 3391 3392 /* Indirect table select register */ 3393 fhftsl = rd32(IGC_FHFTSL); 3394 fhftsl &= ~IGC_FHFTSL_FTSL_MASK; 3395 switch (input->index) { 3396 case 0 ... 7: 3397 fhftsl |= 0x00; 3398 break; 3399 case 8 ... 15: 3400 fhftsl |= 0x01; 3401 break; 3402 case 16 ... 23: 3403 fhftsl |= 0x02; 3404 break; 3405 case 24 ... 31: 3406 fhftsl |= 0x03; 3407 break; 3408 } 3409 wr32(IGC_FHFTSL, fhftsl); 3410 3411 /* Normalize index down to host table register */ 3412 fhft_index = input->index % 8; 3413 3414 *fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) : 3415 IGC_FHFT_EXT(fhft_index - 4); 3416 3417 return 0; 3418 } 3419 3420 static int igc_write_flex_filter_ll(struct igc_adapter *adapter, 3421 struct igc_flex_filter *input) 3422 { 3423 struct device *dev = &adapter->pdev->dev; 3424 struct igc_hw *hw = &adapter->hw; 3425 u8 *data = input->data; 3426 u8 *mask = input->mask; 3427 u32 queuing; 3428 u32 fhft; 3429 u32 wufc; 3430 int ret; 3431 int i; 3432 3433 /* Length has to be aligned to 8. Otherwise the filter will fail. Bail 3434 * out early to avoid surprises later. 3435 */ 3436 if (input->length % 8 != 0) { 3437 dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n"); 3438 return -EINVAL; 3439 } 3440 3441 /* Select corresponding flex filter register and get base for host table. */ 3442 ret = igc_flex_filter_select(adapter, input, &fhft); 3443 if (ret) 3444 return ret; 3445 3446 /* When adding a filter globally disable flex filter feature. That is 3447 * recommended within the datasheet. 
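 * The feature is turned back on at the end of this function, once the
 * filter data, mask and queueing properties have been written.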
3448 */ 3449 wufc = rd32(IGC_WUFC); 3450 wufc &= ~IGC_WUFC_FLEX_HQ; 3451 wr32(IGC_WUFC, wufc); 3452 3453 /* Configure filter */ 3454 queuing = input->length & IGC_FHFT_LENGTH_MASK; 3455 queuing |= FIELD_PREP(IGC_FHFT_QUEUE_MASK, input->rx_queue); 3456 queuing |= FIELD_PREP(IGC_FHFT_PRIO_MASK, input->prio); 3457 3458 if (input->immediate_irq) 3459 queuing |= IGC_FHFT_IMM_INT; 3460 3461 if (input->drop) 3462 queuing |= IGC_FHFT_DROP; 3463 3464 wr32(fhft + 0xFC, queuing); 3465 3466 /* Write data (128 byte) and mask (128 bit) */ 3467 for (i = 0; i < 16; ++i) { 3468 const size_t data_idx = i * 8; 3469 const size_t row_idx = i * 16; 3470 u32 dw0 = 3471 (data[data_idx + 0] << 0) | 3472 (data[data_idx + 1] << 8) | 3473 (data[data_idx + 2] << 16) | 3474 (data[data_idx + 3] << 24); 3475 u32 dw1 = 3476 (data[data_idx + 4] << 0) | 3477 (data[data_idx + 5] << 8) | 3478 (data[data_idx + 6] << 16) | 3479 (data[data_idx + 7] << 24); 3480 u32 tmp; 3481 3482 /* Write row: dw0, dw1 and mask */ 3483 wr32(fhft + row_idx, dw0); 3484 wr32(fhft + row_idx + 4, dw1); 3485 3486 /* mask is only valid for MASK(7, 0) */ 3487 tmp = rd32(fhft + row_idx + 8); 3488 tmp &= ~GENMASK(7, 0); 3489 tmp |= mask[i]; 3490 wr32(fhft + row_idx + 8, tmp); 3491 } 3492 3493 /* Enable filter. */ 3494 wufc |= IGC_WUFC_FLEX_HQ; 3495 if (input->index > 8) { 3496 /* Filter 0-7 are enabled via WUFC. The other 24 filters are not. */ 3497 u32 wufc_ext = rd32(IGC_WUFC_EXT); 3498 3499 wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8)); 3500 3501 wr32(IGC_WUFC_EXT, wufc_ext); 3502 } else { 3503 wufc |= (IGC_WUFC_FLX0 << input->index); 3504 } 3505 wr32(IGC_WUFC, wufc); 3506 3507 dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n", 3508 input->index); 3509 3510 return 0; 3511 } 3512 3513 static void igc_flex_filter_add_field(struct igc_flex_filter *flex, 3514 const void *src, unsigned int offset, 3515 size_t len, const void *mask) 3516 { 3517 int i; 3518 3519 /* data */ 3520 memcpy(&flex->data[offset], src, len); 3521 3522 /* mask */ 3523 for (i = 0; i < len; ++i) { 3524 const unsigned int idx = i + offset; 3525 const u8 *ptr = mask; 3526 3527 if (mask) { 3528 if (ptr[i] & 0xff) 3529 flex->mask[idx / 8] |= BIT(idx % 8); 3530 3531 continue; 3532 } 3533 3534 flex->mask[idx / 8] |= BIT(idx % 8); 3535 } 3536 } 3537 3538 static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter) 3539 { 3540 struct igc_hw *hw = &adapter->hw; 3541 u32 wufc, wufc_ext; 3542 int i; 3543 3544 wufc = rd32(IGC_WUFC); 3545 wufc_ext = rd32(IGC_WUFC_EXT); 3546 3547 for (i = 0; i < MAX_FLEX_FILTER; i++) { 3548 if (i < 8) { 3549 if (!(wufc & (IGC_WUFC_FLX0 << i))) 3550 return i; 3551 } else { 3552 if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8)))) 3553 return i; 3554 } 3555 } 3556 3557 return -ENOSPC; 3558 } 3559 3560 static bool igc_flex_filter_in_use(struct igc_adapter *adapter) 3561 { 3562 struct igc_hw *hw = &adapter->hw; 3563 u32 wufc, wufc_ext; 3564 3565 wufc = rd32(IGC_WUFC); 3566 wufc_ext = rd32(IGC_WUFC_EXT); 3567 3568 if (wufc & IGC_WUFC_FILTER_MASK) 3569 return true; 3570 3571 if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK) 3572 return true; 3573 3574 return false; 3575 } 3576 3577 static int igc_add_flex_filter(struct igc_adapter *adapter, 3578 struct igc_nfc_rule *rule) 3579 { 3580 struct igc_flex_filter flex = { }; 3581 struct igc_nfc_filter *filter = &rule->filter; 3582 unsigned int eth_offset, user_offset; 3583 int ret, index; 3584 bool vlan; 3585 3586 index = igc_find_avail_flex_filter_slot(adapter); 3587 if (index < 0) 3588 return -ENOSPC; 3589 3590 
/* Construct the flex filter: 3591 * -> dest_mac [6] 3592 * -> src_mac [6] 3593 * -> tpid [2] 3594 * -> vlan tci [2] 3595 * -> ether type [2] 3596 * -> user data [8] 3597 * -> = 26 bytes => 32 length 3598 */ 3599 flex.index = index; 3600 flex.length = 32; 3601 flex.rx_queue = rule->action; 3602 3603 vlan = rule->filter.vlan_tci || rule->filter.vlan_etype; 3604 eth_offset = vlan ? 16 : 12; 3605 user_offset = vlan ? 18 : 14; 3606 3607 /* Add destination MAC */ 3608 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) 3609 igc_flex_filter_add_field(&flex, &filter->dst_addr, 0, 3610 ETH_ALEN, NULL); 3611 3612 /* Add source MAC */ 3613 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) 3614 igc_flex_filter_add_field(&flex, &filter->src_addr, 6, 3615 ETH_ALEN, NULL); 3616 3617 /* Add VLAN etype */ 3618 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) 3619 igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12, 3620 sizeof(filter->vlan_etype), 3621 NULL); 3622 3623 /* Add VLAN TCI */ 3624 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) 3625 igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14, 3626 sizeof(filter->vlan_tci), NULL); 3627 3628 /* Add Ether type */ 3629 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { 3630 __be16 etype = cpu_to_be16(filter->etype); 3631 3632 igc_flex_filter_add_field(&flex, &etype, eth_offset, 3633 sizeof(etype), NULL); 3634 } 3635 3636 /* Add user data */ 3637 if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) 3638 igc_flex_filter_add_field(&flex, &filter->user_data, 3639 user_offset, 3640 sizeof(filter->user_data), 3641 filter->user_mask); 3642 3643 /* Add it down to the hardware and enable it. */ 3644 ret = igc_write_flex_filter_ll(adapter, &flex); 3645 if (ret) 3646 return ret; 3647 3648 filter->flex_index = index; 3649 3650 return 0; 3651 } 3652 3653 static void igc_del_flex_filter(struct igc_adapter *adapter, 3654 u16 reg_index) 3655 { 3656 struct igc_hw *hw = &adapter->hw; 3657 u32 wufc; 3658 3659 /* Just disable the filter. The filter table itself is kept 3660 * intact. Another flex_filter_add() should override the "old" data 3661 * then. 
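 * Only the per-filter enable bit in WUFC/WUFC_EXT is cleared here; the
 * FHFT entries written by igc_write_flex_filter_ll() remain in place.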
3662 */ 3663 if (reg_index > 8) { 3664 u32 wufc_ext = rd32(IGC_WUFC_EXT); 3665 3666 wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8)); 3667 wr32(IGC_WUFC_EXT, wufc_ext); 3668 } else { 3669 wufc = rd32(IGC_WUFC); 3670 3671 wufc &= ~(IGC_WUFC_FLX0 << reg_index); 3672 wr32(IGC_WUFC, wufc); 3673 } 3674 3675 if (igc_flex_filter_in_use(adapter)) 3676 return; 3677 3678 /* No filters are in use, we may disable flex filters */ 3679 wufc = rd32(IGC_WUFC); 3680 wufc &= ~IGC_WUFC_FLEX_HQ; 3681 wr32(IGC_WUFC, wufc); 3682 } 3683 3684 static int igc_enable_nfc_rule(struct igc_adapter *adapter, 3685 struct igc_nfc_rule *rule) 3686 { 3687 int err; 3688 3689 if (rule->flex) { 3690 return igc_add_flex_filter(adapter, rule); 3691 } 3692 3693 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { 3694 err = igc_add_etype_filter(adapter, rule->filter.etype, 3695 rule->action); 3696 if (err) 3697 return err; 3698 } 3699 3700 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { 3701 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, 3702 rule->filter.src_addr, rule->action); 3703 if (err) 3704 return err; 3705 } 3706 3707 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { 3708 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, 3709 rule->filter.dst_addr, rule->action); 3710 if (err) 3711 return err; 3712 } 3713 3714 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { 3715 int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci); 3716 3717 err = igc_add_vlan_prio_filter(adapter, prio, rule->action); 3718 if (err) 3719 return err; 3720 } 3721 3722 return 0; 3723 } 3724 3725 static void igc_disable_nfc_rule(struct igc_adapter *adapter, 3726 const struct igc_nfc_rule *rule) 3727 { 3728 if (rule->flex) { 3729 igc_del_flex_filter(adapter, rule->filter.flex_index); 3730 return; 3731 } 3732 3733 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) 3734 igc_del_etype_filter(adapter, rule->filter.etype); 3735 3736 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { 3737 int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci); 3738 3739 igc_del_vlan_prio_filter(adapter, prio); 3740 } 3741 3742 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) 3743 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, 3744 rule->filter.src_addr); 3745 3746 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) 3747 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, 3748 rule->filter.dst_addr); 3749 } 3750 3751 /** 3752 * igc_get_nfc_rule() - Get NFC rule 3753 * @adapter: Pointer to adapter 3754 * @location: Rule location 3755 * 3756 * Context: Expects adapter->nfc_rule_lock to be held by caller. 3757 * 3758 * Return: Pointer to NFC rule at @location. If not found, NULL. 3759 */ 3760 struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter, 3761 u32 location) 3762 { 3763 struct igc_nfc_rule *rule; 3764 3765 list_for_each_entry(rule, &adapter->nfc_rule_list, list) { 3766 if (rule->location == location) 3767 return rule; 3768 if (rule->location > location) 3769 break; 3770 } 3771 3772 return NULL; 3773 } 3774 3775 /** 3776 * igc_del_nfc_rule() - Delete NFC rule 3777 * @adapter: Pointer to adapter 3778 * @rule: Pointer to rule to be deleted 3779 * 3780 * Disable NFC rule in hardware and delete it from adapter. 3781 * 3782 * Context: Expects adapter->nfc_rule_lock to be held by caller. 
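 *
 * The rule memory is freed here, so the caller must not dereference
 * @rule once this function returns.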
3783 */ 3784 void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) 3785 { 3786 igc_disable_nfc_rule(adapter, rule); 3787 3788 list_del(&rule->list); 3789 adapter->nfc_rule_count--; 3790 3791 kfree(rule); 3792 } 3793 3794 static void igc_flush_nfc_rules(struct igc_adapter *adapter) 3795 { 3796 struct igc_nfc_rule *rule, *tmp; 3797 3798 mutex_lock(&adapter->nfc_rule_lock); 3799 3800 list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list) 3801 igc_del_nfc_rule(adapter, rule); 3802 3803 mutex_unlock(&adapter->nfc_rule_lock); 3804 } 3805 3806 /** 3807 * igc_add_nfc_rule() - Add NFC rule 3808 * @adapter: Pointer to adapter 3809 * @rule: Pointer to rule to be added 3810 * 3811 * Enable NFC rule in hardware and add it to adapter. 3812 * 3813 * Context: Expects adapter->nfc_rule_lock to be held by caller. 3814 * 3815 * Return: 0 on success, negative errno on failure. 3816 */ 3817 int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) 3818 { 3819 struct igc_nfc_rule *pred, *cur; 3820 int err; 3821 3822 err = igc_enable_nfc_rule(adapter, rule); 3823 if (err) 3824 return err; 3825 3826 pred = NULL; 3827 list_for_each_entry(cur, &adapter->nfc_rule_list, list) { 3828 if (cur->location >= rule->location) 3829 break; 3830 pred = cur; 3831 } 3832 3833 list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list); 3834 adapter->nfc_rule_count++; 3835 return 0; 3836 } 3837 3838 static void igc_restore_nfc_rules(struct igc_adapter *adapter) 3839 { 3840 struct igc_nfc_rule *rule; 3841 3842 mutex_lock(&adapter->nfc_rule_lock); 3843 3844 list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list) 3845 igc_enable_nfc_rule(adapter, rule); 3846 3847 mutex_unlock(&adapter->nfc_rule_lock); 3848 } 3849 3850 static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr) 3851 { 3852 struct igc_adapter *adapter = netdev_priv(netdev); 3853 3854 return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1); 3855 } 3856 3857 static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr) 3858 { 3859 struct igc_adapter *adapter = netdev_priv(netdev); 3860 3861 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr); 3862 return 0; 3863 } 3864 3865 /** 3866 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set 3867 * @netdev: network interface device structure 3868 * 3869 * The set_rx_mode entry point is called whenever the unicast or multicast 3870 * address lists or the network interface flags are updated. This routine is 3871 * responsible for configuring the hardware for proper unicast, multicast, 3872 * promiscuous mode, and all-multi behavior. 
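 * It also reprograms IGC_RLPML with the largest frame size the current
 * Rx buffer layout is expected to handle.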
3873 */ 3874 static void igc_set_rx_mode(struct net_device *netdev) 3875 { 3876 struct igc_adapter *adapter = netdev_priv(netdev); 3877 struct igc_hw *hw = &adapter->hw; 3878 u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE; 3879 int count; 3880 3881 /* Check for Promiscuous and All Multicast modes */ 3882 if (netdev->flags & IFF_PROMISC) { 3883 rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE; 3884 } else { 3885 if (netdev->flags & IFF_ALLMULTI) { 3886 rctl |= IGC_RCTL_MPE; 3887 } else { 3888 /* Write addresses to the MTA, if the attempt fails 3889 * then we should just turn on promiscuous mode so 3890 * that we can at least receive multicast traffic 3891 */ 3892 count = igc_write_mc_addr_list(netdev); 3893 if (count < 0) 3894 rctl |= IGC_RCTL_MPE; 3895 } 3896 } 3897 3898 /* Write addresses to available RAR registers, if there is not 3899 * sufficient space to store all the addresses then enable 3900 * unicast promiscuous mode 3901 */ 3902 if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync)) 3903 rctl |= IGC_RCTL_UPE; 3904 3905 /* update state of unicast and multicast */ 3906 rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE); 3907 wr32(IGC_RCTL, rctl); 3908 3909 #if (PAGE_SIZE < 8192) 3910 if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB) 3911 rlpml = IGC_MAX_FRAME_BUILD_SKB; 3912 #endif 3913 wr32(IGC_RLPML, rlpml); 3914 } 3915 3916 /** 3917 * igc_configure - configure the hardware for RX and TX 3918 * @adapter: private board structure 3919 */ 3920 static void igc_configure(struct igc_adapter *adapter) 3921 { 3922 struct net_device *netdev = adapter->netdev; 3923 int i = 0; 3924 3925 igc_get_hw_control(adapter); 3926 igc_set_rx_mode(netdev); 3927 3928 igc_restore_vlan(adapter); 3929 3930 igc_setup_tctl(adapter); 3931 igc_setup_mrqc(adapter); 3932 igc_setup_rctl(adapter); 3933 3934 igc_set_default_mac_filter(adapter); 3935 igc_restore_nfc_rules(adapter); 3936 3937 igc_configure_tx(adapter); 3938 igc_configure_rx(adapter); 3939 3940 igc_rx_fifo_flush_base(&adapter->hw); 3941 3942 /* call igc_desc_unused which always leaves 3943 * at least 1 descriptor unused to make sure 3944 * next_to_use != next_to_clean 3945 */ 3946 for (i = 0; i < adapter->num_rx_queues; i++) { 3947 struct igc_ring *ring = adapter->rx_ring[i]; 3948 3949 if (ring->xsk_pool) 3950 igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring)); 3951 else 3952 igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); 3953 } 3954 } 3955 3956 /** 3957 * igc_write_ivar - configure ivar for given MSI-X vector 3958 * @hw: pointer to the HW structure 3959 * @msix_vector: vector number we are allocating to a given ring 3960 * @index: row index of IVAR register to write within IVAR table 3961 * @offset: column offset of in IVAR, should be multiple of 8 3962 * 3963 * The IVAR table consists of 2 columns, 3964 * each containing an cause allocation for an Rx and Tx ring, and a 3965 * variable number of rows depending on the number of queues supported. 
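 * As used by igc_assign_vector(), Rx queue 2n maps to bits 7:0 and
 * Tx queue 2n to bits 15:8 of IVAR register n, while Rx queue 2n+1 and
 * Tx queue 2n+1 use bits 23:16 and 31:24 respectively.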
3966 */ 3967 static void igc_write_ivar(struct igc_hw *hw, int msix_vector, 3968 int index, int offset) 3969 { 3970 u32 ivar = array_rd32(IGC_IVAR0, index); 3971 3972 /* clear any bits that are currently set */ 3973 ivar &= ~((u32)0xFF << offset); 3974 3975 /* write vector and valid bit */ 3976 ivar |= (msix_vector | IGC_IVAR_VALID) << offset; 3977 3978 array_wr32(IGC_IVAR0, index, ivar); 3979 } 3980 3981 static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector) 3982 { 3983 struct igc_adapter *adapter = q_vector->adapter; 3984 struct igc_hw *hw = &adapter->hw; 3985 int rx_queue = IGC_N0_QUEUE; 3986 int tx_queue = IGC_N0_QUEUE; 3987 3988 if (q_vector->rx.ring) 3989 rx_queue = q_vector->rx.ring->reg_idx; 3990 if (q_vector->tx.ring) 3991 tx_queue = q_vector->tx.ring->reg_idx; 3992 3993 switch (hw->mac.type) { 3994 case igc_i225: 3995 if (rx_queue > IGC_N0_QUEUE) 3996 igc_write_ivar(hw, msix_vector, 3997 rx_queue >> 1, 3998 (rx_queue & 0x1) << 4); 3999 if (tx_queue > IGC_N0_QUEUE) 4000 igc_write_ivar(hw, msix_vector, 4001 tx_queue >> 1, 4002 ((tx_queue & 0x1) << 4) + 8); 4003 q_vector->eims_value = BIT(msix_vector); 4004 break; 4005 default: 4006 WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n"); 4007 break; 4008 } 4009 4010 /* add q_vector eims value to global eims_enable_mask */ 4011 adapter->eims_enable_mask |= q_vector->eims_value; 4012 4013 /* configure q_vector to set itr on first interrupt */ 4014 q_vector->set_itr = 1; 4015 } 4016 4017 /** 4018 * igc_configure_msix - Configure MSI-X hardware 4019 * @adapter: Pointer to adapter structure 4020 * 4021 * igc_configure_msix sets up the hardware to properly 4022 * generate MSI-X interrupts. 4023 */ 4024 static void igc_configure_msix(struct igc_adapter *adapter) 4025 { 4026 struct igc_hw *hw = &adapter->hw; 4027 int i, vector = 0; 4028 u32 tmp; 4029 4030 adapter->eims_enable_mask = 0; 4031 4032 /* set vector for other causes, i.e. link changes */ 4033 switch (hw->mac.type) { 4034 case igc_i225: 4035 /* Turn on MSI-X capability first, or our settings 4036 * won't stick. And it will take days to debug. 
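 * The same GPIE write also enables extended interrupt auto-masking
 * (EIAME) and non-selective interrupt clear on read (NSICR).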
4037 */ 4038 wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE | 4039 IGC_GPIE_PBA | IGC_GPIE_EIAME | 4040 IGC_GPIE_NSICR); 4041 4042 /* enable msix_other interrupt */ 4043 adapter->eims_other = BIT(vector); 4044 tmp = (vector++ | IGC_IVAR_VALID) << 8; 4045 4046 wr32(IGC_IVAR_MISC, tmp); 4047 break; 4048 default: 4049 /* do nothing, since nothing else supports MSI-X */ 4050 break; 4051 } /* switch (hw->mac.type) */ 4052 4053 adapter->eims_enable_mask |= adapter->eims_other; 4054 4055 for (i = 0; i < adapter->num_q_vectors; i++) 4056 igc_assign_vector(adapter->q_vector[i], vector++); 4057 4058 wrfl(); 4059 } 4060 4061 /** 4062 * igc_irq_enable - Enable default interrupt generation settings 4063 * @adapter: board private structure 4064 */ 4065 static void igc_irq_enable(struct igc_adapter *adapter) 4066 { 4067 struct igc_hw *hw = &adapter->hw; 4068 4069 if (adapter->msix_entries) { 4070 u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA; 4071 u32 regval = rd32(IGC_EIAC); 4072 4073 wr32(IGC_EIAC, regval | adapter->eims_enable_mask); 4074 regval = rd32(IGC_EIAM); 4075 wr32(IGC_EIAM, regval | adapter->eims_enable_mask); 4076 wr32(IGC_EIMS, adapter->eims_enable_mask); 4077 wr32(IGC_IMS, ims); 4078 } else { 4079 wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA); 4080 wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA); 4081 } 4082 } 4083 4084 /** 4085 * igc_irq_disable - Mask off interrupt generation on the NIC 4086 * @adapter: board private structure 4087 */ 4088 static void igc_irq_disable(struct igc_adapter *adapter) 4089 { 4090 struct igc_hw *hw = &adapter->hw; 4091 4092 if (adapter->msix_entries) { 4093 u32 regval = rd32(IGC_EIAM); 4094 4095 wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask); 4096 wr32(IGC_EIMC, adapter->eims_enable_mask); 4097 regval = rd32(IGC_EIAC); 4098 wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask); 4099 } 4100 4101 wr32(IGC_IAM, 0); 4102 wr32(IGC_IMC, ~0); 4103 wrfl(); 4104 4105 if (adapter->msix_entries) { 4106 int vector = 0, i; 4107 4108 synchronize_irq(adapter->msix_entries[vector++].vector); 4109 4110 for (i = 0; i < adapter->num_q_vectors; i++) 4111 synchronize_irq(adapter->msix_entries[vector++].vector); 4112 } else { 4113 synchronize_irq(adapter->pdev->irq); 4114 } 4115 } 4116 4117 void igc_set_flag_queue_pairs(struct igc_adapter *adapter, 4118 const u32 max_rss_queues) 4119 { 4120 /* Determine if we need to pair queues. */ 4121 /* If rss_queues > half of max_rss_queues, pair the queues in 4122 * order to conserve interrupts due to limited supply. 4123 */ 4124 if (adapter->rss_queues > (max_rss_queues / 2)) 4125 adapter->flags |= IGC_FLAG_QUEUE_PAIRS; 4126 else 4127 adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS; 4128 } 4129 4130 unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter) 4131 { 4132 return IGC_MAX_RX_QUEUES; 4133 } 4134 4135 static void igc_init_queue_configuration(struct igc_adapter *adapter) 4136 { 4137 u32 max_rss_queues; 4138 4139 max_rss_queues = igc_get_max_rss_queues(adapter); 4140 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); 4141 4142 igc_set_flag_queue_pairs(adapter, max_rss_queues); 4143 } 4144 4145 /** 4146 * igc_reset_q_vector - Reset config for interrupt vector 4147 * @adapter: board private structure to initialize 4148 * @v_idx: Index of vector to be reset 4149 * 4150 * If NAPI is enabled it will delete any references to the 4151 * NAPI struct. This is preparation for igc_free_q_vector. 
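 *
 * The adapter's tx_ring/rx_ring slots served by this vector are cleared
 * as well, so the rings cannot be reached after the vector is freed.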
4152 */ 4153 static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx) 4154 { 4155 struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; 4156 4157 /* if we're coming from igc_set_interrupt_capability, the vectors are 4158 * not yet allocated 4159 */ 4160 if (!q_vector) 4161 return; 4162 4163 if (q_vector->tx.ring) 4164 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; 4165 4166 if (q_vector->rx.ring) 4167 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; 4168 4169 netif_napi_del(&q_vector->napi); 4170 } 4171 4172 /** 4173 * igc_free_q_vector - Free memory allocated for specific interrupt vector 4174 * @adapter: board private structure to initialize 4175 * @v_idx: Index of vector to be freed 4176 * 4177 * This function frees the memory allocated to the q_vector. 4178 */ 4179 static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx) 4180 { 4181 struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; 4182 4183 adapter->q_vector[v_idx] = NULL; 4184 4185 /* igc_get_stats64() might access the rings on this vector, 4186 * we must wait a grace period before freeing it. 4187 */ 4188 if (q_vector) 4189 kfree_rcu(q_vector, rcu); 4190 } 4191 4192 /** 4193 * igc_free_q_vectors - Free memory allocated for interrupt vectors 4194 * @adapter: board private structure to initialize 4195 * 4196 * This function frees the memory allocated to the q_vectors. In addition if 4197 * NAPI is enabled it will delete any references to the NAPI struct prior 4198 * to freeing the q_vector. 4199 */ 4200 static void igc_free_q_vectors(struct igc_adapter *adapter) 4201 { 4202 int v_idx = adapter->num_q_vectors; 4203 4204 adapter->num_tx_queues = 0; 4205 adapter->num_rx_queues = 0; 4206 adapter->num_q_vectors = 0; 4207 4208 while (v_idx--) { 4209 igc_reset_q_vector(adapter, v_idx); 4210 igc_free_q_vector(adapter, v_idx); 4211 } 4212 } 4213 4214 /** 4215 * igc_update_itr - update the dynamic ITR value based on statistics 4216 * @q_vector: pointer to q_vector 4217 * @ring_container: ring info to update the itr for 4218 * 4219 * Stores a new ITR value based on packets and byte 4220 * counts during the last interrupt. The advantage of per interrupt 4221 * computation is faster updates and more accurate ITR for the current 4222 * traffic pattern. Constants in this function were computed 4223 * based on theoretical maximum wire speed and thresholds were set based 4224 * on testing data as well as attempting to minimize response time 4225 * while increasing bulk throughput. 4226 * NOTE: These calculations are only valid when operating in a single- 4227 * queue environment. 
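 * Example (single queue): an interval that saw 10 packets totalling 15000
 * bytes (1500 bytes/packet) while in low_latency satisfies both
 * "bytes > 10000" and "bytes/packets > 1200" below, so the ring moves to
 * bulk_latency; 50 packets totalling 20000 bytes (400 bytes/packet) would
 * instead move it to lowest_latency via the "packets > 35" test.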
4228 */ 4229 static void igc_update_itr(struct igc_q_vector *q_vector, 4230 struct igc_ring_container *ring_container) 4231 { 4232 unsigned int packets = ring_container->total_packets; 4233 unsigned int bytes = ring_container->total_bytes; 4234 u8 itrval = ring_container->itr; 4235 4236 /* no packets, exit with status unchanged */ 4237 if (packets == 0) 4238 return; 4239 4240 switch (itrval) { 4241 case lowest_latency: 4242 /* handle TSO and jumbo frames */ 4243 if (bytes / packets > 8000) 4244 itrval = bulk_latency; 4245 else if ((packets < 5) && (bytes > 512)) 4246 itrval = low_latency; 4247 break; 4248 case low_latency: /* 50 usec aka 20000 ints/s */ 4249 if (bytes > 10000) { 4250 /* this if handles the TSO accounting */ 4251 if (bytes / packets > 8000) 4252 itrval = bulk_latency; 4253 else if ((packets < 10) || ((bytes / packets) > 1200)) 4254 itrval = bulk_latency; 4255 else if ((packets > 35)) 4256 itrval = lowest_latency; 4257 } else if (bytes / packets > 2000) { 4258 itrval = bulk_latency; 4259 } else if (packets <= 2 && bytes < 512) { 4260 itrval = lowest_latency; 4261 } 4262 break; 4263 case bulk_latency: /* 250 usec aka 4000 ints/s */ 4264 if (bytes > 25000) { 4265 if (packets > 35) 4266 itrval = low_latency; 4267 } else if (bytes < 1500) { 4268 itrval = low_latency; 4269 } 4270 break; 4271 } 4272 4273 /* clear work counters since we have the values we need */ 4274 ring_container->total_bytes = 0; 4275 ring_container->total_packets = 0; 4276 4277 /* write updated itr to ring container */ 4278 ring_container->itr = itrval; 4279 } 4280 4281 static void igc_set_itr(struct igc_q_vector *q_vector) 4282 { 4283 struct igc_adapter *adapter = q_vector->adapter; 4284 u32 new_itr = q_vector->itr_val; 4285 u8 current_itr = 0; 4286 4287 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 4288 switch (adapter->link_speed) { 4289 case SPEED_10: 4290 case SPEED_100: 4291 current_itr = 0; 4292 new_itr = IGC_4K_ITR; 4293 goto set_itr_now; 4294 default: 4295 break; 4296 } 4297 4298 igc_update_itr(q_vector, &q_vector->tx); 4299 igc_update_itr(q_vector, &q_vector->rx); 4300 4301 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); 4302 4303 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 4304 if (current_itr == lowest_latency && 4305 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || 4306 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) 4307 current_itr = low_latency; 4308 4309 switch (current_itr) { 4310 /* counts and packets in update_itr are dependent on these numbers */ 4311 case lowest_latency: 4312 new_itr = IGC_70K_ITR; /* 70,000 ints/sec */ 4313 break; 4314 case low_latency: 4315 new_itr = IGC_20K_ITR; /* 20,000 ints/sec */ 4316 break; 4317 case bulk_latency: 4318 new_itr = IGC_4K_ITR; /* 4,000 ints/sec */ 4319 break; 4320 default: 4321 break; 4322 } 4323 4324 set_itr_now: 4325 if (new_itr != q_vector->itr_val) { 4326 /* this attempts to bias the interrupt rate towards Bulk 4327 * by adding intermediate steps when interrupt rate is 4328 * increasing 4329 */ 4330 new_itr = new_itr > q_vector->itr_val ? 4331 max((new_itr * q_vector->itr_val) / 4332 (new_itr + (q_vector->itr_val >> 2)), 4333 new_itr) : new_itr; 4334 /* Don't write the value here; it resets the adapter's 4335 * internal timer, and causes us to delay far longer than 4336 * we should between interrupts. Instead, we write the ITR 4337 * value at the beginning of the next interrupt so the timing 4338 * ends up being correct. 
4339 */ 4340 q_vector->itr_val = new_itr; 4341 q_vector->set_itr = 1; 4342 } 4343 } 4344 4345 static void igc_reset_interrupt_capability(struct igc_adapter *adapter) 4346 { 4347 int v_idx = adapter->num_q_vectors; 4348 4349 if (adapter->msix_entries) { 4350 pci_disable_msix(adapter->pdev); 4351 kfree(adapter->msix_entries); 4352 adapter->msix_entries = NULL; 4353 } else if (adapter->flags & IGC_FLAG_HAS_MSI) { 4354 pci_disable_msi(adapter->pdev); 4355 } 4356 4357 while (v_idx--) 4358 igc_reset_q_vector(adapter, v_idx); 4359 } 4360 4361 /** 4362 * igc_set_interrupt_capability - set MSI or MSI-X if supported 4363 * @adapter: Pointer to adapter structure 4364 * @msix: boolean value for MSI-X capability 4365 * 4366 * Attempt to configure interrupts using the best available 4367 * capabilities of the hardware and kernel. 4368 */ 4369 static void igc_set_interrupt_capability(struct igc_adapter *adapter, 4370 bool msix) 4371 { 4372 int numvecs, i; 4373 int err; 4374 4375 if (!msix) 4376 goto msi_only; 4377 adapter->flags |= IGC_FLAG_HAS_MSIX; 4378 4379 /* Number of supported queues. */ 4380 adapter->num_rx_queues = adapter->rss_queues; 4381 4382 adapter->num_tx_queues = adapter->rss_queues; 4383 4384 /* start with one vector for every Rx queue */ 4385 numvecs = adapter->num_rx_queues; 4386 4387 /* if Tx handler is separate add 1 for every Tx queue */ 4388 if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) 4389 numvecs += adapter->num_tx_queues; 4390 4391 /* store the number of vectors reserved for queues */ 4392 adapter->num_q_vectors = numvecs; 4393 4394 /* add 1 vector for link status interrupts */ 4395 numvecs++; 4396 4397 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), 4398 GFP_KERNEL); 4399 4400 if (!adapter->msix_entries) 4401 return; 4402 4403 /* populate entry values */ 4404 for (i = 0; i < numvecs; i++) 4405 adapter->msix_entries[i].entry = i; 4406 4407 err = pci_enable_msix_range(adapter->pdev, 4408 adapter->msix_entries, 4409 numvecs, 4410 numvecs); 4411 if (err > 0) 4412 return; 4413 4414 kfree(adapter->msix_entries); 4415 adapter->msix_entries = NULL; 4416 4417 igc_reset_interrupt_capability(adapter); 4418 4419 msi_only: 4420 adapter->flags &= ~IGC_FLAG_HAS_MSIX; 4421 4422 adapter->rss_queues = 1; 4423 adapter->flags |= IGC_FLAG_QUEUE_PAIRS; 4424 adapter->num_rx_queues = 1; 4425 adapter->num_tx_queues = 1; 4426 adapter->num_q_vectors = 1; 4427 if (!pci_enable_msi(adapter->pdev)) 4428 adapter->flags |= IGC_FLAG_HAS_MSI; 4429 } 4430 4431 /** 4432 * igc_update_ring_itr - update the dynamic ITR value based on packet size 4433 * @q_vector: pointer to q_vector 4434 * 4435 * Stores a new ITR value based on strictly on packet size. This 4436 * algorithm is less sophisticated than that used in igc_update_itr, 4437 * due to the difficulty of synchronizing statistics across multiple 4438 * receive rings. The divisors and thresholds used by this function 4439 * were determined based on theoretical maximum wire speed and testing 4440 * data, in order to minimize response time while increasing bulk 4441 * throughput. 4442 * NOTE: This function is called only when operating in a multiqueue 4443 * receive environment. 4444 */ 4445 static void igc_update_ring_itr(struct igc_q_vector *q_vector) 4446 { 4447 struct igc_adapter *adapter = q_vector->adapter; 4448 int new_val = q_vector->itr_val; 4449 int avg_wire_size = 0; 4450 unsigned int packets; 4451 4452 /* For non-gigabit speeds, just fix the interrupt rate at 4000 4453 * ints/sec - ITR timer value of 120 ticks. 
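 * (For the arithmetic further below: an average of 900 bytes/packet yields
 * an ITR value of (900 + 24) / 3 = 308 thanks to the mid-size boost, while
 * full-size 1500-byte frames yield (1500 + 24) / 2 = 762.)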
4454 */ 4455 switch (adapter->link_speed) { 4456 case SPEED_10: 4457 case SPEED_100: 4458 new_val = IGC_4K_ITR; 4459 goto set_itr_val; 4460 default: 4461 break; 4462 } 4463 4464 packets = q_vector->rx.total_packets; 4465 if (packets) 4466 avg_wire_size = q_vector->rx.total_bytes / packets; 4467 4468 packets = q_vector->tx.total_packets; 4469 if (packets) 4470 avg_wire_size = max_t(u32, avg_wire_size, 4471 q_vector->tx.total_bytes / packets); 4472 4473 /* if avg_wire_size isn't set no work was done */ 4474 if (!avg_wire_size) 4475 goto clear_counts; 4476 4477 /* Add 24 bytes to size to account for CRC, preamble, and gap */ 4478 avg_wire_size += 24; 4479 4480 /* Don't starve jumbo frames */ 4481 avg_wire_size = min(avg_wire_size, 3000); 4482 4483 /* Give a little boost to mid-size frames */ 4484 if (avg_wire_size > 300 && avg_wire_size < 1200) 4485 new_val = avg_wire_size / 3; 4486 else 4487 new_val = avg_wire_size / 2; 4488 4489 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 4490 if (new_val < IGC_20K_ITR && 4491 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || 4492 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) 4493 new_val = IGC_20K_ITR; 4494 4495 set_itr_val: 4496 if (new_val != q_vector->itr_val) { 4497 q_vector->itr_val = new_val; 4498 q_vector->set_itr = 1; 4499 } 4500 clear_counts: 4501 q_vector->rx.total_bytes = 0; 4502 q_vector->rx.total_packets = 0; 4503 q_vector->tx.total_bytes = 0; 4504 q_vector->tx.total_packets = 0; 4505 } 4506 4507 static void igc_ring_irq_enable(struct igc_q_vector *q_vector) 4508 { 4509 struct igc_adapter *adapter = q_vector->adapter; 4510 struct igc_hw *hw = &adapter->hw; 4511 4512 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || 4513 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { 4514 if (adapter->num_q_vectors == 1) 4515 igc_set_itr(q_vector); 4516 else 4517 igc_update_ring_itr(q_vector); 4518 } 4519 4520 if (!test_bit(__IGC_DOWN, &adapter->state)) { 4521 if (adapter->msix_entries) 4522 wr32(IGC_EIMS, q_vector->eims_value); 4523 else 4524 igc_irq_enable(adapter); 4525 } 4526 } 4527 4528 static void igc_add_ring(struct igc_ring *ring, 4529 struct igc_ring_container *head) 4530 { 4531 head->ring = ring; 4532 head->count++; 4533 } 4534 4535 /** 4536 * igc_cache_ring_register - Descriptor ring to register mapping 4537 * @adapter: board private structure to initialize 4538 * 4539 * Once we know the feature-set enabled for the device, we'll cache 4540 * the register offset the descriptor ring is assigned to. 4541 */ 4542 static void igc_cache_ring_register(struct igc_adapter *adapter) 4543 { 4544 int i = 0, j = 0; 4545 4546 switch (adapter->hw.mac.type) { 4547 case igc_i225: 4548 default: 4549 for (; i < adapter->num_rx_queues; i++) 4550 adapter->rx_ring[i]->reg_idx = i; 4551 for (; j < adapter->num_tx_queues; j++) 4552 adapter->tx_ring[j]->reg_idx = j; 4553 break; 4554 } 4555 } 4556 4557 /** 4558 * igc_poll - NAPI Rx polling callback 4559 * @napi: napi polling structure 4560 * @budget: count of how many packets we should handle 4561 */ 4562 static int igc_poll(struct napi_struct *napi, int budget) 4563 { 4564 struct igc_q_vector *q_vector = container_of(napi, 4565 struct igc_q_vector, 4566 napi); 4567 struct igc_ring *rx_ring = q_vector->rx.ring; 4568 bool clean_complete = true; 4569 int work_done = 0; 4570 4571 if (q_vector->tx.ring) 4572 clean_complete = igc_clean_tx_irq(q_vector, budget); 4573 4574 if (rx_ring) { 4575 int cleaned = rx_ring->xsk_pool ? 
4576 igc_clean_rx_irq_zc(q_vector, budget) : 4577 igc_clean_rx_irq(q_vector, budget); 4578 4579 work_done += cleaned; 4580 if (cleaned >= budget) 4581 clean_complete = false; 4582 } 4583 4584 /* If all work not completed, return budget and keep polling */ 4585 if (!clean_complete) 4586 return budget; 4587 4588 /* Exit the polling mode, but don't re-enable interrupts if stack might 4589 * poll us due to busy-polling 4590 */ 4591 if (likely(napi_complete_done(napi, work_done))) 4592 igc_ring_irq_enable(q_vector); 4593 4594 return min(work_done, budget - 1); 4595 } 4596 4597 /** 4598 * igc_alloc_q_vector - Allocate memory for a single interrupt vector 4599 * @adapter: board private structure to initialize 4600 * @v_count: q_vectors allocated on adapter, used for ring interleaving 4601 * @v_idx: index of vector in adapter struct 4602 * @txr_count: total number of Tx rings to allocate 4603 * @txr_idx: index of first Tx ring to allocate 4604 * @rxr_count: total number of Rx rings to allocate 4605 * @rxr_idx: index of first Rx ring to allocate 4606 * 4607 * We allocate one q_vector. If allocation fails we return -ENOMEM. 4608 */ 4609 static int igc_alloc_q_vector(struct igc_adapter *adapter, 4610 unsigned int v_count, unsigned int v_idx, 4611 unsigned int txr_count, unsigned int txr_idx, 4612 unsigned int rxr_count, unsigned int rxr_idx) 4613 { 4614 struct igc_q_vector *q_vector; 4615 struct igc_ring *ring; 4616 int ring_count; 4617 4618 /* igc only supports 1 Tx and/or 1 Rx queue per vector */ 4619 if (txr_count > 1 || rxr_count > 1) 4620 return -ENOMEM; 4621 4622 ring_count = txr_count + rxr_count; 4623 4624 /* allocate q_vector and rings */ 4625 q_vector = adapter->q_vector[v_idx]; 4626 if (!q_vector) 4627 q_vector = kzalloc(struct_size(q_vector, ring, ring_count), 4628 GFP_KERNEL); 4629 else 4630 memset(q_vector, 0, struct_size(q_vector, ring, ring_count)); 4631 if (!q_vector) 4632 return -ENOMEM; 4633 4634 /* initialize NAPI */ 4635 netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll); 4636 4637 /* tie q_vector and adapter together */ 4638 adapter->q_vector[v_idx] = q_vector; 4639 q_vector->adapter = adapter; 4640 4641 /* initialize work limits */ 4642 q_vector->tx.work_limit = adapter->tx_work_limit; 4643 4644 /* initialize ITR configuration */ 4645 q_vector->itr_register = adapter->io_addr + IGC_EITR(0); 4646 q_vector->itr_val = IGC_START_ITR; 4647 4648 /* initialize pointer to rings */ 4649 ring = q_vector->ring; 4650 4651 /* initialize ITR */ 4652 if (rxr_count) { 4653 /* rx or rx/tx vector */ 4654 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) 4655 q_vector->itr_val = adapter->rx_itr_setting; 4656 } else { 4657 /* tx only vector */ 4658 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) 4659 q_vector->itr_val = adapter->tx_itr_setting; 4660 } 4661 4662 if (txr_count) { 4663 /* assign generic ring traits */ 4664 ring->dev = &adapter->pdev->dev; 4665 ring->netdev = adapter->netdev; 4666 4667 /* configure backlink on ring */ 4668 ring->q_vector = q_vector; 4669 4670 /* update q_vector Tx values */ 4671 igc_add_ring(ring, &q_vector->tx); 4672 4673 /* apply Tx specific ring traits */ 4674 ring->count = adapter->tx_ring_count; 4675 ring->queue_index = txr_idx; 4676 4677 /* assign ring to adapter */ 4678 adapter->tx_ring[txr_idx] = ring; 4679 4680 /* push pointer to next ring */ 4681 ring++; 4682 } 4683 4684 if (rxr_count) { 4685 /* assign generic ring traits */ 4686 ring->dev = &adapter->pdev->dev; 4687 ring->netdev = adapter->netdev; 4688 4689 /* configure 
backlink on ring */ 4690 ring->q_vector = q_vector; 4691 4692 /* update q_vector Rx values */ 4693 igc_add_ring(ring, &q_vector->rx); 4694 4695 /* apply Rx specific ring traits */ 4696 ring->count = adapter->rx_ring_count; 4697 ring->queue_index = rxr_idx; 4698 4699 /* assign ring to adapter */ 4700 adapter->rx_ring[rxr_idx] = ring; 4701 } 4702 4703 return 0; 4704 } 4705 4706 /** 4707 * igc_alloc_q_vectors - Allocate memory for interrupt vectors 4708 * @adapter: board private structure to initialize 4709 * 4710 * We allocate one q_vector per queue interrupt. If allocation fails we 4711 * return -ENOMEM. 4712 */ 4713 static int igc_alloc_q_vectors(struct igc_adapter *adapter) 4714 { 4715 int rxr_remaining = adapter->num_rx_queues; 4716 int txr_remaining = adapter->num_tx_queues; 4717 int rxr_idx = 0, txr_idx = 0, v_idx = 0; 4718 int q_vectors = adapter->num_q_vectors; 4719 int err; 4720 4721 if (q_vectors >= (rxr_remaining + txr_remaining)) { 4722 for (; rxr_remaining; v_idx++) { 4723 err = igc_alloc_q_vector(adapter, q_vectors, v_idx, 4724 0, 0, 1, rxr_idx); 4725 4726 if (err) 4727 goto err_out; 4728 4729 /* update counts and index */ 4730 rxr_remaining--; 4731 rxr_idx++; 4732 } 4733 } 4734 4735 for (; v_idx < q_vectors; v_idx++) { 4736 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); 4737 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); 4738 4739 err = igc_alloc_q_vector(adapter, q_vectors, v_idx, 4740 tqpv, txr_idx, rqpv, rxr_idx); 4741 4742 if (err) 4743 goto err_out; 4744 4745 /* update counts and index */ 4746 rxr_remaining -= rqpv; 4747 txr_remaining -= tqpv; 4748 rxr_idx++; 4749 txr_idx++; 4750 } 4751 4752 return 0; 4753 4754 err_out: 4755 adapter->num_tx_queues = 0; 4756 adapter->num_rx_queues = 0; 4757 adapter->num_q_vectors = 0; 4758 4759 while (v_idx--) 4760 igc_free_q_vector(adapter, v_idx); 4761 4762 return -ENOMEM; 4763 } 4764 4765 /** 4766 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors 4767 * @adapter: Pointer to adapter structure 4768 * @msix: boolean for MSI-X capability 4769 * 4770 * This function initializes the interrupts and allocates all of the queues. 4771 */ 4772 static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix) 4773 { 4774 struct net_device *dev = adapter->netdev; 4775 int err = 0; 4776 4777 igc_set_interrupt_capability(adapter, msix); 4778 4779 err = igc_alloc_q_vectors(adapter); 4780 if (err) { 4781 netdev_err(dev, "Unable to allocate memory for vectors\n"); 4782 goto err_alloc_q_vectors; 4783 } 4784 4785 igc_cache_ring_register(adapter); 4786 4787 return 0; 4788 4789 err_alloc_q_vectors: 4790 igc_reset_interrupt_capability(adapter); 4791 return err; 4792 } 4793 4794 /** 4795 * igc_sw_init - Initialize general software structures (struct igc_adapter) 4796 * @adapter: board private structure to initialize 4797 * 4798 * igc_sw_init initializes the Adapter private data structure. 4799 * Fields are initialized based on PCI device information and 4800 * OS network device settings (MTU size). 
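 * For the default 1500 byte MTU this works out to
 * max_frame_size = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4)
 *                = 1522 bytes.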
4801 */ 4802 static int igc_sw_init(struct igc_adapter *adapter) 4803 { 4804 struct net_device *netdev = adapter->netdev; 4805 struct pci_dev *pdev = adapter->pdev; 4806 struct igc_hw *hw = &adapter->hw; 4807 4808 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); 4809 4810 /* set default ring sizes */ 4811 adapter->tx_ring_count = IGC_DEFAULT_TXD; 4812 adapter->rx_ring_count = IGC_DEFAULT_RXD; 4813 4814 /* set default ITR values */ 4815 adapter->rx_itr_setting = IGC_DEFAULT_ITR; 4816 adapter->tx_itr_setting = IGC_DEFAULT_ITR; 4817 4818 /* set default work limits */ 4819 adapter->tx_work_limit = IGC_DEFAULT_TX_WORK; 4820 4821 /* adjust max frame to be at least the size of a standard frame */ 4822 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + 4823 VLAN_HLEN; 4824 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 4825 4826 mutex_init(&adapter->nfc_rule_lock); 4827 INIT_LIST_HEAD(&adapter->nfc_rule_list); 4828 adapter->nfc_rule_count = 0; 4829 4830 spin_lock_init(&adapter->stats64_lock); 4831 spin_lock_init(&adapter->qbv_tx_lock); 4832 /* Assume MSI-X interrupts, will be checked during IRQ allocation */ 4833 adapter->flags |= IGC_FLAG_HAS_MSIX; 4834 4835 igc_init_queue_configuration(adapter); 4836 4837 /* This call may decrease the number of queues */ 4838 if (igc_init_interrupt_scheme(adapter, true)) { 4839 netdev_err(netdev, "Unable to allocate memory for queues\n"); 4840 return -ENOMEM; 4841 } 4842 4843 /* Explicitly disable IRQ since the NIC can be in any state. */ 4844 igc_irq_disable(adapter); 4845 4846 set_bit(__IGC_DOWN, &adapter->state); 4847 4848 return 0; 4849 } 4850 4851 /** 4852 * igc_up - Open the interface and prepare it to handle traffic 4853 * @adapter: board private structure 4854 */ 4855 void igc_up(struct igc_adapter *adapter) 4856 { 4857 struct igc_hw *hw = &adapter->hw; 4858 int i = 0; 4859 4860 /* hardware has been reset, we need to reload some things */ 4861 igc_configure(adapter); 4862 4863 clear_bit(__IGC_DOWN, &adapter->state); 4864 4865 for (i = 0; i < adapter->num_q_vectors; i++) 4866 napi_enable(&adapter->q_vector[i]->napi); 4867 4868 if (adapter->msix_entries) 4869 igc_configure_msix(adapter); 4870 else 4871 igc_assign_vector(adapter->q_vector[0], 0); 4872 4873 /* Clear any pending interrupts. */ 4874 rd32(IGC_ICR); 4875 igc_irq_enable(adapter); 4876 4877 netif_tx_start_all_queues(adapter->netdev); 4878 4879 /* start the watchdog. */ 4880 hw->mac.get_link_status = true; 4881 schedule_work(&adapter->watchdog_task); 4882 } 4883 4884 /** 4885 * igc_update_stats - Update the board statistics counters 4886 * @adapter: board private structure 4887 */ 4888 void igc_update_stats(struct igc_adapter *adapter) 4889 { 4890 struct rtnl_link_stats64 *net_stats = &adapter->stats64; 4891 struct pci_dev *pdev = adapter->pdev; 4892 struct igc_hw *hw = &adapter->hw; 4893 u64 _bytes, _packets; 4894 u64 bytes, packets; 4895 unsigned int start; 4896 u32 mpc; 4897 int i; 4898 4899 /* Prevent stats update while adapter is being reset, or if the pci 4900 * connection is down. 
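 * The u64_stats_fetch_begin()/u64_stats_fetch_retry() loops below re-read
 * each ring's byte and packet counters until a consistent snapshot is
 * obtained, which matters on 32-bit hosts where 64-bit counter updates
 * are not atomic.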
4901 */ 4902 if (adapter->link_speed == 0) 4903 return; 4904 if (pci_channel_offline(pdev)) 4905 return; 4906 4907 packets = 0; 4908 bytes = 0; 4909 4910 rcu_read_lock(); 4911 for (i = 0; i < adapter->num_rx_queues; i++) { 4912 struct igc_ring *ring = adapter->rx_ring[i]; 4913 u32 rqdpc = rd32(IGC_RQDPC(i)); 4914 4915 if (hw->mac.type >= igc_i225) 4916 wr32(IGC_RQDPC(i), 0); 4917 4918 if (rqdpc) { 4919 ring->rx_stats.drops += rqdpc; 4920 net_stats->rx_fifo_errors += rqdpc; 4921 } 4922 4923 do { 4924 start = u64_stats_fetch_begin(&ring->rx_syncp); 4925 _bytes = ring->rx_stats.bytes; 4926 _packets = ring->rx_stats.packets; 4927 } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); 4928 bytes += _bytes; 4929 packets += _packets; 4930 } 4931 4932 net_stats->rx_bytes = bytes; 4933 net_stats->rx_packets = packets; 4934 4935 packets = 0; 4936 bytes = 0; 4937 for (i = 0; i < adapter->num_tx_queues; i++) { 4938 struct igc_ring *ring = adapter->tx_ring[i]; 4939 4940 do { 4941 start = u64_stats_fetch_begin(&ring->tx_syncp); 4942 _bytes = ring->tx_stats.bytes; 4943 _packets = ring->tx_stats.packets; 4944 } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); 4945 bytes += _bytes; 4946 packets += _packets; 4947 } 4948 net_stats->tx_bytes = bytes; 4949 net_stats->tx_packets = packets; 4950 rcu_read_unlock(); 4951 4952 /* read stats registers */ 4953 adapter->stats.crcerrs += rd32(IGC_CRCERRS); 4954 adapter->stats.gprc += rd32(IGC_GPRC); 4955 adapter->stats.gorc += rd32(IGC_GORCL); 4956 rd32(IGC_GORCH); /* clear GORCL */ 4957 adapter->stats.bprc += rd32(IGC_BPRC); 4958 adapter->stats.mprc += rd32(IGC_MPRC); 4959 adapter->stats.roc += rd32(IGC_ROC); 4960 4961 adapter->stats.prc64 += rd32(IGC_PRC64); 4962 adapter->stats.prc127 += rd32(IGC_PRC127); 4963 adapter->stats.prc255 += rd32(IGC_PRC255); 4964 adapter->stats.prc511 += rd32(IGC_PRC511); 4965 adapter->stats.prc1023 += rd32(IGC_PRC1023); 4966 adapter->stats.prc1522 += rd32(IGC_PRC1522); 4967 adapter->stats.tlpic += rd32(IGC_TLPIC); 4968 adapter->stats.rlpic += rd32(IGC_RLPIC); 4969 adapter->stats.hgptc += rd32(IGC_HGPTC); 4970 4971 mpc = rd32(IGC_MPC); 4972 adapter->stats.mpc += mpc; 4973 net_stats->rx_fifo_errors += mpc; 4974 adapter->stats.scc += rd32(IGC_SCC); 4975 adapter->stats.ecol += rd32(IGC_ECOL); 4976 adapter->stats.mcc += rd32(IGC_MCC); 4977 adapter->stats.latecol += rd32(IGC_LATECOL); 4978 adapter->stats.dc += rd32(IGC_DC); 4979 adapter->stats.rlec += rd32(IGC_RLEC); 4980 adapter->stats.xonrxc += rd32(IGC_XONRXC); 4981 adapter->stats.xontxc += rd32(IGC_XONTXC); 4982 adapter->stats.xoffrxc += rd32(IGC_XOFFRXC); 4983 adapter->stats.xofftxc += rd32(IGC_XOFFTXC); 4984 adapter->stats.fcruc += rd32(IGC_FCRUC); 4985 adapter->stats.gptc += rd32(IGC_GPTC); 4986 adapter->stats.gotc += rd32(IGC_GOTCL); 4987 rd32(IGC_GOTCH); /* clear GOTCL */ 4988 adapter->stats.rnbc += rd32(IGC_RNBC); 4989 adapter->stats.ruc += rd32(IGC_RUC); 4990 adapter->stats.rfc += rd32(IGC_RFC); 4991 adapter->stats.rjc += rd32(IGC_RJC); 4992 adapter->stats.tor += rd32(IGC_TORH); 4993 adapter->stats.tot += rd32(IGC_TOTH); 4994 adapter->stats.tpr += rd32(IGC_TPR); 4995 4996 adapter->stats.ptc64 += rd32(IGC_PTC64); 4997 adapter->stats.ptc127 += rd32(IGC_PTC127); 4998 adapter->stats.ptc255 += rd32(IGC_PTC255); 4999 adapter->stats.ptc511 += rd32(IGC_PTC511); 5000 adapter->stats.ptc1023 += rd32(IGC_PTC1023); 5001 adapter->stats.ptc1522 += rd32(IGC_PTC1522); 5002 5003 adapter->stats.mptc += rd32(IGC_MPTC); 5004 adapter->stats.bptc += rd32(IGC_BPTC); 5005 5006 adapter->stats.tpt += 
rd32(IGC_TPT); 5007 adapter->stats.colc += rd32(IGC_COLC); 5008 adapter->stats.colc += rd32(IGC_RERC); 5009 5010 adapter->stats.algnerrc += rd32(IGC_ALGNERRC); 5011 5012 adapter->stats.tsctc += rd32(IGC_TSCTC); 5013 5014 adapter->stats.iac += rd32(IGC_IAC); 5015 5016 /* Fill out the OS statistics structure */ 5017 net_stats->multicast = adapter->stats.mprc; 5018 net_stats->collisions = adapter->stats.colc; 5019 5020 /* Rx Errors */ 5021 5022 /* RLEC on some newer hardware can be incorrect so build 5023 * our own version based on RUC and ROC 5024 */ 5025 net_stats->rx_errors = adapter->stats.rxerrc + 5026 adapter->stats.crcerrs + adapter->stats.algnerrc + 5027 adapter->stats.ruc + adapter->stats.roc + 5028 adapter->stats.cexterr; 5029 net_stats->rx_length_errors = adapter->stats.ruc + 5030 adapter->stats.roc; 5031 net_stats->rx_crc_errors = adapter->stats.crcerrs; 5032 net_stats->rx_frame_errors = adapter->stats.algnerrc; 5033 net_stats->rx_missed_errors = adapter->stats.mpc; 5034 5035 /* Tx Errors */ 5036 net_stats->tx_errors = adapter->stats.ecol + 5037 adapter->stats.latecol; 5038 net_stats->tx_aborted_errors = adapter->stats.ecol; 5039 net_stats->tx_window_errors = adapter->stats.latecol; 5040 net_stats->tx_carrier_errors = adapter->stats.tncrs; 5041 5042 /* Tx Dropped */ 5043 net_stats->tx_dropped = adapter->stats.txdrop; 5044 5045 /* Management Stats */ 5046 adapter->stats.mgptc += rd32(IGC_MGTPTC); 5047 adapter->stats.mgprc += rd32(IGC_MGTPRC); 5048 adapter->stats.mgpdc += rd32(IGC_MGTPDC); 5049 } 5050 5051 /** 5052 * igc_down - Close the interface 5053 * @adapter: board private structure 5054 */ 5055 void igc_down(struct igc_adapter *adapter) 5056 { 5057 struct net_device *netdev = adapter->netdev; 5058 struct igc_hw *hw = &adapter->hw; 5059 u32 tctl, rctl; 5060 int i = 0; 5061 5062 set_bit(__IGC_DOWN, &adapter->state); 5063 5064 igc_ptp_suspend(adapter); 5065 5066 if (pci_device_is_present(adapter->pdev)) { 5067 /* disable receives in the hardware */ 5068 rctl = rd32(IGC_RCTL); 5069 wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); 5070 /* flush and sleep below */ 5071 } 5072 /* set trans_start so we don't get spurious watchdogs during reset */ 5073 netif_trans_update(netdev); 5074 5075 netif_carrier_off(netdev); 5076 netif_tx_stop_all_queues(netdev); 5077 5078 if (pci_device_is_present(adapter->pdev)) { 5079 /* disable transmits in the hardware */ 5080 tctl = rd32(IGC_TCTL); 5081 tctl &= ~IGC_TCTL_EN; 5082 wr32(IGC_TCTL, tctl); 5083 /* flush both disables and wait for them to finish */ 5084 wrfl(); 5085 usleep_range(10000, 20000); 5086 5087 igc_irq_disable(adapter); 5088 } 5089 5090 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; 5091 5092 for (i = 0; i < adapter->num_q_vectors; i++) { 5093 if (adapter->q_vector[i]) { 5094 napi_synchronize(&adapter->q_vector[i]->napi); 5095 napi_disable(&adapter->q_vector[i]->napi); 5096 } 5097 } 5098 5099 del_timer_sync(&adapter->watchdog_timer); 5100 del_timer_sync(&adapter->phy_info_timer); 5101 5102 /* record the stats before reset*/ 5103 spin_lock(&adapter->stats64_lock); 5104 igc_update_stats(adapter); 5105 spin_unlock(&adapter->stats64_lock); 5106 5107 adapter->link_speed = 0; 5108 adapter->link_duplex = 0; 5109 5110 if (!pci_channel_offline(adapter->pdev)) 5111 igc_reset(adapter); 5112 5113 /* clear VLAN promisc flag so VFTA will be updated if necessary */ 5114 adapter->flags &= ~IGC_FLAG_VLAN_PROMISC; 5115 5116 igc_disable_all_tx_rings_hw(adapter); 5117 igc_clean_all_tx_rings(adapter); 5118 igc_clean_all_rx_rings(adapter); 5119 } 5120 5121 void 
igc_reinit_locked(struct igc_adapter *adapter) 5122 { 5123 while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) 5124 usleep_range(1000, 2000); 5125 igc_down(adapter); 5126 igc_up(adapter); 5127 clear_bit(__IGC_RESETTING, &adapter->state); 5128 } 5129 5130 static void igc_reset_task(struct work_struct *work) 5131 { 5132 struct igc_adapter *adapter; 5133 5134 adapter = container_of(work, struct igc_adapter, reset_task); 5135 5136 rtnl_lock(); 5137 /* If we're already down or resetting, just bail */ 5138 if (test_bit(__IGC_DOWN, &adapter->state) || 5139 test_bit(__IGC_RESETTING, &adapter->state)) { 5140 rtnl_unlock(); 5141 return; 5142 } 5143 5144 igc_rings_dump(adapter); 5145 igc_regs_dump(adapter); 5146 netdev_err(adapter->netdev, "Reset adapter\n"); 5147 igc_reinit_locked(adapter); 5148 rtnl_unlock(); 5149 } 5150 5151 /** 5152 * igc_change_mtu - Change the Maximum Transfer Unit 5153 * @netdev: network interface device structure 5154 * @new_mtu: new value for maximum frame size 5155 * 5156 * Returns 0 on success, negative on failure 5157 */ 5158 static int igc_change_mtu(struct net_device *netdev, int new_mtu) 5159 { 5160 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 5161 struct igc_adapter *adapter = netdev_priv(netdev); 5162 5163 if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) { 5164 netdev_dbg(netdev, "Jumbo frames not supported with XDP"); 5165 return -EINVAL; 5166 } 5167 5168 /* adjust max frame to be at least the size of a standard frame */ 5169 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) 5170 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; 5171 5172 while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) 5173 usleep_range(1000, 2000); 5174 5175 /* igc_down has a dependency on max_frame_size */ 5176 adapter->max_frame_size = max_frame; 5177 5178 if (netif_running(netdev)) 5179 igc_down(adapter); 5180 5181 netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); 5182 netdev->mtu = new_mtu; 5183 5184 if (netif_running(netdev)) 5185 igc_up(adapter); 5186 else 5187 igc_reset(adapter); 5188 5189 clear_bit(__IGC_RESETTING, &adapter->state); 5190 5191 return 0; 5192 } 5193 5194 /** 5195 * igc_tx_timeout - Respond to a Tx Hang 5196 * @netdev: network interface device structure 5197 * @txqueue: queue number that timed out 5198 **/ 5199 static void igc_tx_timeout(struct net_device *netdev, 5200 unsigned int __always_unused txqueue) 5201 { 5202 struct igc_adapter *adapter = netdev_priv(netdev); 5203 struct igc_hw *hw = &adapter->hw; 5204 5205 /* Do the reset outside of interrupt context */ 5206 adapter->tx_timeout_count++; 5207 schedule_work(&adapter->reset_task); 5208 wr32(IGC_EICS, 5209 (adapter->eims_enable_mask & ~adapter->eims_other)); 5210 } 5211 5212 /** 5213 * igc_get_stats64 - Get System Network Statistics 5214 * @netdev: network interface device structure 5215 * @stats: rtnl_link_stats64 pointer 5216 * 5217 * Returns the address of the device statistics structure. 5218 * The statistics are updated here and also from the timer callback. 
5219 */ 5220 static void igc_get_stats64(struct net_device *netdev, 5221 struct rtnl_link_stats64 *stats) 5222 { 5223 struct igc_adapter *adapter = netdev_priv(netdev); 5224 5225 spin_lock(&adapter->stats64_lock); 5226 if (!test_bit(__IGC_RESETTING, &adapter->state)) 5227 igc_update_stats(adapter); 5228 memcpy(stats, &adapter->stats64, sizeof(*stats)); 5229 spin_unlock(&adapter->stats64_lock); 5230 } 5231 5232 static netdev_features_t igc_fix_features(struct net_device *netdev, 5233 netdev_features_t features) 5234 { 5235 /* Since there is no support for separate Rx/Tx vlan accel 5236 * enable/disable make sure Tx flag is always in same state as Rx. 5237 */ 5238 if (features & NETIF_F_HW_VLAN_CTAG_RX) 5239 features |= NETIF_F_HW_VLAN_CTAG_TX; 5240 else 5241 features &= ~NETIF_F_HW_VLAN_CTAG_TX; 5242 5243 return features; 5244 } 5245 5246 static int igc_set_features(struct net_device *netdev, 5247 netdev_features_t features) 5248 { 5249 netdev_features_t changed = netdev->features ^ features; 5250 struct igc_adapter *adapter = netdev_priv(netdev); 5251 5252 if (changed & NETIF_F_HW_VLAN_CTAG_RX) 5253 igc_vlan_mode(netdev, features); 5254 5255 /* Add VLAN support */ 5256 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) 5257 return 0; 5258 5259 if (!(features & NETIF_F_NTUPLE)) 5260 igc_flush_nfc_rules(adapter); 5261 5262 netdev->features = features; 5263 5264 if (netif_running(netdev)) 5265 igc_reinit_locked(adapter); 5266 else 5267 igc_reset(adapter); 5268 5269 return 1; 5270 } 5271 5272 static netdev_features_t 5273 igc_features_check(struct sk_buff *skb, struct net_device *dev, 5274 netdev_features_t features) 5275 { 5276 unsigned int network_hdr_len, mac_hdr_len; 5277 5278 /* Make certain the headers can be described by a context descriptor */ 5279 mac_hdr_len = skb_network_header(skb) - skb->data; 5280 if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN)) 5281 return features & ~(NETIF_F_HW_CSUM | 5282 NETIF_F_SCTP_CRC | 5283 NETIF_F_HW_VLAN_CTAG_TX | 5284 NETIF_F_TSO | 5285 NETIF_F_TSO6); 5286 5287 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); 5288 if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN)) 5289 return features & ~(NETIF_F_HW_CSUM | 5290 NETIF_F_SCTP_CRC | 5291 NETIF_F_TSO | 5292 NETIF_F_TSO6); 5293 5294 /* We can only support IPv4 TSO in tunnels if we can mangle the 5295 * inner IP ID field, so strip TSO if MANGLEID is not supported. 
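 * (NETIF_F_TSO_MANGLEID signals that the device may rewrite the IP ID
 * while segmenting; without it, clearing NETIF_F_TSO here simply pushes
 * encapsulated IPv4 traffic back to software GSO.)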
5296 */ 5297 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) 5298 features &= ~NETIF_F_TSO; 5299 5300 return features; 5301 } 5302 5303 static void igc_tsync_interrupt(struct igc_adapter *adapter) 5304 { 5305 u32 ack, tsauxc, sec, nsec, tsicr; 5306 struct igc_hw *hw = &adapter->hw; 5307 struct ptp_clock_event event; 5308 struct timespec64 ts; 5309 5310 tsicr = rd32(IGC_TSICR); 5311 ack = 0; 5312 5313 if (tsicr & IGC_TSICR_SYS_WRAP) { 5314 event.type = PTP_CLOCK_PPS; 5315 if (adapter->ptp_caps.pps) 5316 ptp_clock_event(adapter->ptp_clock, &event); 5317 ack |= IGC_TSICR_SYS_WRAP; 5318 } 5319 5320 if (tsicr & IGC_TSICR_TXTS) { 5321 /* retrieve hardware timestamp */ 5322 igc_ptp_tx_tstamp_event(adapter); 5323 ack |= IGC_TSICR_TXTS; 5324 } 5325 5326 if (tsicr & IGC_TSICR_TT0) { 5327 spin_lock(&adapter->tmreg_lock); 5328 ts = timespec64_add(adapter->perout[0].start, 5329 adapter->perout[0].period); 5330 wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); 5331 wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec); 5332 tsauxc = rd32(IGC_TSAUXC); 5333 tsauxc |= IGC_TSAUXC_EN_TT0; 5334 wr32(IGC_TSAUXC, tsauxc); 5335 adapter->perout[0].start = ts; 5336 spin_unlock(&adapter->tmreg_lock); 5337 ack |= IGC_TSICR_TT0; 5338 } 5339 5340 if (tsicr & IGC_TSICR_TT1) { 5341 spin_lock(&adapter->tmreg_lock); 5342 ts = timespec64_add(adapter->perout[1].start, 5343 adapter->perout[1].period); 5344 wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); 5345 wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec); 5346 tsauxc = rd32(IGC_TSAUXC); 5347 tsauxc |= IGC_TSAUXC_EN_TT1; 5348 wr32(IGC_TSAUXC, tsauxc); 5349 adapter->perout[1].start = ts; 5350 spin_unlock(&adapter->tmreg_lock); 5351 ack |= IGC_TSICR_TT1; 5352 } 5353 5354 if (tsicr & IGC_TSICR_AUTT0) { 5355 nsec = rd32(IGC_AUXSTMPL0); 5356 sec = rd32(IGC_AUXSTMPH0); 5357 event.type = PTP_CLOCK_EXTTS; 5358 event.index = 0; 5359 event.timestamp = sec * NSEC_PER_SEC + nsec; 5360 ptp_clock_event(adapter->ptp_clock, &event); 5361 ack |= IGC_TSICR_AUTT0; 5362 } 5363 5364 if (tsicr & IGC_TSICR_AUTT1) { 5365 nsec = rd32(IGC_AUXSTMPL1); 5366 sec = rd32(IGC_AUXSTMPH1); 5367 event.type = PTP_CLOCK_EXTTS; 5368 event.index = 1; 5369 event.timestamp = sec * NSEC_PER_SEC + nsec; 5370 ptp_clock_event(adapter->ptp_clock, &event); 5371 ack |= IGC_TSICR_AUTT1; 5372 } 5373 5374 /* acknowledge the interrupts */ 5375 wr32(IGC_TSICR, ack); 5376 } 5377 5378 /** 5379 * igc_msix_other - msix other interrupt handler 5380 * @irq: interrupt number 5381 * @data: pointer to a q_vector 5382 */ 5383 static irqreturn_t igc_msix_other(int irq, void *data) 5384 { 5385 struct igc_adapter *adapter = data; 5386 struct igc_hw *hw = &adapter->hw; 5387 u32 icr = rd32(IGC_ICR); 5388 5389 /* reading ICR causes bit 31 of EICR to be cleared */ 5390 if (icr & IGC_ICR_DRSTA) 5391 schedule_work(&adapter->reset_task); 5392 5393 if (icr & IGC_ICR_DOUTSYNC) { 5394 /* HW is reporting DMA is out of sync */ 5395 adapter->stats.doosync++; 5396 } 5397 5398 if (icr & IGC_ICR_LSC) { 5399 hw->mac.get_link_status = true; 5400 /* guard against interrupt when we're going down */ 5401 if (!test_bit(__IGC_DOWN, &adapter->state)) 5402 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5403 } 5404 5405 if (icr & IGC_ICR_TS) 5406 igc_tsync_interrupt(adapter); 5407 5408 wr32(IGC_EIMS, adapter->eims_other); 5409 5410 return IRQ_HANDLED; 5411 } 5412 5413 static void igc_write_itr(struct igc_q_vector *q_vector) 5414 { 5415 u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK; 5416 5417 if (!q_vector->set_itr) 5418 return; 5419 5420 if 
(!itr_val) 5421 itr_val = IGC_ITR_VAL_MASK; 5422 5423 itr_val |= IGC_EITR_CNT_IGNR; 5424 5425 writel(itr_val, q_vector->itr_register); 5426 q_vector->set_itr = 0; 5427 } 5428 5429 static irqreturn_t igc_msix_ring(int irq, void *data) 5430 { 5431 struct igc_q_vector *q_vector = data; 5432 5433 /* Write the ITR value calculated from the previous interrupt. */ 5434 igc_write_itr(q_vector); 5435 5436 napi_schedule(&q_vector->napi); 5437 5438 return IRQ_HANDLED; 5439 } 5440 5441 /** 5442 * igc_request_msix - Initialize MSI-X interrupts 5443 * @adapter: Pointer to adapter structure 5444 * 5445 * igc_request_msix allocates MSI-X vectors and requests interrupts from the 5446 * kernel. 5447 */ 5448 static int igc_request_msix(struct igc_adapter *adapter) 5449 { 5450 unsigned int num_q_vectors = adapter->num_q_vectors; 5451 int i = 0, err = 0, vector = 0, free_vector = 0; 5452 struct net_device *netdev = adapter->netdev; 5453 5454 err = request_irq(adapter->msix_entries[vector].vector, 5455 &igc_msix_other, 0, netdev->name, adapter); 5456 if (err) 5457 goto err_out; 5458 5459 if (num_q_vectors > MAX_Q_VECTORS) { 5460 num_q_vectors = MAX_Q_VECTORS; 5461 dev_warn(&adapter->pdev->dev, 5462 "The number of queue vectors (%d) is higher than max allowed (%d)\n", 5463 adapter->num_q_vectors, MAX_Q_VECTORS); 5464 } 5465 for (i = 0; i < num_q_vectors; i++) { 5466 struct igc_q_vector *q_vector = adapter->q_vector[i]; 5467 5468 vector++; 5469 5470 q_vector->itr_register = adapter->io_addr + IGC_EITR(vector); 5471 5472 if (q_vector->rx.ring && q_vector->tx.ring) 5473 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, 5474 q_vector->rx.ring->queue_index); 5475 else if (q_vector->tx.ring) 5476 sprintf(q_vector->name, "%s-tx-%u", netdev->name, 5477 q_vector->tx.ring->queue_index); 5478 else if (q_vector->rx.ring) 5479 sprintf(q_vector->name, "%s-rx-%u", netdev->name, 5480 q_vector->rx.ring->queue_index); 5481 else 5482 sprintf(q_vector->name, "%s-unused", netdev->name); 5483 5484 err = request_irq(adapter->msix_entries[vector].vector, 5485 igc_msix_ring, 0, q_vector->name, 5486 q_vector); 5487 if (err) 5488 goto err_free; 5489 } 5490 5491 igc_configure_msix(adapter); 5492 return 0; 5493 5494 err_free: 5495 /* free already assigned IRQs */ 5496 free_irq(adapter->msix_entries[free_vector++].vector, adapter); 5497 5498 vector--; 5499 for (i = 0; i < vector; i++) { 5500 free_irq(adapter->msix_entries[free_vector++].vector, 5501 adapter->q_vector[i]); 5502 } 5503 err_out: 5504 return err; 5505 } 5506 5507 /** 5508 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts 5509 * @adapter: Pointer to adapter structure 5510 * 5511 * This function resets the device so that it has 0 rx queues, tx queues, and 5512 * MSI-X interrupts allocated. 
5513 */ 5514 static void igc_clear_interrupt_scheme(struct igc_adapter *adapter) 5515 { 5516 igc_free_q_vectors(adapter); 5517 igc_reset_interrupt_capability(adapter); 5518 } 5519 5520 /* Need to wait a few seconds after link up to get diagnostic information from 5521 * the phy 5522 */ 5523 static void igc_update_phy_info(struct timer_list *t) 5524 { 5525 struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer); 5526 5527 igc_get_phy_info(&adapter->hw); 5528 } 5529 5530 /** 5531 * igc_has_link - check shared code for link and determine up/down 5532 * @adapter: pointer to driver private info 5533 */ 5534 bool igc_has_link(struct igc_adapter *adapter) 5535 { 5536 struct igc_hw *hw = &adapter->hw; 5537 bool link_active = false; 5538 5539 /* get_link_status is set on LSC (link status) interrupt or 5540 * rx sequence error interrupt. get_link_status will stay 5541 * false until the igc_check_for_link establishes link 5542 * for copper adapters ONLY 5543 */ 5544 if (!hw->mac.get_link_status) 5545 return true; 5546 hw->mac.ops.check_for_link(hw); 5547 link_active = !hw->mac.get_link_status; 5548 5549 if (hw->mac.type == igc_i225) { 5550 if (!netif_carrier_ok(adapter->netdev)) { 5551 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; 5552 } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { 5553 adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE; 5554 adapter->link_check_timeout = jiffies; 5555 } 5556 } 5557 5558 return link_active; 5559 } 5560 5561 /** 5562 * igc_watchdog - Timer Call-back 5563 * @t: timer for the watchdog 5564 */ 5565 static void igc_watchdog(struct timer_list *t) 5566 { 5567 struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer); 5568 /* Do the rest outside of interrupt context */ 5569 schedule_work(&adapter->watchdog_task); 5570 } 5571 5572 static void igc_watchdog_task(struct work_struct *work) 5573 { 5574 struct igc_adapter *adapter = container_of(work, 5575 struct igc_adapter, 5576 watchdog_task); 5577 struct net_device *netdev = adapter->netdev; 5578 struct igc_hw *hw = &adapter->hw; 5579 struct igc_phy_info *phy = &hw->phy; 5580 u16 phy_data, retry_count = 20; 5581 u32 link; 5582 int i; 5583 5584 link = igc_has_link(adapter); 5585 5586 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) { 5587 if (time_after(jiffies, (adapter->link_check_timeout + HZ))) 5588 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; 5589 else 5590 link = false; 5591 } 5592 5593 if (link) { 5594 /* Cancel scheduled suspend requests. */ 5595 pm_runtime_resume(netdev->dev.parent); 5596 5597 if (!netif_carrier_ok(netdev)) { 5598 u32 ctrl; 5599 5600 hw->mac.ops.get_speed_and_duplex(hw, 5601 &adapter->link_speed, 5602 &adapter->link_duplex); 5603 5604 ctrl = rd32(IGC_CTRL); 5605 /* Link status message must follow this format */ 5606 netdev_info(netdev, 5607 "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", 5608 adapter->link_speed, 5609 adapter->link_duplex == FULL_DUPLEX ? 5610 "Full" : "Half", 5611 (ctrl & IGC_CTRL_TFCE) && 5612 (ctrl & IGC_CTRL_RFCE) ? "RX/TX" : 5613 (ctrl & IGC_CTRL_RFCE) ? "RX" : 5614 (ctrl & IGC_CTRL_TFCE) ? "TX" : "None"); 5615 5616 /* disable EEE if enabled */ 5617 if ((adapter->flags & IGC_FLAG_EEE) && 5618 adapter->link_duplex == HALF_DUPLEX) { 5619 netdev_info(netdev, 5620 "EEE Disabled: unsupported at half duplex. 
Re-enable using ethtool when at full duplex\n"); 5621 adapter->hw.dev_spec._base.eee_enable = false; 5622 adapter->flags &= ~IGC_FLAG_EEE; 5623 } 5624 5625 /* check if SmartSpeed worked */ 5626 igc_check_downshift(hw); 5627 if (phy->speed_downgraded) 5628 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); 5629 5630 /* adjust timeout factor according to speed/duplex */ 5631 adapter->tx_timeout_factor = 1; 5632 switch (adapter->link_speed) { 5633 case SPEED_10: 5634 adapter->tx_timeout_factor = 14; 5635 break; 5636 case SPEED_100: 5637 case SPEED_1000: 5638 case SPEED_2500: 5639 adapter->tx_timeout_factor = 1; 5640 break; 5641 } 5642 5643 /* Once the launch time has been set on the wire, there 5644 * is a delay before the link speed can be determined 5645 * based on link-up activity. Write into the register 5646 * as soon as we know the correct link speed. 5647 */ 5648 igc_tsn_adjust_txtime_offset(adapter); 5649 5650 if (adapter->link_speed != SPEED_1000) 5651 goto no_wait; 5652 5653 /* wait for Remote receiver status OK */ 5654 retry_read_status: 5655 if (!igc_read_phy_reg(hw, PHY_1000T_STATUS, 5656 &phy_data)) { 5657 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && 5658 retry_count) { 5659 msleep(100); 5660 retry_count--; 5661 goto retry_read_status; 5662 } else if (!retry_count) { 5663 netdev_err(netdev, "exceed max 2 second\n"); 5664 } 5665 } else { 5666 netdev_err(netdev, "read 1000Base-T Status Reg\n"); 5667 } 5668 no_wait: 5669 netif_carrier_on(netdev); 5670 5671 /* link state has changed, schedule phy info update */ 5672 if (!test_bit(__IGC_DOWN, &adapter->state)) 5673 mod_timer(&adapter->phy_info_timer, 5674 round_jiffies(jiffies + 2 * HZ)); 5675 } 5676 } else { 5677 if (netif_carrier_ok(netdev)) { 5678 adapter->link_speed = 0; 5679 adapter->link_duplex = 0; 5680 5681 /* Links status message must follow this format */ 5682 netdev_info(netdev, "NIC Link is Down\n"); 5683 netif_carrier_off(netdev); 5684 5685 /* link state has changed, schedule phy info update */ 5686 if (!test_bit(__IGC_DOWN, &adapter->state)) 5687 mod_timer(&adapter->phy_info_timer, 5688 round_jiffies(jiffies + 2 * HZ)); 5689 5690 pm_schedule_suspend(netdev->dev.parent, 5691 MSEC_PER_SEC * 5); 5692 } 5693 } 5694 5695 spin_lock(&adapter->stats64_lock); 5696 igc_update_stats(adapter); 5697 spin_unlock(&adapter->stats64_lock); 5698 5699 for (i = 0; i < adapter->num_tx_queues; i++) { 5700 struct igc_ring *tx_ring = adapter->tx_ring[i]; 5701 5702 if (!netif_carrier_ok(netdev)) { 5703 /* We've lost link, so the controller stops DMA, 5704 * but we've got queued Tx work that's never going 5705 * to get done, so reset controller to flush Tx. 5706 * (Do the reset outside of interrupt context). 
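 * The "igc_desc_unused(tx_ring) + 1 < tx_ring->count" test below is just
 * "the ring still has descriptors outstanding".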
5707 */ 5708 if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) { 5709 adapter->tx_timeout_count++; 5710 schedule_work(&adapter->reset_task); 5711 /* return immediately since reset is imminent */ 5712 return; 5713 } 5714 } 5715 5716 /* Force detection of hung controller every watchdog period */ 5717 set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 5718 } 5719 5720 /* Cause software interrupt to ensure Rx ring is cleaned */ 5721 if (adapter->flags & IGC_FLAG_HAS_MSIX) { 5722 u32 eics = 0; 5723 5724 for (i = 0; i < adapter->num_q_vectors; i++) 5725 eics |= adapter->q_vector[i]->eims_value; 5726 wr32(IGC_EICS, eics); 5727 } else { 5728 wr32(IGC_ICS, IGC_ICS_RXDMT0); 5729 } 5730 5731 igc_ptp_tx_hang(adapter); 5732 5733 /* Reset the timer */ 5734 if (!test_bit(__IGC_DOWN, &adapter->state)) { 5735 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) 5736 mod_timer(&adapter->watchdog_timer, 5737 round_jiffies(jiffies + HZ)); 5738 else 5739 mod_timer(&adapter->watchdog_timer, 5740 round_jiffies(jiffies + 2 * HZ)); 5741 } 5742 } 5743 5744 /** 5745 * igc_intr_msi - Interrupt Handler 5746 * @irq: interrupt number 5747 * @data: pointer to a network interface device structure 5748 */ 5749 static irqreturn_t igc_intr_msi(int irq, void *data) 5750 { 5751 struct igc_adapter *adapter = data; 5752 struct igc_q_vector *q_vector = adapter->q_vector[0]; 5753 struct igc_hw *hw = &adapter->hw; 5754 /* read ICR disables interrupts using IAM */ 5755 u32 icr = rd32(IGC_ICR); 5756 5757 igc_write_itr(q_vector); 5758 5759 if (icr & IGC_ICR_DRSTA) 5760 schedule_work(&adapter->reset_task); 5761 5762 if (icr & IGC_ICR_DOUTSYNC) { 5763 /* HW is reporting DMA is out of sync */ 5764 adapter->stats.doosync++; 5765 } 5766 5767 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { 5768 hw->mac.get_link_status = true; 5769 if (!test_bit(__IGC_DOWN, &adapter->state)) 5770 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5771 } 5772 5773 if (icr & IGC_ICR_TS) 5774 igc_tsync_interrupt(adapter); 5775 5776 napi_schedule(&q_vector->napi); 5777 5778 return IRQ_HANDLED; 5779 } 5780 5781 /** 5782 * igc_intr - Legacy Interrupt Handler 5783 * @irq: interrupt number 5784 * @data: pointer to a network interface device structure 5785 */ 5786 static irqreturn_t igc_intr(int irq, void *data) 5787 { 5788 struct igc_adapter *adapter = data; 5789 struct igc_q_vector *q_vector = adapter->q_vector[0]; 5790 struct igc_hw *hw = &adapter->hw; 5791 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. 
No 5792 * need for the IMC write 5793 */ 5794 u32 icr = rd32(IGC_ICR); 5795 5796 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is 5797 * not set, then the adapter didn't send an interrupt 5798 */ 5799 if (!(icr & IGC_ICR_INT_ASSERTED)) 5800 return IRQ_NONE; 5801 5802 igc_write_itr(q_vector); 5803 5804 if (icr & IGC_ICR_DRSTA) 5805 schedule_work(&adapter->reset_task); 5806 5807 if (icr & IGC_ICR_DOUTSYNC) { 5808 /* HW is reporting DMA is out of sync */ 5809 adapter->stats.doosync++; 5810 } 5811 5812 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { 5813 hw->mac.get_link_status = true; 5814 /* guard against interrupt when we're going down */ 5815 if (!test_bit(__IGC_DOWN, &adapter->state)) 5816 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5817 } 5818 5819 if (icr & IGC_ICR_TS) 5820 igc_tsync_interrupt(adapter); 5821 5822 napi_schedule(&q_vector->napi); 5823 5824 return IRQ_HANDLED; 5825 } 5826 5827 static void igc_free_irq(struct igc_adapter *adapter) 5828 { 5829 if (adapter->msix_entries) { 5830 int vector = 0, i; 5831 5832 free_irq(adapter->msix_entries[vector++].vector, adapter); 5833 5834 for (i = 0; i < adapter->num_q_vectors; i++) 5835 free_irq(adapter->msix_entries[vector++].vector, 5836 adapter->q_vector[i]); 5837 } else { 5838 free_irq(adapter->pdev->irq, adapter); 5839 } 5840 } 5841 5842 /** 5843 * igc_request_irq - initialize interrupts 5844 * @adapter: Pointer to adapter structure 5845 * 5846 * Attempts to configure interrupts using the best available 5847 * capabilities of the hardware and kernel. 5848 */ 5849 static int igc_request_irq(struct igc_adapter *adapter) 5850 { 5851 struct net_device *netdev = adapter->netdev; 5852 struct pci_dev *pdev = adapter->pdev; 5853 int err = 0; 5854 5855 if (adapter->flags & IGC_FLAG_HAS_MSIX) { 5856 err = igc_request_msix(adapter); 5857 if (!err) 5858 goto request_done; 5859 /* fall back to MSI */ 5860 igc_free_all_tx_resources(adapter); 5861 igc_free_all_rx_resources(adapter); 5862 5863 igc_clear_interrupt_scheme(adapter); 5864 err = igc_init_interrupt_scheme(adapter, false); 5865 if (err) 5866 goto request_done; 5867 igc_setup_all_tx_resources(adapter); 5868 igc_setup_all_rx_resources(adapter); 5869 igc_configure(adapter); 5870 } 5871 5872 igc_assign_vector(adapter->q_vector[0], 0); 5873 5874 if (adapter->flags & IGC_FLAG_HAS_MSI) { 5875 err = request_irq(pdev->irq, &igc_intr_msi, 0, 5876 netdev->name, adapter); 5877 if (!err) 5878 goto request_done; 5879 5880 /* fall back to legacy interrupts */ 5881 igc_reset_interrupt_capability(adapter); 5882 adapter->flags &= ~IGC_FLAG_HAS_MSI; 5883 } 5884 5885 err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, 5886 netdev->name, adapter); 5887 5888 if (err) 5889 netdev_err(netdev, "Error %d getting interrupt\n", err); 5890 5891 request_done: 5892 return err; 5893 } 5894 5895 /** 5896 * __igc_open - Called when a network interface is made active 5897 * @netdev: network interface device structure 5898 * @resuming: boolean indicating if the device is resuming 5899 * 5900 * Returns 0 on success, negative value on failure 5901 * 5902 * The open entry point is called when a network interface is made 5903 * active by the system (IFF_UP). At this point all resources needed 5904 * for transmit and receive operations are allocated, the interrupt 5905 * handler is registered with the OS, the watchdog timer is started, 5906 * and the stack is notified that the interface is ready. 
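 * On failure the error path unwinds in reverse: the IRQs are freed,
 * firmware regains control of the hardware via igc_release_hw_control(),
 * the copper PHY is powered down, and the Rx/Tx descriptor resources are
 * released before the adapter is reset.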
5907 */ 5908 static int __igc_open(struct net_device *netdev, bool resuming) 5909 { 5910 struct igc_adapter *adapter = netdev_priv(netdev); 5911 struct pci_dev *pdev = adapter->pdev; 5912 struct igc_hw *hw = &adapter->hw; 5913 int err = 0; 5914 int i = 0; 5915 5916 /* disallow open during test */ 5917 5918 if (test_bit(__IGC_TESTING, &adapter->state)) { 5919 WARN_ON(resuming); 5920 return -EBUSY; 5921 } 5922 5923 if (!resuming) 5924 pm_runtime_get_sync(&pdev->dev); 5925 5926 netif_carrier_off(netdev); 5927 5928 /* allocate transmit descriptors */ 5929 err = igc_setup_all_tx_resources(adapter); 5930 if (err) 5931 goto err_setup_tx; 5932 5933 /* allocate receive descriptors */ 5934 err = igc_setup_all_rx_resources(adapter); 5935 if (err) 5936 goto err_setup_rx; 5937 5938 igc_power_up_link(adapter); 5939 5940 igc_configure(adapter); 5941 5942 err = igc_request_irq(adapter); 5943 if (err) 5944 goto err_req_irq; 5945 5946 /* Notify the stack of the actual queue counts. */ 5947 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); 5948 if (err) 5949 goto err_set_queues; 5950 5951 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); 5952 if (err) 5953 goto err_set_queues; 5954 5955 clear_bit(__IGC_DOWN, &adapter->state); 5956 5957 for (i = 0; i < adapter->num_q_vectors; i++) 5958 napi_enable(&adapter->q_vector[i]->napi); 5959 5960 /* Clear any pending interrupts. */ 5961 rd32(IGC_ICR); 5962 igc_irq_enable(adapter); 5963 5964 if (!resuming) 5965 pm_runtime_put(&pdev->dev); 5966 5967 netif_tx_start_all_queues(netdev); 5968 5969 /* start the watchdog. */ 5970 hw->mac.get_link_status = true; 5971 schedule_work(&adapter->watchdog_task); 5972 5973 return IGC_SUCCESS; 5974 5975 err_set_queues: 5976 igc_free_irq(adapter); 5977 err_req_irq: 5978 igc_release_hw_control(adapter); 5979 igc_power_down_phy_copper_base(&adapter->hw); 5980 igc_free_all_rx_resources(adapter); 5981 err_setup_rx: 5982 igc_free_all_tx_resources(adapter); 5983 err_setup_tx: 5984 igc_reset(adapter); 5985 if (!resuming) 5986 pm_runtime_put(&pdev->dev); 5987 5988 return err; 5989 } 5990 5991 int igc_open(struct net_device *netdev) 5992 { 5993 return __igc_open(netdev, false); 5994 } 5995 5996 /** 5997 * __igc_close - Disables a network interface 5998 * @netdev: network interface device structure 5999 * @suspending: boolean indicating the device is suspending 6000 * 6001 * Returns 0, this is not allowed to fail 6002 * 6003 * The close entry point is called when an interface is de-activated 6004 * by the OS. The hardware is still under the driver's control, but 6005 * needs to be disabled. A global MAC reset is issued to stop the 6006 * hardware, and all transmit and receive resources are freed. 
6007 */ 6008 static int __igc_close(struct net_device *netdev, bool suspending) 6009 { 6010 struct igc_adapter *adapter = netdev_priv(netdev); 6011 struct pci_dev *pdev = adapter->pdev; 6012 6013 WARN_ON(test_bit(__IGC_RESETTING, &adapter->state)); 6014 6015 if (!suspending) 6016 pm_runtime_get_sync(&pdev->dev); 6017 6018 igc_down(adapter); 6019 6020 igc_release_hw_control(adapter); 6021 6022 igc_free_irq(adapter); 6023 6024 igc_free_all_tx_resources(adapter); 6025 igc_free_all_rx_resources(adapter); 6026 6027 if (!suspending) 6028 pm_runtime_put_sync(&pdev->dev); 6029 6030 return 0; 6031 } 6032 6033 int igc_close(struct net_device *netdev) 6034 { 6035 if (netif_device_present(netdev) || netdev->dismantle) 6036 return __igc_close(netdev, false); 6037 return 0; 6038 } 6039 6040 /** 6041 * igc_ioctl - Access the hwtstamp interface 6042 * @netdev: network interface device structure 6043 * @ifr: interface request data 6044 * @cmd: ioctl command 6045 **/ 6046 static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 6047 { 6048 switch (cmd) { 6049 case SIOCGHWTSTAMP: 6050 return igc_ptp_get_ts_config(netdev, ifr); 6051 case SIOCSHWTSTAMP: 6052 return igc_ptp_set_ts_config(netdev, ifr); 6053 default: 6054 return -EOPNOTSUPP; 6055 } 6056 } 6057 6058 static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue, 6059 bool enable) 6060 { 6061 struct igc_ring *ring; 6062 6063 if (queue < 0 || queue >= adapter->num_tx_queues) 6064 return -EINVAL; 6065 6066 ring = adapter->tx_ring[queue]; 6067 ring->launchtime_enable = enable; 6068 6069 return 0; 6070 } 6071 6072 static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now) 6073 { 6074 struct timespec64 b; 6075 6076 b = ktime_to_timespec64(base_time); 6077 6078 return timespec64_compare(now, &b) > 0; 6079 } 6080 6081 static bool validate_schedule(struct igc_adapter *adapter, 6082 const struct tc_taprio_qopt_offload *qopt) 6083 { 6084 int queue_uses[IGC_MAX_TX_QUEUES] = { }; 6085 struct igc_hw *hw = &adapter->hw; 6086 struct timespec64 now; 6087 size_t n; 6088 6089 if (qopt->cycle_time_extension) 6090 return false; 6091 6092 igc_ptp_read(adapter, &now); 6093 6094 /* If we program the controller's BASET registers with a time 6095 * in the future, it will hold all the packets until that 6096 * time, causing a lot of TX Hangs, so to avoid that, we 6097 * reject schedules that would start in the future. 6098 * Note: Limitation above is no longer in i226. 6099 */ 6100 if (!is_base_time_past(qopt->base_time, &now) && 6101 igc_is_device_id_i225(hw)) 6102 return false; 6103 6104 for (n = 0; n < qopt->num_entries; n++) { 6105 const struct tc_taprio_sched_entry *e, *prev; 6106 int i; 6107 6108 prev = n ? &qopt->entries[n - 1] : NULL; 6109 e = &qopt->entries[n]; 6110 6111 /* i225 only supports "global" frame preemption 6112 * settings. 6113 */ 6114 if (e->command != TC_TAPRIO_CMD_SET_GATES) 6115 return false; 6116 6117 for (i = 0; i < adapter->num_tx_queues; i++) 6118 if (e->gate_mask & BIT(i)) { 6119 queue_uses[i]++; 6120 6121 /* There are limitations: A single queue cannot 6122 * be opened and closed multiple times per cycle 6123 * unless the gate stays open. Check for it. 
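 * For example, a schedule whose entries open queue 0, close it, and then
 * open it again is rejected: while handling the third entry,
 * queue_uses[0] has reached 2 and the previous entry's gate_mask left the
 * gate closed, so validate_schedule() returns false.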
6124 */ 6125 if (queue_uses[i] > 1 && 6126 !(prev->gate_mask & BIT(i))) 6127 return false; 6128 } 6129 } 6130 6131 return true; 6132 } 6133 6134 static int igc_tsn_enable_launchtime(struct igc_adapter *adapter, 6135 struct tc_etf_qopt_offload *qopt) 6136 { 6137 struct igc_hw *hw = &adapter->hw; 6138 int err; 6139 6140 if (hw->mac.type != igc_i225) 6141 return -EOPNOTSUPP; 6142 6143 err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable); 6144 if (err) 6145 return err; 6146 6147 return igc_tsn_offload_apply(adapter); 6148 } 6149 6150 static int igc_qbv_clear_schedule(struct igc_adapter *adapter) 6151 { 6152 unsigned long flags; 6153 int i; 6154 6155 adapter->base_time = 0; 6156 adapter->cycle_time = NSEC_PER_SEC; 6157 adapter->taprio_offload_enable = false; 6158 adapter->qbv_config_change_errors = 0; 6159 adapter->qbv_count = 0; 6160 6161 for (i = 0; i < adapter->num_tx_queues; i++) { 6162 struct igc_ring *ring = adapter->tx_ring[i]; 6163 6164 ring->start_time = 0; 6165 ring->end_time = NSEC_PER_SEC; 6166 ring->max_sdu = 0; 6167 } 6168 6169 spin_lock_irqsave(&adapter->qbv_tx_lock, flags); 6170 6171 adapter->qbv_transition = false; 6172 6173 for (i = 0; i < adapter->num_tx_queues; i++) { 6174 struct igc_ring *ring = adapter->tx_ring[i]; 6175 6176 ring->oper_gate_closed = false; 6177 ring->admin_gate_closed = false; 6178 } 6179 6180 spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); 6181 6182 return 0; 6183 } 6184 6185 static int igc_tsn_clear_schedule(struct igc_adapter *adapter) 6186 { 6187 igc_qbv_clear_schedule(adapter); 6188 6189 return 0; 6190 } 6191 6192 static void igc_taprio_stats(struct net_device *dev, 6193 struct tc_taprio_qopt_stats *stats) 6194 { 6195 /* When Strict_End is enabled, the tx_overruns counter 6196 * will always be zero. 6197 */ 6198 stats->tx_overruns = 0; 6199 } 6200 6201 static void igc_taprio_queue_stats(struct net_device *dev, 6202 struct tc_taprio_qopt_queue_stats *queue_stats) 6203 { 6204 struct tc_taprio_qopt_stats *stats = &queue_stats->stats; 6205 6206 /* When Strict_End is enabled, the tx_overruns counter 6207 * will always be zero. 
6208 */ 6209 stats->tx_overruns = 0; 6210 } 6211 6212 static int igc_save_qbv_schedule(struct igc_adapter *adapter, 6213 struct tc_taprio_qopt_offload *qopt) 6214 { 6215 bool queue_configured[IGC_MAX_TX_QUEUES] = { }; 6216 struct igc_hw *hw = &adapter->hw; 6217 u32 start_time = 0, end_time = 0; 6218 struct timespec64 now; 6219 unsigned long flags; 6220 size_t n; 6221 int i; 6222 6223 switch (qopt->cmd) { 6224 case TAPRIO_CMD_REPLACE: 6225 break; 6226 case TAPRIO_CMD_DESTROY: 6227 return igc_tsn_clear_schedule(adapter); 6228 case TAPRIO_CMD_STATS: 6229 igc_taprio_stats(adapter->netdev, &qopt->stats); 6230 return 0; 6231 case TAPRIO_CMD_QUEUE_STATS: 6232 igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats); 6233 return 0; 6234 default: 6235 return -EOPNOTSUPP; 6236 } 6237 6238 if (qopt->base_time < 0) 6239 return -ERANGE; 6240 6241 if (igc_is_device_id_i225(hw) && adapter->taprio_offload_enable) 6242 return -EALREADY; 6243 6244 if (!validate_schedule(adapter, qopt)) 6245 return -EINVAL; 6246 6247 adapter->cycle_time = qopt->cycle_time; 6248 adapter->base_time = qopt->base_time; 6249 adapter->taprio_offload_enable = true; 6250 6251 igc_ptp_read(adapter, &now); 6252 6253 for (n = 0; n < qopt->num_entries; n++) { 6254 struct tc_taprio_sched_entry *e = &qopt->entries[n]; 6255 6256 end_time += e->interval; 6257 6258 /* If any of the conditions below are true, we need to manually 6259 * control the end time of the cycle. 6260 * 1. Qbv users can specify a cycle time that is not equal 6261 * to the total GCL intervals. Hence, recalculation is 6262 * necessary here to exclude the time interval that 6263 * exceeds the cycle time. 6264 * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2, 6265 * once the end of the list is reached, it will switch 6266 * to the END_OF_CYCLE state and leave the gates in the 6267 * same state until the next cycle is started. 6268 */ 6269 if (end_time > adapter->cycle_time || 6270 n + 1 == qopt->num_entries) 6271 end_time = adapter->cycle_time; 6272 6273 for (i = 0; i < adapter->num_tx_queues; i++) { 6274 struct igc_ring *ring = adapter->tx_ring[i]; 6275 6276 if (!(e->gate_mask & BIT(i))) 6277 continue; 6278 6279 /* Check whether a queue stays open for more than one 6280 * entry. If so, keep the start and advance the end 6281 * time. 6282 */ 6283 if (!queue_configured[i]) 6284 ring->start_time = start_time; 6285 ring->end_time = end_time; 6286 6287 if (ring->start_time >= adapter->cycle_time) 6288 queue_configured[i] = false; 6289 else 6290 queue_configured[i] = true; 6291 } 6292 6293 start_time += e->interval; 6294 } 6295 6296 spin_lock_irqsave(&adapter->qbv_tx_lock, flags); 6297 6298 /* Check whether a queue gets configured. 6299 * If not, set the start and end time to be end time. 
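* A start time equal to the end time gives such a queue a zero-length
* transmission window, and the admin/oper gate-closed flags set below keep
* it from transmitting until a schedule entry actually opens its gate.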
6300 */ 6301 for (i = 0; i < adapter->num_tx_queues; i++) { 6302 struct igc_ring *ring = adapter->tx_ring[i]; 6303 6304 if (!is_base_time_past(qopt->base_time, &now)) { 6305 ring->admin_gate_closed = false; 6306 } else { 6307 ring->oper_gate_closed = false; 6308 ring->admin_gate_closed = false; 6309 } 6310 6311 if (!queue_configured[i]) { 6312 if (!is_base_time_past(qopt->base_time, &now)) 6313 ring->admin_gate_closed = true; 6314 else 6315 ring->oper_gate_closed = true; 6316 6317 ring->start_time = end_time; 6318 ring->end_time = end_time; 6319 } 6320 } 6321 6322 spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); 6323 6324 for (i = 0; i < adapter->num_tx_queues; i++) { 6325 struct igc_ring *ring = adapter->tx_ring[i]; 6326 struct net_device *dev = adapter->netdev; 6327 6328 if (qopt->max_sdu[i]) 6329 ring->max_sdu = qopt->max_sdu[i] + dev->hard_header_len - ETH_TLEN; 6330 else 6331 ring->max_sdu = 0; 6332 } 6333 6334 return 0; 6335 } 6336 6337 static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter, 6338 struct tc_taprio_qopt_offload *qopt) 6339 { 6340 struct igc_hw *hw = &adapter->hw; 6341 int err; 6342 6343 if (hw->mac.type != igc_i225) 6344 return -EOPNOTSUPP; 6345 6346 err = igc_save_qbv_schedule(adapter, qopt); 6347 if (err) 6348 return err; 6349 6350 return igc_tsn_offload_apply(adapter); 6351 } 6352 6353 static int igc_save_cbs_params(struct igc_adapter *adapter, int queue, 6354 bool enable, int idleslope, int sendslope, 6355 int hicredit, int locredit) 6356 { 6357 bool cbs_status[IGC_MAX_SR_QUEUES] = { false }; 6358 struct net_device *netdev = adapter->netdev; 6359 struct igc_ring *ring; 6360 int i; 6361 6362 /* i225 has two sets of credit-based shaper logic. 6363 * Supporting it only on the top two priority queues 6364 */ 6365 if (queue < 0 || queue > 1) 6366 return -EINVAL; 6367 6368 ring = adapter->tx_ring[queue]; 6369 6370 for (i = 0; i < IGC_MAX_SR_QUEUES; i++) 6371 if (adapter->tx_ring[i]) 6372 cbs_status[i] = adapter->tx_ring[i]->cbs_enable; 6373 6374 /* CBS should be enabled on the highest priority queue first in order 6375 * for the CBS algorithm to operate as intended. 
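* In practice this means CBS must be enabled on queue 0 before queue 1 and
* disabled on queue 1 before queue 0; the checks below reject any other
* ordering.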
6376 */ 6377 if (enable) { 6378 if (queue == 1 && !cbs_status[0]) { 6379 netdev_err(netdev, 6380 "Enabling CBS on queue1 before queue0\n"); 6381 return -EINVAL; 6382 } 6383 } else { 6384 if (queue == 0 && cbs_status[1]) { 6385 netdev_err(netdev, 6386 "Disabling CBS on queue0 before queue1\n"); 6387 return -EINVAL; 6388 } 6389 } 6390 6391 ring->cbs_enable = enable; 6392 ring->idleslope = idleslope; 6393 ring->sendslope = sendslope; 6394 ring->hicredit = hicredit; 6395 ring->locredit = locredit; 6396 6397 return 0; 6398 } 6399 6400 static int igc_tsn_enable_cbs(struct igc_adapter *adapter, 6401 struct tc_cbs_qopt_offload *qopt) 6402 { 6403 struct igc_hw *hw = &adapter->hw; 6404 int err; 6405 6406 if (hw->mac.type != igc_i225) 6407 return -EOPNOTSUPP; 6408 6409 if (qopt->queue < 0 || qopt->queue > 1) 6410 return -EINVAL; 6411 6412 err = igc_save_cbs_params(adapter, qopt->queue, qopt->enable, 6413 qopt->idleslope, qopt->sendslope, 6414 qopt->hicredit, qopt->locredit); 6415 if (err) 6416 return err; 6417 6418 return igc_tsn_offload_apply(adapter); 6419 } 6420 6421 static int igc_tc_query_caps(struct igc_adapter *adapter, 6422 struct tc_query_caps_base *base) 6423 { 6424 struct igc_hw *hw = &adapter->hw; 6425 6426 switch (base->type) { 6427 case TC_SETUP_QDISC_TAPRIO: { 6428 struct tc_taprio_caps *caps = base->caps; 6429 6430 caps->broken_mqprio = true; 6431 6432 if (hw->mac.type == igc_i225) { 6433 caps->supports_queue_max_sdu = true; 6434 caps->gate_mask_per_txq = true; 6435 } 6436 6437 return 0; 6438 } 6439 default: 6440 return -EOPNOTSUPP; 6441 } 6442 } 6443 6444 static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type, 6445 void *type_data) 6446 { 6447 struct igc_adapter *adapter = netdev_priv(dev); 6448 6449 adapter->tc_setup_type = type; 6450 6451 switch (type) { 6452 case TC_QUERY_CAPS: 6453 return igc_tc_query_caps(adapter, type_data); 6454 case TC_SETUP_QDISC_TAPRIO: 6455 return igc_tsn_enable_qbv_scheduling(adapter, type_data); 6456 6457 case TC_SETUP_QDISC_ETF: 6458 return igc_tsn_enable_launchtime(adapter, type_data); 6459 6460 case TC_SETUP_QDISC_CBS: 6461 return igc_tsn_enable_cbs(adapter, type_data); 6462 6463 default: 6464 return -EOPNOTSUPP; 6465 } 6466 } 6467 6468 static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf) 6469 { 6470 struct igc_adapter *adapter = netdev_priv(dev); 6471 6472 switch (bpf->command) { 6473 case XDP_SETUP_PROG: 6474 return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack); 6475 case XDP_SETUP_XSK_POOL: 6476 return igc_xdp_setup_pool(adapter, bpf->xsk.pool, 6477 bpf->xsk.queue_id); 6478 default: 6479 return -EOPNOTSUPP; 6480 } 6481 } 6482 6483 static int igc_xdp_xmit(struct net_device *dev, int num_frames, 6484 struct xdp_frame **frames, u32 flags) 6485 { 6486 struct igc_adapter *adapter = netdev_priv(dev); 6487 int cpu = smp_processor_id(); 6488 struct netdev_queue *nq; 6489 struct igc_ring *ring; 6490 int i, drops; 6491 6492 if (unlikely(!netif_carrier_ok(dev))) 6493 return -ENETDOWN; 6494 6495 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 6496 return -EINVAL; 6497 6498 ring = igc_xdp_get_tx_ring(adapter, cpu); 6499 nq = txring_txq(ring); 6500 6501 __netif_tx_lock(nq, cpu); 6502 6503 /* Avoid transmit queue timeout since we share it with the slow path */ 6504 txq_trans_cond_update(nq); 6505 6506 drops = 0; 6507 for (i = 0; i < num_frames; i++) { 6508 int err; 6509 struct xdp_frame *xdpf = frames[i]; 6510 6511 err = igc_xdp_init_tx_descriptor(ring, xdpf); 6512 if (err) { 6513 xdp_return_frame_rx_napi(xdpf); 6514 drops++; 6515 } 
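/* Frames that could not be placed on the ring were returned to the XDP
 * memory pool via xdp_return_frame_rx_napi() above; only successfully
 * queued frames count toward the (num_frames - drops) return value.
 */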
6516 } 6517 6518 if (flags & XDP_XMIT_FLUSH) 6519 igc_flush_tx_descriptors(ring); 6520 6521 __netif_tx_unlock(nq); 6522 6523 return num_frames - drops; 6524 } 6525 6526 static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter, 6527 struct igc_q_vector *q_vector) 6528 { 6529 struct igc_hw *hw = &adapter->hw; 6530 u32 eics = 0; 6531 6532 eics |= q_vector->eims_value; 6533 wr32(IGC_EICS, eics); 6534 } 6535 6536 int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) 6537 { 6538 struct igc_adapter *adapter = netdev_priv(dev); 6539 struct igc_q_vector *q_vector; 6540 struct igc_ring *ring; 6541 6542 if (test_bit(__IGC_DOWN, &adapter->state)) 6543 return -ENETDOWN; 6544 6545 if (!igc_xdp_is_enabled(adapter)) 6546 return -ENXIO; 6547 6548 if (queue_id >= adapter->num_rx_queues) 6549 return -EINVAL; 6550 6551 ring = adapter->rx_ring[queue_id]; 6552 6553 if (!ring->xsk_pool) 6554 return -ENXIO; 6555 6556 q_vector = adapter->q_vector[queue_id]; 6557 if (!napi_if_scheduled_mark_missed(&q_vector->napi)) 6558 igc_trigger_rxtxq_interrupt(adapter, q_vector); 6559 6560 return 0; 6561 } 6562 6563 static ktime_t igc_get_tstamp(struct net_device *dev, 6564 const struct skb_shared_hwtstamps *hwtstamps, 6565 bool cycles) 6566 { 6567 struct igc_adapter *adapter = netdev_priv(dev); 6568 struct igc_inline_rx_tstamps *tstamp; 6569 ktime_t timestamp; 6570 6571 tstamp = hwtstamps->netdev_data; 6572 6573 if (cycles) 6574 timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer1); 6575 else 6576 timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0); 6577 6578 return timestamp; 6579 } 6580 6581 static const struct net_device_ops igc_netdev_ops = { 6582 .ndo_open = igc_open, 6583 .ndo_stop = igc_close, 6584 .ndo_start_xmit = igc_xmit_frame, 6585 .ndo_set_rx_mode = igc_set_rx_mode, 6586 .ndo_set_mac_address = igc_set_mac, 6587 .ndo_change_mtu = igc_change_mtu, 6588 .ndo_tx_timeout = igc_tx_timeout, 6589 .ndo_get_stats64 = igc_get_stats64, 6590 .ndo_fix_features = igc_fix_features, 6591 .ndo_set_features = igc_set_features, 6592 .ndo_features_check = igc_features_check, 6593 .ndo_eth_ioctl = igc_ioctl, 6594 .ndo_setup_tc = igc_setup_tc, 6595 .ndo_bpf = igc_bpf, 6596 .ndo_xdp_xmit = igc_xdp_xmit, 6597 .ndo_xsk_wakeup = igc_xsk_wakeup, 6598 .ndo_get_tstamp = igc_get_tstamp, 6599 }; 6600 6601 /* PCIe configuration access */ 6602 void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) 6603 { 6604 struct igc_adapter *adapter = hw->back; 6605 6606 pci_read_config_word(adapter->pdev, reg, value); 6607 } 6608 6609 void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) 6610 { 6611 struct igc_adapter *adapter = hw->back; 6612 6613 pci_write_config_word(adapter->pdev, reg, *value); 6614 } 6615 6616 s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) 6617 { 6618 struct igc_adapter *adapter = hw->back; 6619 6620 if (!pci_is_pcie(adapter->pdev)) 6621 return -IGC_ERR_CONFIG; 6622 6623 pcie_capability_read_word(adapter->pdev, reg, value); 6624 6625 return IGC_SUCCESS; 6626 } 6627 6628 s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) 6629 { 6630 struct igc_adapter *adapter = hw->back; 6631 6632 if (!pci_is_pcie(adapter->pdev)) 6633 return -IGC_ERR_CONFIG; 6634 6635 pcie_capability_write_word(adapter->pdev, reg, *value); 6636 6637 return IGC_SUCCESS; 6638 } 6639 6640 u32 igc_rd32(struct igc_hw *hw, u32 reg) 6641 { 6642 struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw); 6643 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); 6644 u32 value = 0; 6645 6646 if 
(IGC_REMOVED(hw_addr)) 6647 return ~value; 6648 6649 value = readl(&hw_addr[reg]); 6650 6651 /* reads should not return all F's */ 6652 if (!(~value) && (!reg || !(~readl(hw_addr)))) { 6653 struct net_device *netdev = igc->netdev; 6654 6655 hw->hw_addr = NULL; 6656 netif_device_detach(netdev); 6657 netdev_err(netdev, "PCIe link lost, device now detached\n"); 6658 WARN(pci_device_is_present(igc->pdev), 6659 "igc: Failed to read reg 0x%x!\n", reg); 6660 } 6661 6662 return value; 6663 } 6664 6665 /* Mapping HW RSS Type to enum xdp_rss_hash_type */ 6666 static enum xdp_rss_hash_type igc_xdp_rss_type[IGC_RSS_TYPE_MAX_TABLE] = { 6667 [IGC_RSS_TYPE_NO_HASH] = XDP_RSS_TYPE_L2, 6668 [IGC_RSS_TYPE_HASH_TCP_IPV4] = XDP_RSS_TYPE_L4_IPV4_TCP, 6669 [IGC_RSS_TYPE_HASH_IPV4] = XDP_RSS_TYPE_L3_IPV4, 6670 [IGC_RSS_TYPE_HASH_TCP_IPV6] = XDP_RSS_TYPE_L4_IPV6_TCP, 6671 [IGC_RSS_TYPE_HASH_IPV6_EX] = XDP_RSS_TYPE_L3_IPV6_EX, 6672 [IGC_RSS_TYPE_HASH_IPV6] = XDP_RSS_TYPE_L3_IPV6, 6673 [IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX, 6674 [IGC_RSS_TYPE_HASH_UDP_IPV4] = XDP_RSS_TYPE_L4_IPV4_UDP, 6675 [IGC_RSS_TYPE_HASH_UDP_IPV6] = XDP_RSS_TYPE_L4_IPV6_UDP, 6676 [IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX, 6677 [10] = XDP_RSS_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW */ 6678 [11] = XDP_RSS_TYPE_NONE, /* keep array sized for SW bit-mask */ 6679 [12] = XDP_RSS_TYPE_NONE, /* to handle future HW revisons */ 6680 [13] = XDP_RSS_TYPE_NONE, 6681 [14] = XDP_RSS_TYPE_NONE, 6682 [15] = XDP_RSS_TYPE_NONE, 6683 }; 6684 6685 static int igc_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash, 6686 enum xdp_rss_hash_type *rss_type) 6687 { 6688 const struct igc_xdp_buff *ctx = (void *)_ctx; 6689 6690 if (!(ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)) 6691 return -ENODATA; 6692 6693 *hash = le32_to_cpu(ctx->rx_desc->wb.lower.hi_dword.rss); 6694 *rss_type = igc_xdp_rss_type[igc_rss_type(ctx->rx_desc)]; 6695 6696 return 0; 6697 } 6698 6699 static int igc_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp) 6700 { 6701 const struct igc_xdp_buff *ctx = (void *)_ctx; 6702 struct igc_adapter *adapter = netdev_priv(ctx->xdp.rxq->dev); 6703 struct igc_inline_rx_tstamps *tstamp = ctx->rx_ts; 6704 6705 if (igc_test_staterr(ctx->rx_desc, IGC_RXDADV_STAT_TSIP)) { 6706 *timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0); 6707 6708 return 0; 6709 } 6710 6711 return -ENODATA; 6712 } 6713 6714 static const struct xdp_metadata_ops igc_xdp_metadata_ops = { 6715 .xmo_rx_hash = igc_xdp_rx_hash, 6716 .xmo_rx_timestamp = igc_xdp_rx_timestamp, 6717 }; 6718 6719 static enum hrtimer_restart igc_qbv_scheduling_timer(struct hrtimer *timer) 6720 { 6721 struct igc_adapter *adapter = container_of(timer, struct igc_adapter, 6722 hrtimer); 6723 unsigned long flags; 6724 unsigned int i; 6725 6726 spin_lock_irqsave(&adapter->qbv_tx_lock, flags); 6727 6728 adapter->qbv_transition = true; 6729 for (i = 0; i < adapter->num_tx_queues; i++) { 6730 struct igc_ring *tx_ring = adapter->tx_ring[i]; 6731 6732 if (tx_ring->admin_gate_closed) { 6733 tx_ring->admin_gate_closed = false; 6734 tx_ring->oper_gate_closed = true; 6735 } else { 6736 tx_ring->oper_gate_closed = false; 6737 } 6738 } 6739 adapter->qbv_transition = false; 6740 6741 spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); 6742 6743 return HRTIMER_NORESTART; 6744 } 6745 6746 /** 6747 * igc_probe - Device Initialization Routine 6748 * @pdev: PCI device information struct 6749 * @ent: entry in igc_pci_tbl 6750 * 6751 * Returns 0 on success, negative on failure 6752 
* 6753 * igc_probe initializes an adapter identified by a pci_dev structure. 6754 * The OS initialization, configuring the adapter private structure, 6755 * and a hardware reset occur. 6756 */ 6757 static int igc_probe(struct pci_dev *pdev, 6758 const struct pci_device_id *ent) 6759 { 6760 struct igc_adapter *adapter; 6761 struct net_device *netdev; 6762 struct igc_hw *hw; 6763 const struct igc_info *ei = igc_info_tbl[ent->driver_data]; 6764 int err; 6765 6766 err = pci_enable_device_mem(pdev); 6767 if (err) 6768 return err; 6769 6770 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 6771 if (err) { 6772 dev_err(&pdev->dev, 6773 "No usable DMA configuration, aborting\n"); 6774 goto err_dma; 6775 } 6776 6777 err = pci_request_mem_regions(pdev, igc_driver_name); 6778 if (err) 6779 goto err_pci_reg; 6780 6781 err = pci_enable_ptm(pdev, NULL); 6782 if (err < 0) 6783 dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n"); 6784 6785 pci_set_master(pdev); 6786 6787 err = -ENOMEM; 6788 netdev = alloc_etherdev_mq(sizeof(struct igc_adapter), 6789 IGC_MAX_TX_QUEUES); 6790 6791 if (!netdev) 6792 goto err_alloc_etherdev; 6793 6794 SET_NETDEV_DEV(netdev, &pdev->dev); 6795 6796 pci_set_drvdata(pdev, netdev); 6797 adapter = netdev_priv(netdev); 6798 adapter->netdev = netdev; 6799 adapter->pdev = pdev; 6800 hw = &adapter->hw; 6801 hw->back = adapter; 6802 adapter->port_num = hw->bus.func; 6803 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 6804 6805 err = pci_save_state(pdev); 6806 if (err) 6807 goto err_ioremap; 6808 6809 err = -EIO; 6810 adapter->io_addr = ioremap(pci_resource_start(pdev, 0), 6811 pci_resource_len(pdev, 0)); 6812 if (!adapter->io_addr) 6813 goto err_ioremap; 6814 6815 /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */ 6816 hw->hw_addr = adapter->io_addr; 6817 6818 netdev->netdev_ops = &igc_netdev_ops; 6819 netdev->xdp_metadata_ops = &igc_xdp_metadata_ops; 6820 igc_ethtool_set_ops(netdev); 6821 netdev->watchdog_timeo = 5 * HZ; 6822 6823 netdev->mem_start = pci_resource_start(pdev, 0); 6824 netdev->mem_end = pci_resource_end(pdev, 0); 6825 6826 /* PCI config space info */ 6827 hw->vendor_id = pdev->vendor; 6828 hw->device_id = pdev->device; 6829 hw->revision_id = pdev->revision; 6830 hw->subsystem_vendor_id = pdev->subsystem_vendor; 6831 hw->subsystem_device_id = pdev->subsystem_device; 6832 6833 /* Copy the default MAC and PHY function pointers */ 6834 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); 6835 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); 6836 6837 /* Initialize skew-specific constants */ 6838 err = ei->get_invariants(hw); 6839 if (err) 6840 goto err_sw_init; 6841 6842 /* Add supported features to the features list*/ 6843 netdev->features |= NETIF_F_SG; 6844 netdev->features |= NETIF_F_TSO; 6845 netdev->features |= NETIF_F_TSO6; 6846 netdev->features |= NETIF_F_TSO_ECN; 6847 netdev->features |= NETIF_F_RXHASH; 6848 netdev->features |= NETIF_F_RXCSUM; 6849 netdev->features |= NETIF_F_HW_CSUM; 6850 netdev->features |= NETIF_F_SCTP_CRC; 6851 netdev->features |= NETIF_F_HW_TC; 6852 6853 #define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ 6854 NETIF_F_GSO_GRE_CSUM | \ 6855 NETIF_F_GSO_IPXIP4 | \ 6856 NETIF_F_GSO_IPXIP6 | \ 6857 NETIF_F_GSO_UDP_TUNNEL | \ 6858 NETIF_F_GSO_UDP_TUNNEL_CSUM) 6859 6860 netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES; 6861 netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES; 6862 6863 /* setup the private structure */ 6864 err = igc_sw_init(adapter); 
6865 if (err) 6866 goto err_sw_init; 6867 6868 /* copy netdev features into list of user selectable features */ 6869 netdev->hw_features |= NETIF_F_NTUPLE; 6870 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; 6871 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; 6872 netdev->hw_features |= netdev->features; 6873 6874 netdev->features |= NETIF_F_HIGHDMA; 6875 6876 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; 6877 netdev->mpls_features |= NETIF_F_HW_CSUM; 6878 netdev->hw_enc_features |= netdev->vlan_features; 6879 6880 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | 6881 NETDEV_XDP_ACT_XSK_ZEROCOPY; 6882 6883 /* MTU range: 68 - 9216 */ 6884 netdev->min_mtu = ETH_MIN_MTU; 6885 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; 6886 6887 /* before reading the NVM, reset the controller to put the device in a 6888 * known good starting state 6889 */ 6890 hw->mac.ops.reset_hw(hw); 6891 6892 if (igc_get_flash_presence_i225(hw)) { 6893 if (hw->nvm.ops.validate(hw) < 0) { 6894 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); 6895 err = -EIO; 6896 goto err_eeprom; 6897 } 6898 } 6899 6900 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { 6901 /* copy the MAC address out of the NVM */ 6902 if (hw->mac.ops.read_mac_addr(hw)) 6903 dev_err(&pdev->dev, "NVM Read Error\n"); 6904 } 6905 6906 eth_hw_addr_set(netdev, hw->mac.addr); 6907 6908 if (!is_valid_ether_addr(netdev->dev_addr)) { 6909 dev_err(&pdev->dev, "Invalid MAC Address\n"); 6910 err = -EIO; 6911 goto err_eeprom; 6912 } 6913 6914 /* configure RXPBSIZE and TXPBSIZE */ 6915 wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT); 6916 wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); 6917 6918 timer_setup(&adapter->watchdog_timer, igc_watchdog, 0); 6919 timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0); 6920 6921 INIT_WORK(&adapter->reset_task, igc_reset_task); 6922 INIT_WORK(&adapter->watchdog_task, igc_watchdog_task); 6923 6924 hrtimer_init(&adapter->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 6925 adapter->hrtimer.function = &igc_qbv_scheduling_timer; 6926 6927 /* Initialize link properties that are user-changeable */ 6928 adapter->fc_autoneg = true; 6929 hw->mac.autoneg = true; 6930 hw->phy.autoneg_advertised = 0xaf; 6931 6932 hw->fc.requested_mode = igc_fc_default; 6933 hw->fc.current_mode = igc_fc_default; 6934 6935 /* By default, support wake on port A */ 6936 adapter->flags |= IGC_FLAG_WOL_SUPPORTED; 6937 6938 /* initialize the wol settings based on the eeprom settings */ 6939 if (adapter->flags & IGC_FLAG_WOL_SUPPORTED) 6940 adapter->wol |= IGC_WUFC_MAG; 6941 6942 device_set_wakeup_enable(&adapter->pdev->dev, 6943 adapter->flags & IGC_FLAG_WOL_SUPPORTED); 6944 6945 igc_ptp_init(adapter); 6946 6947 igc_tsn_clear_schedule(adapter); 6948 6949 /* reset the hardware with the new settings */ 6950 igc_reset(adapter); 6951 6952 /* let the f/w know that the h/w is now under the control of the 6953 * driver. 
6954 */ 6955 igc_get_hw_control(adapter); 6956 6957 strscpy(netdev->name, "eth%d", sizeof(netdev->name)); 6958 err = register_netdev(netdev); 6959 if (err) 6960 goto err_register; 6961 6962 /* carrier off reporting is important to ethtool even BEFORE open */ 6963 netif_carrier_off(netdev); 6964 6965 /* Check if Media Autosense is enabled */ 6966 adapter->ei = *ei; 6967 6968 /* print pcie link status and MAC address */ 6969 pcie_print_link_status(pdev); 6970 netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); 6971 6972 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); 6973 /* Disable EEE for internal PHY devices */ 6974 hw->dev_spec._base.eee_enable = false; 6975 adapter->flags &= ~IGC_FLAG_EEE; 6976 igc_set_eee_i225(hw, false, false, false); 6977 6978 pm_runtime_put_noidle(&pdev->dev); 6979 6980 return 0; 6981 6982 err_register: 6983 igc_release_hw_control(adapter); 6984 err_eeprom: 6985 if (!igc_check_reset_block(hw)) 6986 igc_reset_phy(hw); 6987 err_sw_init: 6988 igc_clear_interrupt_scheme(adapter); 6989 iounmap(adapter->io_addr); 6990 err_ioremap: 6991 free_netdev(netdev); 6992 err_alloc_etherdev: 6993 pci_release_mem_regions(pdev); 6994 err_pci_reg: 6995 err_dma: 6996 pci_disable_device(pdev); 6997 return err; 6998 } 6999 7000 /** 7001 * igc_remove - Device Removal Routine 7002 * @pdev: PCI device information struct 7003 * 7004 * igc_remove is called by the PCI subsystem to alert the driver 7005 * that it should release a PCI device. This could be caused by a 7006 * Hot-Plug event, or because the driver is going to be removed from 7007 * memory. 7008 */ 7009 static void igc_remove(struct pci_dev *pdev) 7010 { 7011 struct net_device *netdev = pci_get_drvdata(pdev); 7012 struct igc_adapter *adapter = netdev_priv(netdev); 7013 7014 pm_runtime_get_noresume(&pdev->dev); 7015 7016 igc_flush_nfc_rules(adapter); 7017 7018 igc_ptp_stop(adapter); 7019 7020 pci_disable_ptm(pdev); 7021 pci_clear_master(pdev); 7022 7023 set_bit(__IGC_DOWN, &adapter->state); 7024 7025 del_timer_sync(&adapter->watchdog_timer); 7026 del_timer_sync(&adapter->phy_info_timer); 7027 7028 cancel_work_sync(&adapter->reset_task); 7029 cancel_work_sync(&adapter->watchdog_task); 7030 hrtimer_cancel(&adapter->hrtimer); 7031 7032 /* Release control of h/w to f/w. If f/w is AMT enabled, this 7033 * would have already happened in close and is redundant. 7034 */ 7035 igc_release_hw_control(adapter); 7036 unregister_netdev(netdev); 7037 7038 igc_clear_interrupt_scheme(adapter); 7039 pci_iounmap(pdev, adapter->io_addr); 7040 pci_release_mem_regions(pdev); 7041 7042 free_netdev(netdev); 7043 7044 pci_disable_device(pdev); 7045 } 7046 7047 static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake, 7048 bool runtime) 7049 { 7050 struct net_device *netdev = pci_get_drvdata(pdev); 7051 struct igc_adapter *adapter = netdev_priv(netdev); 7052 u32 wufc = runtime ? 
IGC_WUFC_LNKC : adapter->wol; 7053 struct igc_hw *hw = &adapter->hw; 7054 u32 ctrl, rctl, status; 7055 bool wake; 7056 7057 rtnl_lock(); 7058 netif_device_detach(netdev); 7059 7060 if (netif_running(netdev)) 7061 __igc_close(netdev, true); 7062 7063 igc_ptp_suspend(adapter); 7064 7065 igc_clear_interrupt_scheme(adapter); 7066 rtnl_unlock(); 7067 7068 status = rd32(IGC_STATUS); 7069 if (status & IGC_STATUS_LU) 7070 wufc &= ~IGC_WUFC_LNKC; 7071 7072 if (wufc) { 7073 igc_setup_rctl(adapter); 7074 igc_set_rx_mode(netdev); 7075 7076 /* turn on all-multi mode if wake on multicast is enabled */ 7077 if (wufc & IGC_WUFC_MC) { 7078 rctl = rd32(IGC_RCTL); 7079 rctl |= IGC_RCTL_MPE; 7080 wr32(IGC_RCTL, rctl); 7081 } 7082 7083 ctrl = rd32(IGC_CTRL); 7084 ctrl |= IGC_CTRL_ADVD3WUC; 7085 wr32(IGC_CTRL, ctrl); 7086 7087 /* Allow time for pending master requests to run */ 7088 igc_disable_pcie_master(hw); 7089 7090 wr32(IGC_WUC, IGC_WUC_PME_EN); 7091 wr32(IGC_WUFC, wufc); 7092 } else { 7093 wr32(IGC_WUC, 0); 7094 wr32(IGC_WUFC, 0); 7095 } 7096 7097 wake = wufc || adapter->en_mng_pt; 7098 if (!wake) 7099 igc_power_down_phy_copper_base(&adapter->hw); 7100 else 7101 igc_power_up_link(adapter); 7102 7103 if (enable_wake) 7104 *enable_wake = wake; 7105 7106 /* Release control of h/w to f/w. If f/w is AMT enabled, this 7107 * would have already happened in close and is redundant. 7108 */ 7109 igc_release_hw_control(adapter); 7110 7111 pci_disable_device(pdev); 7112 7113 return 0; 7114 } 7115 7116 #ifdef CONFIG_PM 7117 static int __maybe_unused igc_runtime_suspend(struct device *dev) 7118 { 7119 return __igc_shutdown(to_pci_dev(dev), NULL, 1); 7120 } 7121 7122 static void igc_deliver_wake_packet(struct net_device *netdev) 7123 { 7124 struct igc_adapter *adapter = netdev_priv(netdev); 7125 struct igc_hw *hw = &adapter->hw; 7126 struct sk_buff *skb; 7127 u32 wupl; 7128 7129 wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK; 7130 7131 /* WUPM stores only the first 128 bytes of the wake packet. 7132 * Read the packet only if we have the whole thing. 7133 */ 7134 if (wupl == 0 || wupl > IGC_WUPM_BYTES) 7135 return; 7136 7137 skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES); 7138 if (!skb) 7139 return; 7140 7141 skb_put(skb, wupl); 7142 7143 /* Ensure reads are 32-bit aligned */ 7144 wupl = roundup(wupl, 4); 7145 7146 memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl); 7147 7148 skb->protocol = eth_type_trans(skb, netdev); 7149 netif_rx(skb); 7150 } 7151 7152 static int __maybe_unused igc_resume(struct device *dev) 7153 { 7154 struct pci_dev *pdev = to_pci_dev(dev); 7155 struct net_device *netdev = pci_get_drvdata(pdev); 7156 struct igc_adapter *adapter = netdev_priv(netdev); 7157 struct igc_hw *hw = &adapter->hw; 7158 u32 err, val; 7159 7160 pci_set_power_state(pdev, PCI_D0); 7161 pci_restore_state(pdev); 7162 pci_save_state(pdev); 7163 7164 if (!pci_device_is_present(pdev)) 7165 return -ENODEV; 7166 err = pci_enable_device_mem(pdev); 7167 if (err) { 7168 netdev_err(netdev, "Cannot enable PCI device from suspend\n"); 7169 return err; 7170 } 7171 pci_set_master(pdev); 7172 7173 pci_enable_wake(pdev, PCI_D3hot, 0); 7174 pci_enable_wake(pdev, PCI_D3cold, 0); 7175 7176 if (igc_init_interrupt_scheme(adapter, true)) { 7177 netdev_err(netdev, "Unable to allocate memory for queues\n"); 7178 return -ENOMEM; 7179 } 7180 7181 igc_reset(adapter); 7182 7183 /* let the f/w know that the h/w is now under the control of the 7184 * driver. 
7185 */ 7186 igc_get_hw_control(adapter); 7187 7188 val = rd32(IGC_WUS); 7189 if (val & WAKE_PKT_WUS) 7190 igc_deliver_wake_packet(netdev); 7191 7192 wr32(IGC_WUS, ~0); 7193 7194 rtnl_lock(); 7195 if (!err && netif_running(netdev)) 7196 err = __igc_open(netdev, true); 7197 7198 if (!err) 7199 netif_device_attach(netdev); 7200 rtnl_unlock(); 7201 7202 return err; 7203 } 7204 7205 static int __maybe_unused igc_runtime_resume(struct device *dev) 7206 { 7207 return igc_resume(dev); 7208 } 7209 7210 static int __maybe_unused igc_suspend(struct device *dev) 7211 { 7212 return __igc_shutdown(to_pci_dev(dev), NULL, 0); 7213 } 7214 7215 static int __maybe_unused igc_runtime_idle(struct device *dev) 7216 { 7217 struct net_device *netdev = dev_get_drvdata(dev); 7218 struct igc_adapter *adapter = netdev_priv(netdev); 7219 7220 if (!igc_has_link(adapter)) 7221 pm_schedule_suspend(dev, MSEC_PER_SEC * 5); 7222 7223 return -EBUSY; 7224 } 7225 #endif /* CONFIG_PM */ 7226 7227 static void igc_shutdown(struct pci_dev *pdev) 7228 { 7229 bool wake; 7230 7231 __igc_shutdown(pdev, &wake, 0); 7232 7233 if (system_state == SYSTEM_POWER_OFF) { 7234 pci_wake_from_d3(pdev, wake); 7235 pci_set_power_state(pdev, PCI_D3hot); 7236 } 7237 } 7238 7239 /** 7240 * igc_io_error_detected - called when PCI error is detected 7241 * @pdev: Pointer to PCI device 7242 * @state: The current PCI connection state 7243 * 7244 * This function is called after a PCI bus error affecting 7245 * this device has been detected. 7246 **/ 7247 static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev, 7248 pci_channel_state_t state) 7249 { 7250 struct net_device *netdev = pci_get_drvdata(pdev); 7251 struct igc_adapter *adapter = netdev_priv(netdev); 7252 7253 netif_device_detach(netdev); 7254 7255 if (state == pci_channel_io_perm_failure) 7256 return PCI_ERS_RESULT_DISCONNECT; 7257 7258 if (netif_running(netdev)) 7259 igc_down(adapter); 7260 pci_disable_device(pdev); 7261 7262 /* Request a slot reset. */ 7263 return PCI_ERS_RESULT_NEED_RESET; 7264 } 7265 7266 /** 7267 * igc_io_slot_reset - called after the PCI bus has been reset. 7268 * @pdev: Pointer to PCI device 7269 * 7270 * Restart the card from scratch, as if from a cold-boot. Implementation 7271 * resembles the first-half of the igc_resume routine. 7272 **/ 7273 static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev) 7274 { 7275 struct net_device *netdev = pci_get_drvdata(pdev); 7276 struct igc_adapter *adapter = netdev_priv(netdev); 7277 struct igc_hw *hw = &adapter->hw; 7278 pci_ers_result_t result; 7279 7280 if (pci_enable_device_mem(pdev)) { 7281 netdev_err(netdev, "Could not re-enable PCI device after reset\n"); 7282 result = PCI_ERS_RESULT_DISCONNECT; 7283 } else { 7284 pci_set_master(pdev); 7285 pci_restore_state(pdev); 7286 pci_save_state(pdev); 7287 7288 pci_enable_wake(pdev, PCI_D3hot, 0); 7289 pci_enable_wake(pdev, PCI_D3cold, 0); 7290 7291 /* In case of PCI error, adapter loses its HW address 7292 * so we should re-assign it here. 7293 */ 7294 hw->hw_addr = adapter->io_addr; 7295 7296 igc_reset(adapter); 7297 wr32(IGC_WUS, ~0); 7298 result = PCI_ERS_RESULT_RECOVERED; 7299 } 7300 7301 return result; 7302 } 7303 7304 /** 7305 * igc_io_resume - called when traffic can start to flow again. 7306 * @pdev: Pointer to PCI device 7307 * 7308 * This callback is called when the error recovery driver tells us that 7309 * its OK to resume normal operation. Implementation resembles the 7310 * second-half of the igc_resume routine. 
7311 */
7312 static void igc_io_resume(struct pci_dev *pdev)
7313 {
7314 struct net_device *netdev = pci_get_drvdata(pdev);
7315 struct igc_adapter *adapter = netdev_priv(netdev);
7316
7317 rtnl_lock();
7318 if (netif_running(netdev)) {
7319 if (igc_open(netdev)) {
7320 netdev_err(netdev, "igc_open failed after reset\n");
rtnl_unlock();
7321 return;
7322 }
7323 }
7324
7325 netif_device_attach(netdev);
7326
7327 /* let the f/w know that the h/w is now under the control of the
7328 * driver.
7329 */
7330 igc_get_hw_control(adapter);
7331 rtnl_unlock();
7332 }
7333
7334 static const struct pci_error_handlers igc_err_handler = {
7335 .error_detected = igc_io_error_detected,
7336 .slot_reset = igc_io_slot_reset,
7337 .resume = igc_io_resume,
7338 };
7339
7340 #ifdef CONFIG_PM
7341 static const struct dev_pm_ops igc_pm_ops = {
7342 SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
7343 SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume,
7344 igc_runtime_idle)
7345 };
7346 #endif
7347
7348 static struct pci_driver igc_driver = {
7349 .name = igc_driver_name,
7350 .id_table = igc_pci_tbl,
7351 .probe = igc_probe,
7352 .remove = igc_remove,
7353 #ifdef CONFIG_PM
7354 .driver.pm = &igc_pm_ops,
7355 #endif
7356 .shutdown = igc_shutdown,
7357 .err_handler = &igc_err_handler,
7358 };
7359
7360 /**
7361 * igc_reinit_queues - reinitialize the interrupt scheme and the queues
7362 * @adapter: pointer to adapter structure
7363 */
7364 int igc_reinit_queues(struct igc_adapter *adapter)
7365 {
7366 struct net_device *netdev = adapter->netdev;
7367 int err = 0;
7368
7369 if (netif_running(netdev))
7370 igc_close(netdev);
7371
7372 igc_reset_interrupt_capability(adapter);
7373
7374 if (igc_init_interrupt_scheme(adapter, true)) {
7375 netdev_err(netdev, "Unable to allocate memory for queues\n");
7376 return -ENOMEM;
7377 }
7378
7379 if (netif_running(netdev))
7380 err = igc_open(netdev);
7381
7382 return err;
7383 }
7384
7385 /**
7386 * igc_get_hw_dev - return device
7387 * @hw: pointer to hardware structure
7388 *
7389 * used by hardware layer to print debugging information
7390 */
7391 struct net_device *igc_get_hw_dev(struct igc_hw *hw)
7392 {
7393 struct igc_adapter *adapter = hw->back;
7394
7395 return adapter->netdev;
7396 }
7397
7398 static void igc_disable_rx_ring_hw(struct igc_ring *ring)
7399 {
7400 struct igc_hw *hw = &ring->q_vector->adapter->hw;
7401 u8 idx = ring->reg_idx;
7402 u32 rxdctl;
7403
7404 rxdctl = rd32(IGC_RXDCTL(idx));
7405 rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE;
7406 rxdctl |= IGC_RXDCTL_SWFLUSH;
7407 wr32(IGC_RXDCTL(idx), rxdctl);
7408 }
7409
7410 void igc_disable_rx_ring(struct igc_ring *ring)
7411 {
7412 igc_disable_rx_ring_hw(ring);
7413 igc_clean_rx_ring(ring);
7414 }
7415
7416 void igc_enable_rx_ring(struct igc_ring *ring)
7417 {
7418 struct igc_adapter *adapter = ring->q_vector->adapter;
7419
7420 igc_configure_rx_ring(adapter, ring);
7421
7422 if (ring->xsk_pool)
7423 igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
7424 else
7425 igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
7426 }
7427
7428 void igc_disable_tx_ring(struct igc_ring *ring)
7429 {
7430 igc_disable_tx_ring_hw(ring);
7431 igc_clean_tx_ring(ring);
7432 }
7433
7434 void igc_enable_tx_ring(struct igc_ring *ring)
7435 {
7436 struct igc_adapter *adapter = ring->q_vector->adapter;
7437
7438 igc_configure_tx_ring(adapter, ring);
7439 }
7440
7441 /**
7442 * igc_init_module - Driver Registration Routine
7443 *
7444 * igc_init_module is the first routine called when the driver is
7445 * loaded. All it does is register with the PCI subsystem.
7446 */ 7447 static int __init igc_init_module(void) 7448 { 7449 int ret; 7450 7451 pr_info("%s\n", igc_driver_string); 7452 pr_info("%s\n", igc_copyright); 7453 7454 ret = pci_register_driver(&igc_driver); 7455 return ret; 7456 } 7457 7458 module_init(igc_init_module); 7459 7460 /** 7461 * igc_exit_module - Driver Exit Cleanup Routine 7462 * 7463 * igc_exit_module is called just before the driver is removed 7464 * from memory. 7465 */ 7466 static void __exit igc_exit_module(void) 7467 { 7468 pci_unregister_driver(&igc_driver); 7469 } 7470 7471 module_exit(igc_exit_module); 7472 /* igc_main.c */ 7473
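/*
 * Illustrative sketch only, not part of igc_main.c: a minimal userspace
 * program showing how the SIOCGHWTSTAMP/SIOCSHWTSTAMP commands dispatched
 * by igc_ioctl() above are typically exercised. The interface name "eth0"
 * and the chosen timestamping options are assumptions, not requirements of
 * the driver; build this as a separate userspace program.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	/* Request hardware timestamps for all transmitted and received packets */
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* Ends up in igc_ptp_set_ts_config() via the SIOCSHWTSTAMP case */
	ret = ioctl(fd, SIOCSHWTSTAMP, &ifr);
	if (ret)
		perror("SIOCSHWTSTAMP");

	close(fd);
	return ret ? 1 : 0;
}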