1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2018 Intel Corporation */ 3 4 #include <linux/module.h> 5 #include <linux/types.h> 6 #include <linux/if_vlan.h> 7 #include <linux/aer.h> 8 #include <linux/tcp.h> 9 #include <linux/udp.h> 10 #include <linux/ip.h> 11 #include <linux/pm_runtime.h> 12 #include <net/pkt_sched.h> 13 #include <linux/bpf_trace.h> 14 #include <net/xdp_sock_drv.h> 15 #include <net/ipv6.h> 16 17 #include "igc.h" 18 #include "igc_hw.h" 19 #include "igc_tsn.h" 20 #include "igc_xdp.h" 21 22 #define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver" 23 24 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 25 26 #define IGC_XDP_PASS 0 27 #define IGC_XDP_CONSUMED BIT(0) 28 #define IGC_XDP_TX BIT(1) 29 #define IGC_XDP_REDIRECT BIT(2) 30 31 static int debug = -1; 32 33 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 34 MODULE_DESCRIPTION(DRV_SUMMARY); 35 MODULE_LICENSE("GPL v2"); 36 module_param(debug, int, 0); 37 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 38 39 char igc_driver_name[] = "igc"; 40 static const char igc_driver_string[] = DRV_SUMMARY; 41 static const char igc_copyright[] = 42 "Copyright(c) 2018 Intel Corporation."; 43 44 static const struct igc_info *igc_info_tbl[] = { 45 [board_base] = &igc_base_info, 46 }; 47 48 static const struct pci_device_id igc_pci_tbl[] = { 49 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base }, 50 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base }, 51 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base }, 52 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base }, 53 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base }, 54 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base }, 55 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base }, 56 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base }, 57 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base }, 58 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base }, 59 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base }, 60 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base }, 61 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base }, 62 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base }, 63 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base }, 64 /* required last entry */ 65 {0, } 66 }; 67 68 MODULE_DEVICE_TABLE(pci, igc_pci_tbl); 69 70 enum latency_range { 71 lowest_latency = 0, 72 low_latency = 1, 73 bulk_latency = 2, 74 latency_invalid = 255 75 }; 76 77 void igc_reset(struct igc_adapter *adapter) 78 { 79 struct net_device *dev = adapter->netdev; 80 struct igc_hw *hw = &adapter->hw; 81 struct igc_fc_info *fc = &hw->fc; 82 u32 pba, hwm; 83 84 /* Repartition PBA for greater than 9k MTU if required */ 85 pba = IGC_PBA_34K; 86 87 /* flow control settings 88 * The high water mark must be low enough to fit one full frame 89 * after transmitting the pause frame. As such we must have enough 90 * space to allow for us to complete our current transmit and then 91 * receive the frame that is in progress from the link partner. 
92 * Set it to: 93 * - the full Rx FIFO size minus one full Tx plus one full Rx frame 94 */ 95 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); 96 97 fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ 98 fc->low_water = fc->high_water - 16; 99 fc->pause_time = 0xFFFF; 100 fc->send_xon = 1; 101 fc->current_mode = fc->requested_mode; 102 103 hw->mac.ops.reset_hw(hw); 104 105 if (hw->mac.ops.init_hw(hw)) 106 netdev_err(dev, "Error on hardware initialization\n"); 107 108 /* Re-establish EEE setting */ 109 igc_set_eee_i225(hw, true, true, true); 110 111 if (!netif_running(adapter->netdev)) 112 igc_power_down_phy_copper_base(&adapter->hw); 113 114 /* Enable HW to recognize an 802.1Q VLAN Ethernet packet */ 115 wr32(IGC_VET, ETH_P_8021Q); 116 117 /* Re-enable PTP, where applicable. */ 118 igc_ptp_reset(adapter); 119 120 /* Re-enable TSN offloading, where applicable. */ 121 igc_tsn_offload_apply(adapter); 122 123 igc_get_phy_info(hw); 124 } 125 126 /** 127 * igc_power_up_link - Power up the phy link 128 * @adapter: address of board private structure 129 */ 130 static void igc_power_up_link(struct igc_adapter *adapter) 131 { 132 igc_reset_phy(&adapter->hw); 133 134 igc_power_up_phy_copper(&adapter->hw); 135 136 igc_setup_link(&adapter->hw); 137 } 138 139 /** 140 * igc_release_hw_control - release control of the h/w to f/w 141 * @adapter: address of board private structure 142 * 143 * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit. 144 * For ASF and Pass Through versions of f/w this means that the 145 * driver is no longer loaded. 146 */ 147 static void igc_release_hw_control(struct igc_adapter *adapter) 148 { 149 struct igc_hw *hw = &adapter->hw; 150 u32 ctrl_ext; 151 152 /* Let firmware take over control of h/w */ 153 ctrl_ext = rd32(IGC_CTRL_EXT); 154 wr32(IGC_CTRL_EXT, 155 ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD); 156 } 157 158 /** 159 * igc_get_hw_control - get control of the h/w from f/w 160 * @adapter: address of board private structure 161 * 162 * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit. 163 * For ASF and Pass Through versions of f/w this means that 164 * the driver is loaded. 
165 */ 166 static void igc_get_hw_control(struct igc_adapter *adapter) 167 { 168 struct igc_hw *hw = &adapter->hw; 169 u32 ctrl_ext; 170 171 /* Let firmware know the driver has taken over */ 172 ctrl_ext = rd32(IGC_CTRL_EXT); 173 wr32(IGC_CTRL_EXT, 174 ctrl_ext | IGC_CTRL_EXT_DRV_LOAD); 175 } 176 177 static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf) 178 { 179 dma_unmap_single(dev, dma_unmap_addr(buf, dma), 180 dma_unmap_len(buf, len), DMA_TO_DEVICE); 181 182 dma_unmap_len_set(buf, len, 0); 183 } 184 185 /** 186 * igc_clean_tx_ring - Free Tx Buffers 187 * @tx_ring: ring to be cleaned 188 */ 189 static void igc_clean_tx_ring(struct igc_ring *tx_ring) 190 { 191 u16 i = tx_ring->next_to_clean; 192 struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; 193 u32 xsk_frames = 0; 194 195 while (i != tx_ring->next_to_use) { 196 union igc_adv_tx_desc *eop_desc, *tx_desc; 197 198 switch (tx_buffer->type) { 199 case IGC_TX_BUFFER_TYPE_XSK: 200 xsk_frames++; 201 break; 202 case IGC_TX_BUFFER_TYPE_XDP: 203 xdp_return_frame(tx_buffer->xdpf); 204 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 205 break; 206 case IGC_TX_BUFFER_TYPE_SKB: 207 dev_kfree_skb_any(tx_buffer->skb); 208 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 209 break; 210 default: 211 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); 212 break; 213 } 214 215 /* check for eop_desc to determine the end of the packet */ 216 eop_desc = tx_buffer->next_to_watch; 217 tx_desc = IGC_TX_DESC(tx_ring, i); 218 219 /* unmap remaining buffers */ 220 while (tx_desc != eop_desc) { 221 tx_buffer++; 222 tx_desc++; 223 i++; 224 if (unlikely(i == tx_ring->count)) { 225 i = 0; 226 tx_buffer = tx_ring->tx_buffer_info; 227 tx_desc = IGC_TX_DESC(tx_ring, 0); 228 } 229 230 /* unmap any remaining paged data */ 231 if (dma_unmap_len(tx_buffer, len)) 232 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 233 } 234 235 tx_buffer->next_to_watch = NULL; 236 237 /* move us one more past the eop_desc for start of next pkt */ 238 tx_buffer++; 239 i++; 240 if (unlikely(i == tx_ring->count)) { 241 i = 0; 242 tx_buffer = tx_ring->tx_buffer_info; 243 } 244 } 245 246 if (tx_ring->xsk_pool && xsk_frames) 247 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); 248 249 /* reset BQL for queue */ 250 netdev_tx_reset_queue(txring_txq(tx_ring)); 251 252 /* reset next_to_use and next_to_clean */ 253 tx_ring->next_to_use = 0; 254 tx_ring->next_to_clean = 0; 255 } 256 257 /** 258 * igc_free_tx_resources - Free Tx Resources per Queue 259 * @tx_ring: Tx descriptor ring for a specific queue 260 * 261 * Free all transmit software resources 262 */ 263 void igc_free_tx_resources(struct igc_ring *tx_ring) 264 { 265 igc_clean_tx_ring(tx_ring); 266 267 vfree(tx_ring->tx_buffer_info); 268 tx_ring->tx_buffer_info = NULL; 269 270 /* if not set, then don't free */ 271 if (!tx_ring->desc) 272 return; 273 274 dma_free_coherent(tx_ring->dev, tx_ring->size, 275 tx_ring->desc, tx_ring->dma); 276 277 tx_ring->desc = NULL; 278 } 279 280 /** 281 * igc_free_all_tx_resources - Free Tx Resources for All Queues 282 * @adapter: board private structure 283 * 284 * Free all transmit software resources 285 */ 286 static void igc_free_all_tx_resources(struct igc_adapter *adapter) 287 { 288 int i; 289 290 for (i = 0; i < adapter->num_tx_queues; i++) 291 igc_free_tx_resources(adapter->tx_ring[i]); 292 } 293 294 /** 295 * igc_clean_all_tx_rings - Free Tx Buffers for all queues 296 * @adapter: board private structure 297 */ 298 static void igc_clean_all_tx_rings(struct igc_adapter 
*adapter) 299 { 300 int i; 301 302 for (i = 0; i < adapter->num_tx_queues; i++) 303 if (adapter->tx_ring[i]) 304 igc_clean_tx_ring(adapter->tx_ring[i]); 305 } 306 307 /** 308 * igc_setup_tx_resources - allocate Tx resources (Descriptors) 309 * @tx_ring: tx descriptor ring (for a specific queue) to setup 310 * 311 * Return 0 on success, negative on failure 312 */ 313 int igc_setup_tx_resources(struct igc_ring *tx_ring) 314 { 315 struct net_device *ndev = tx_ring->netdev; 316 struct device *dev = tx_ring->dev; 317 int size = 0; 318 319 size = sizeof(struct igc_tx_buffer) * tx_ring->count; 320 tx_ring->tx_buffer_info = vzalloc(size); 321 if (!tx_ring->tx_buffer_info) 322 goto err; 323 324 /* round up to nearest 4K */ 325 tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc); 326 tx_ring->size = ALIGN(tx_ring->size, 4096); 327 328 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, 329 &tx_ring->dma, GFP_KERNEL); 330 331 if (!tx_ring->desc) 332 goto err; 333 334 tx_ring->next_to_use = 0; 335 tx_ring->next_to_clean = 0; 336 337 return 0; 338 339 err: 340 vfree(tx_ring->tx_buffer_info); 341 netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n"); 342 return -ENOMEM; 343 } 344 345 /** 346 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues 347 * @adapter: board private structure 348 * 349 * Return 0 on success, negative on failure 350 */ 351 static int igc_setup_all_tx_resources(struct igc_adapter *adapter) 352 { 353 struct net_device *dev = adapter->netdev; 354 int i, err = 0; 355 356 for (i = 0; i < adapter->num_tx_queues; i++) { 357 err = igc_setup_tx_resources(adapter->tx_ring[i]); 358 if (err) { 359 netdev_err(dev, "Error on Tx queue %u setup\n", i); 360 for (i--; i >= 0; i--) 361 igc_free_tx_resources(adapter->tx_ring[i]); 362 break; 363 } 364 } 365 366 return err; 367 } 368 369 static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring) 370 { 371 u16 i = rx_ring->next_to_clean; 372 373 dev_kfree_skb(rx_ring->skb); 374 rx_ring->skb = NULL; 375 376 /* Free all the Rx ring sk_buffs */ 377 while (i != rx_ring->next_to_alloc) { 378 struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; 379 380 /* Invalidate cache lines that may have been written to by 381 * device so that we avoid corrupting memory. 
382 */ 383 dma_sync_single_range_for_cpu(rx_ring->dev, 384 buffer_info->dma, 385 buffer_info->page_offset, 386 igc_rx_bufsz(rx_ring), 387 DMA_FROM_DEVICE); 388 389 /* free resources associated with mapping */ 390 dma_unmap_page_attrs(rx_ring->dev, 391 buffer_info->dma, 392 igc_rx_pg_size(rx_ring), 393 DMA_FROM_DEVICE, 394 IGC_RX_DMA_ATTR); 395 __page_frag_cache_drain(buffer_info->page, 396 buffer_info->pagecnt_bias); 397 398 i++; 399 if (i == rx_ring->count) 400 i = 0; 401 } 402 } 403 404 static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring) 405 { 406 struct igc_rx_buffer *bi; 407 u16 i; 408 409 for (i = 0; i < ring->count; i++) { 410 bi = &ring->rx_buffer_info[i]; 411 if (!bi->xdp) 412 continue; 413 414 xsk_buff_free(bi->xdp); 415 bi->xdp = NULL; 416 } 417 } 418 419 /** 420 * igc_clean_rx_ring - Free Rx Buffers per Queue 421 * @ring: ring to free buffers from 422 */ 423 static void igc_clean_rx_ring(struct igc_ring *ring) 424 { 425 if (ring->xsk_pool) 426 igc_clean_rx_ring_xsk_pool(ring); 427 else 428 igc_clean_rx_ring_page_shared(ring); 429 430 clear_ring_uses_large_buffer(ring); 431 432 ring->next_to_alloc = 0; 433 ring->next_to_clean = 0; 434 ring->next_to_use = 0; 435 } 436 437 /** 438 * igc_clean_all_rx_rings - Free Rx Buffers for all queues 439 * @adapter: board private structure 440 */ 441 static void igc_clean_all_rx_rings(struct igc_adapter *adapter) 442 { 443 int i; 444 445 for (i = 0; i < adapter->num_rx_queues; i++) 446 if (adapter->rx_ring[i]) 447 igc_clean_rx_ring(adapter->rx_ring[i]); 448 } 449 450 /** 451 * igc_free_rx_resources - Free Rx Resources 452 * @rx_ring: ring to clean the resources from 453 * 454 * Free all receive software resources 455 */ 456 void igc_free_rx_resources(struct igc_ring *rx_ring) 457 { 458 igc_clean_rx_ring(rx_ring); 459 460 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); 461 462 vfree(rx_ring->rx_buffer_info); 463 rx_ring->rx_buffer_info = NULL; 464 465 /* if not set, then don't free */ 466 if (!rx_ring->desc) 467 return; 468 469 dma_free_coherent(rx_ring->dev, rx_ring->size, 470 rx_ring->desc, rx_ring->dma); 471 472 rx_ring->desc = NULL; 473 } 474 475 /** 476 * igc_free_all_rx_resources - Free Rx Resources for All Queues 477 * @adapter: board private structure 478 * 479 * Free all receive software resources 480 */ 481 static void igc_free_all_rx_resources(struct igc_adapter *adapter) 482 { 483 int i; 484 485 for (i = 0; i < adapter->num_rx_queues; i++) 486 igc_free_rx_resources(adapter->rx_ring[i]); 487 } 488 489 /** 490 * igc_setup_rx_resources - allocate Rx resources (Descriptors) 491 * @rx_ring: rx descriptor ring (for a specific queue) to setup 492 * 493 * Returns 0 on success, negative on failure 494 */ 495 int igc_setup_rx_resources(struct igc_ring *rx_ring) 496 { 497 struct net_device *ndev = rx_ring->netdev; 498 struct device *dev = rx_ring->dev; 499 u8 index = rx_ring->queue_index; 500 int size, desc_len, res; 501 502 res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index, 503 rx_ring->q_vector->napi.napi_id); 504 if (res < 0) { 505 netdev_err(ndev, "Failed to register xdp_rxq index %u\n", 506 index); 507 return res; 508 } 509 510 size = sizeof(struct igc_rx_buffer) * rx_ring->count; 511 rx_ring->rx_buffer_info = vzalloc(size); 512 if (!rx_ring->rx_buffer_info) 513 goto err; 514 515 desc_len = sizeof(union igc_adv_rx_desc); 516 517 /* Round up to nearest 4K */ 518 rx_ring->size = rx_ring->count * desc_len; 519 rx_ring->size = ALIGN(rx_ring->size, 4096); 520 521 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, 522 &rx_ring->dma, 
GFP_KERNEL); 523 524 if (!rx_ring->desc) 525 goto err; 526 527 rx_ring->next_to_alloc = 0; 528 rx_ring->next_to_clean = 0; 529 rx_ring->next_to_use = 0; 530 531 return 0; 532 533 err: 534 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); 535 vfree(rx_ring->rx_buffer_info); 536 rx_ring->rx_buffer_info = NULL; 537 netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n"); 538 return -ENOMEM; 539 } 540 541 /** 542 * igc_setup_all_rx_resources - wrapper to allocate Rx resources 543 * (Descriptors) for all queues 544 * @adapter: board private structure 545 * 546 * Return 0 on success, negative on failure 547 */ 548 static int igc_setup_all_rx_resources(struct igc_adapter *adapter) 549 { 550 struct net_device *dev = adapter->netdev; 551 int i, err = 0; 552 553 for (i = 0; i < adapter->num_rx_queues; i++) { 554 err = igc_setup_rx_resources(adapter->rx_ring[i]); 555 if (err) { 556 netdev_err(dev, "Error on Rx queue %u setup\n", i); 557 for (i--; i >= 0; i--) 558 igc_free_rx_resources(adapter->rx_ring[i]); 559 break; 560 } 561 } 562 563 return err; 564 } 565 566 static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter, 567 struct igc_ring *ring) 568 { 569 if (!igc_xdp_is_enabled(adapter) || 570 !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags)) 571 return NULL; 572 573 return xsk_get_pool_from_qid(ring->netdev, ring->queue_index); 574 } 575 576 /** 577 * igc_configure_rx_ring - Configure a receive ring after Reset 578 * @adapter: board private structure 579 * @ring: receive ring to be configured 580 * 581 * Configure the Rx unit of the MAC after a reset. 582 */ 583 static void igc_configure_rx_ring(struct igc_adapter *adapter, 584 struct igc_ring *ring) 585 { 586 struct igc_hw *hw = &adapter->hw; 587 union igc_adv_rx_desc *rx_desc; 588 int reg_idx = ring->reg_idx; 589 u32 srrctl = 0, rxdctl = 0; 590 u64 rdba = ring->dma; 591 u32 buf_size; 592 593 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); 594 ring->xsk_pool = igc_get_xsk_pool(adapter, ring); 595 if (ring->xsk_pool) { 596 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, 597 MEM_TYPE_XSK_BUFF_POOL, 598 NULL)); 599 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); 600 } else { 601 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, 602 MEM_TYPE_PAGE_SHARED, 603 NULL)); 604 } 605 606 if (igc_xdp_is_enabled(adapter)) 607 set_ring_uses_large_buffer(ring); 608 609 /* disable the queue */ 610 wr32(IGC_RXDCTL(reg_idx), 0); 611 612 /* Set DMA base address registers */ 613 wr32(IGC_RDBAL(reg_idx), 614 rdba & 0x00000000ffffffffULL); 615 wr32(IGC_RDBAH(reg_idx), rdba >> 32); 616 wr32(IGC_RDLEN(reg_idx), 617 ring->count * sizeof(union igc_adv_rx_desc)); 618 619 /* initialize head and tail */ 620 ring->tail = adapter->io_addr + IGC_RDT(reg_idx); 621 wr32(IGC_RDH(reg_idx), 0); 622 writel(0, ring->tail); 623 624 /* reset next-to- use/clean to place SW in sync with hardware */ 625 ring->next_to_clean = 0; 626 ring->next_to_use = 0; 627 628 if (ring->xsk_pool) 629 buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool); 630 else if (ring_uses_large_buffer(ring)) 631 buf_size = IGC_RXBUFFER_3072; 632 else 633 buf_size = IGC_RXBUFFER_2048; 634 635 srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT; 636 srrctl |= buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT; 637 srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF; 638 639 wr32(IGC_SRRCTL(reg_idx), srrctl); 640 641 rxdctl |= IGC_RX_PTHRESH; 642 rxdctl |= IGC_RX_HTHRESH << 8; 643 rxdctl |= IGC_RX_WTHRESH << 16; 644 645 /* initialize rx_buffer_info */ 646 memset(ring->rx_buffer_info, 0, 647 sizeof(struct 
igc_rx_buffer) * ring->count); 648 649 /* initialize Rx descriptor 0 */ 650 rx_desc = IGC_RX_DESC(ring, 0); 651 rx_desc->wb.upper.length = 0; 652 653 /* enable receive descriptor fetching */ 654 rxdctl |= IGC_RXDCTL_QUEUE_ENABLE; 655 656 wr32(IGC_RXDCTL(reg_idx), rxdctl); 657 } 658 659 /** 660 * igc_configure_rx - Configure receive Unit after Reset 661 * @adapter: board private structure 662 * 663 * Configure the Rx unit of the MAC after a reset. 664 */ 665 static void igc_configure_rx(struct igc_adapter *adapter) 666 { 667 int i; 668 669 /* Setup the HW Rx Head and Tail Descriptor Pointers and 670 * the Base and Length of the Rx Descriptor Ring 671 */ 672 for (i = 0; i < adapter->num_rx_queues; i++) 673 igc_configure_rx_ring(adapter, adapter->rx_ring[i]); 674 } 675 676 /** 677 * igc_configure_tx_ring - Configure transmit ring after Reset 678 * @adapter: board private structure 679 * @ring: tx ring to configure 680 * 681 * Configure a transmit ring after a reset. 682 */ 683 static void igc_configure_tx_ring(struct igc_adapter *adapter, 684 struct igc_ring *ring) 685 { 686 struct igc_hw *hw = &adapter->hw; 687 int reg_idx = ring->reg_idx; 688 u64 tdba = ring->dma; 689 u32 txdctl = 0; 690 691 ring->xsk_pool = igc_get_xsk_pool(adapter, ring); 692 693 /* disable the queue */ 694 wr32(IGC_TXDCTL(reg_idx), 0); 695 wrfl(); 696 mdelay(10); 697 698 wr32(IGC_TDLEN(reg_idx), 699 ring->count * sizeof(union igc_adv_tx_desc)); 700 wr32(IGC_TDBAL(reg_idx), 701 tdba & 0x00000000ffffffffULL); 702 wr32(IGC_TDBAH(reg_idx), tdba >> 32); 703 704 ring->tail = adapter->io_addr + IGC_TDT(reg_idx); 705 wr32(IGC_TDH(reg_idx), 0); 706 writel(0, ring->tail); 707 708 txdctl |= IGC_TX_PTHRESH; 709 txdctl |= IGC_TX_HTHRESH << 8; 710 txdctl |= IGC_TX_WTHRESH << 16; 711 712 txdctl |= IGC_TXDCTL_QUEUE_ENABLE; 713 wr32(IGC_TXDCTL(reg_idx), txdctl); 714 } 715 716 /** 717 * igc_configure_tx - Configure transmit Unit after Reset 718 * @adapter: board private structure 719 * 720 * Configure the Tx unit of the MAC after a reset. 721 */ 722 static void igc_configure_tx(struct igc_adapter *adapter) 723 { 724 int i; 725 726 for (i = 0; i < adapter->num_tx_queues; i++) 727 igc_configure_tx_ring(adapter, adapter->tx_ring[i]); 728 } 729 730 /** 731 * igc_setup_mrqc - configure the multiple receive queue control registers 732 * @adapter: Board private structure 733 */ 734 static void igc_setup_mrqc(struct igc_adapter *adapter) 735 { 736 struct igc_hw *hw = &adapter->hw; 737 u32 j, num_rx_queues; 738 u32 mrqc, rxcsum; 739 u32 rss_key[10]; 740 741 netdev_rss_key_fill(rss_key, sizeof(rss_key)); 742 for (j = 0; j < 10; j++) 743 wr32(IGC_RSSRK(j), rss_key[j]); 744 745 num_rx_queues = adapter->rss_queues; 746 747 if (adapter->rss_indir_tbl_init != num_rx_queues) { 748 for (j = 0; j < IGC_RETA_SIZE; j++) 749 adapter->rss_indir_tbl[j] = 750 (j * num_rx_queues) / IGC_RETA_SIZE; 751 adapter->rss_indir_tbl_init = num_rx_queues; 752 } 753 igc_write_rss_indir_tbl(adapter); 754 755 /* Disable raw packet checksumming so that RSS hash is placed in 756 * descriptor on writeback. 
No need to enable TCP/UDP/IP checksum 757 * offloads as they are enabled by default 758 */ 759 rxcsum = rd32(IGC_RXCSUM); 760 rxcsum |= IGC_RXCSUM_PCSD; 761 762 /* Enable Receive Checksum Offload for SCTP */ 763 rxcsum |= IGC_RXCSUM_CRCOFL; 764 765 /* Don't need to set TUOFL or IPOFL, they default to 1 */ 766 wr32(IGC_RXCSUM, rxcsum); 767 768 /* Generate RSS hash based on packet types, TCP/UDP 769 * port numbers and/or IPv4/v6 src and dst addresses 770 */ 771 mrqc = IGC_MRQC_RSS_FIELD_IPV4 | 772 IGC_MRQC_RSS_FIELD_IPV4_TCP | 773 IGC_MRQC_RSS_FIELD_IPV6 | 774 IGC_MRQC_RSS_FIELD_IPV6_TCP | 775 IGC_MRQC_RSS_FIELD_IPV6_TCP_EX; 776 777 if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) 778 mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; 779 if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) 780 mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; 781 782 mrqc |= IGC_MRQC_ENABLE_RSS_MQ; 783 784 wr32(IGC_MRQC, mrqc); 785 } 786 787 /** 788 * igc_setup_rctl - configure the receive control registers 789 * @adapter: Board private structure 790 */ 791 static void igc_setup_rctl(struct igc_adapter *adapter) 792 { 793 struct igc_hw *hw = &adapter->hw; 794 u32 rctl; 795 796 rctl = rd32(IGC_RCTL); 797 798 rctl &= ~(3 << IGC_RCTL_MO_SHIFT); 799 rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC); 800 801 rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF | 802 (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT); 803 804 /* enable stripping of CRC. Newer features require 805 * that the HW strips the CRC. 806 */ 807 rctl |= IGC_RCTL_SECRC; 808 809 /* disable store bad packets and clear size bits. */ 810 rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256); 811 812 /* enable LPE to allow for reception of jumbo frames */ 813 rctl |= IGC_RCTL_LPE; 814 815 /* disable queue 0 to prevent tail write w/o re-config */ 816 wr32(IGC_RXDCTL(0), 0); 817 818 /* This is useful for sniffing bad packets. */ 819 if (adapter->netdev->features & NETIF_F_RXALL) { 820 /* UPE and MPE will be handled by normal PROMISC logic 821 * in set_rx_mode 822 */ 823 rctl |= (IGC_RCTL_SBP | /* Receive bad packets */ 824 IGC_RCTL_BAM | /* RX All Bcast Pkts */ 825 IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ 826 827 rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */ 828 IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */ 829 } 830 831 wr32(IGC_RCTL, rctl); 832 } 833 834 /** 835 * igc_setup_tctl - configure the transmit control registers 836 * @adapter: Board private structure 837 */ 838 static void igc_setup_tctl(struct igc_adapter *adapter) 839 { 840 struct igc_hw *hw = &adapter->hw; 841 u32 tctl; 842 843 /* disable queue 0 which icould be enabled by default */ 844 wr32(IGC_TXDCTL(0), 0); 845 846 /* Program the Transmit Control Register */ 847 tctl = rd32(IGC_TCTL); 848 tctl &= ~IGC_TCTL_CT; 849 tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC | 850 (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT); 851 852 /* Enable transmits */ 853 tctl |= IGC_TCTL_EN; 854 855 wr32(IGC_TCTL, tctl); 856 } 857 858 /** 859 * igc_set_mac_filter_hw() - Set MAC address filter in hardware 860 * @adapter: Pointer to adapter where the filter should be set 861 * @index: Filter index 862 * @type: MAC address filter type (source or destination) 863 * @addr: MAC address 864 * @queue: If non-negative, queue assignment feature is enabled and frames 865 * matching the filter are enqueued onto 'queue'. Otherwise, queue 866 * assignment is disabled. 
867 */ 868 static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index, 869 enum igc_mac_filter_type type, 870 const u8 *addr, int queue) 871 { 872 struct net_device *dev = adapter->netdev; 873 struct igc_hw *hw = &adapter->hw; 874 u32 ral, rah; 875 876 if (WARN_ON(index >= hw->mac.rar_entry_count)) 877 return; 878 879 ral = le32_to_cpup((__le32 *)(addr)); 880 rah = le16_to_cpup((__le16 *)(addr + 4)); 881 882 if (type == IGC_MAC_FILTER_TYPE_SRC) { 883 rah &= ~IGC_RAH_ASEL_MASK; 884 rah |= IGC_RAH_ASEL_SRC_ADDR; 885 } 886 887 if (queue >= 0) { 888 rah &= ~IGC_RAH_QSEL_MASK; 889 rah |= (queue << IGC_RAH_QSEL_SHIFT); 890 rah |= IGC_RAH_QSEL_ENABLE; 891 } 892 893 rah |= IGC_RAH_AV; 894 895 wr32(IGC_RAL(index), ral); 896 wr32(IGC_RAH(index), rah); 897 898 netdev_dbg(dev, "MAC address filter set in HW: index %d", index); 899 } 900 901 /** 902 * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware 903 * @adapter: Pointer to adapter where the filter should be cleared 904 * @index: Filter index 905 */ 906 static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index) 907 { 908 struct net_device *dev = adapter->netdev; 909 struct igc_hw *hw = &adapter->hw; 910 911 if (WARN_ON(index >= hw->mac.rar_entry_count)) 912 return; 913 914 wr32(IGC_RAL(index), 0); 915 wr32(IGC_RAH(index), 0); 916 917 netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index); 918 } 919 920 /* Set default MAC address for the PF in the first RAR entry */ 921 static void igc_set_default_mac_filter(struct igc_adapter *adapter) 922 { 923 struct net_device *dev = adapter->netdev; 924 u8 *addr = adapter->hw.mac.addr; 925 926 netdev_dbg(dev, "Set default MAC address filter: address %pM", addr); 927 928 igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1); 929 } 930 931 /** 932 * igc_set_mac - Change the Ethernet Address of the NIC 933 * @netdev: network interface device structure 934 * @p: pointer to an address structure 935 * 936 * Returns 0 on success, negative on failure 937 */ 938 static int igc_set_mac(struct net_device *netdev, void *p) 939 { 940 struct igc_adapter *adapter = netdev_priv(netdev); 941 struct igc_hw *hw = &adapter->hw; 942 struct sockaddr *addr = p; 943 944 if (!is_valid_ether_addr(addr->sa_data)) 945 return -EADDRNOTAVAIL; 946 947 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 948 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 949 950 /* set the correct pool for the new PF MAC address in entry 0 */ 951 igc_set_default_mac_filter(adapter); 952 953 return 0; 954 } 955 956 /** 957 * igc_write_mc_addr_list - write multicast addresses to MTA 958 * @netdev: network interface device structure 959 * 960 * Writes multicast address list to the MTA hash table. 961 * Returns: -ENOMEM on failure 962 * 0 on no addresses written 963 * X on writing X addresses to MTA 964 **/ 965 static int igc_write_mc_addr_list(struct net_device *netdev) 966 { 967 struct igc_adapter *adapter = netdev_priv(netdev); 968 struct igc_hw *hw = &adapter->hw; 969 struct netdev_hw_addr *ha; 970 u8 *mta_list; 971 int i; 972 973 if (netdev_mc_empty(netdev)) { 974 /* nothing to program, so clear mc list */ 975 igc_update_mc_addr_list(hw, NULL, 0); 976 return 0; 977 } 978 979 mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC); 980 if (!mta_list) 981 return -ENOMEM; 982 983 /* The shared function expects a packed array of only addresses. 
*/ 984 i = 0; 985 netdev_for_each_mc_addr(ha, netdev) 986 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); 987 988 igc_update_mc_addr_list(hw, mta_list, i); 989 kfree(mta_list); 990 991 return netdev_mc_count(netdev); 992 } 993 994 static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime) 995 { 996 ktime_t cycle_time = adapter->cycle_time; 997 ktime_t base_time = adapter->base_time; 998 u32 launchtime; 999 1000 /* FIXME: when using ETF together with taprio, we may have a 1001 * case where 'delta' is larger than the cycle_time, this may 1002 * cause problems if we don't read the current value of 1003 * IGC_BASET, as the value writen into the launchtime 1004 * descriptor field may be misinterpreted. 1005 */ 1006 div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime); 1007 1008 return cpu_to_le32(launchtime); 1009 } 1010 1011 static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, 1012 struct igc_tx_buffer *first, 1013 u32 vlan_macip_lens, u32 type_tucmd, 1014 u32 mss_l4len_idx) 1015 { 1016 struct igc_adv_tx_context_desc *context_desc; 1017 u16 i = tx_ring->next_to_use; 1018 1019 context_desc = IGC_TX_CTXTDESC(tx_ring, i); 1020 1021 i++; 1022 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 1023 1024 /* set bits to identify this as an advanced context descriptor */ 1025 type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT; 1026 1027 /* For i225, context index must be unique per ring. */ 1028 if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) 1029 mss_l4len_idx |= tx_ring->reg_idx << 4; 1030 1031 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 1032 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); 1033 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 1034 1035 /* We assume there is always a valid Tx time available. Invalid times 1036 * should have been handled by the upper layers. 
1037 */ 1038 if (tx_ring->launchtime_enable) { 1039 struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); 1040 ktime_t txtime = first->skb->tstamp; 1041 1042 skb_txtime_consumed(first->skb); 1043 context_desc->launch_time = igc_tx_launchtime(adapter, 1044 txtime); 1045 } else { 1046 context_desc->launch_time = 0; 1047 } 1048 } 1049 1050 static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first) 1051 { 1052 struct sk_buff *skb = first->skb; 1053 u32 vlan_macip_lens = 0; 1054 u32 type_tucmd = 0; 1055 1056 if (skb->ip_summed != CHECKSUM_PARTIAL) { 1057 csum_failed: 1058 if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) && 1059 !tx_ring->launchtime_enable) 1060 return; 1061 goto no_csum; 1062 } 1063 1064 switch (skb->csum_offset) { 1065 case offsetof(struct tcphdr, check): 1066 type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; 1067 fallthrough; 1068 case offsetof(struct udphdr, check): 1069 break; 1070 case offsetof(struct sctphdr, checksum): 1071 /* validate that this is actually an SCTP request */ 1072 if (skb_csum_is_sctp(skb)) { 1073 type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP; 1074 break; 1075 } 1076 fallthrough; 1077 default: 1078 skb_checksum_help(skb); 1079 goto csum_failed; 1080 } 1081 1082 /* update TX checksum flag */ 1083 first->tx_flags |= IGC_TX_FLAGS_CSUM; 1084 vlan_macip_lens = skb_checksum_start_offset(skb) - 1085 skb_network_offset(skb); 1086 no_csum: 1087 vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT; 1088 vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; 1089 1090 igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0); 1091 } 1092 1093 static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) 1094 { 1095 struct net_device *netdev = tx_ring->netdev; 1096 1097 netif_stop_subqueue(netdev, tx_ring->queue_index); 1098 1099 /* memory barriier comment */ 1100 smp_mb(); 1101 1102 /* We need to check again in a case another CPU has just 1103 * made room available. 1104 */ 1105 if (igc_desc_unused(tx_ring) < size) 1106 return -EBUSY; 1107 1108 /* A reprieve! */ 1109 netif_wake_subqueue(netdev, tx_ring->queue_index); 1110 1111 u64_stats_update_begin(&tx_ring->tx_syncp2); 1112 tx_ring->tx_stats.restart_queue2++; 1113 u64_stats_update_end(&tx_ring->tx_syncp2); 1114 1115 return 0; 1116 } 1117 1118 static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) 1119 { 1120 if (igc_desc_unused(tx_ring) >= size) 1121 return 0; 1122 return __igc_maybe_stop_tx(tx_ring, size); 1123 } 1124 1125 #define IGC_SET_FLAG(_input, _flag, _result) \ 1126 (((_flag) <= (_result)) ? 
\ 1127 ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \ 1128 ((u32)((_input) & (_flag)) / ((_flag) / (_result)))) 1129 1130 static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) 1131 { 1132 /* set type for advanced descriptor with frame checksum insertion */ 1133 u32 cmd_type = IGC_ADVTXD_DTYP_DATA | 1134 IGC_ADVTXD_DCMD_DEXT | 1135 IGC_ADVTXD_DCMD_IFCS; 1136 1137 /* set HW vlan bit if vlan is present */ 1138 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN, 1139 IGC_ADVTXD_DCMD_VLE); 1140 1141 /* set segmentation bits for TSO */ 1142 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO, 1143 (IGC_ADVTXD_DCMD_TSE)); 1144 1145 /* set timestamp bit if present */ 1146 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP, 1147 (IGC_ADVTXD_MAC_TSTAMP)); 1148 1149 /* insert frame checksum */ 1150 cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS); 1151 1152 return cmd_type; 1153 } 1154 1155 static void igc_tx_olinfo_status(struct igc_ring *tx_ring, 1156 union igc_adv_tx_desc *tx_desc, 1157 u32 tx_flags, unsigned int paylen) 1158 { 1159 u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT; 1160 1161 /* insert L4 checksum */ 1162 olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) * 1163 ((IGC_TXD_POPTS_TXSM << 8) / 1164 IGC_TX_FLAGS_CSUM); 1165 1166 /* insert IPv4 checksum */ 1167 olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) * 1168 (((IGC_TXD_POPTS_IXSM << 8)) / 1169 IGC_TX_FLAGS_IPV4); 1170 1171 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 1172 } 1173 1174 static int igc_tx_map(struct igc_ring *tx_ring, 1175 struct igc_tx_buffer *first, 1176 const u8 hdr_len) 1177 { 1178 struct sk_buff *skb = first->skb; 1179 struct igc_tx_buffer *tx_buffer; 1180 union igc_adv_tx_desc *tx_desc; 1181 u32 tx_flags = first->tx_flags; 1182 skb_frag_t *frag; 1183 u16 i = tx_ring->next_to_use; 1184 unsigned int data_len, size; 1185 dma_addr_t dma; 1186 u32 cmd_type; 1187 1188 cmd_type = igc_tx_cmd_type(skb, tx_flags); 1189 tx_desc = IGC_TX_DESC(tx_ring, i); 1190 1191 igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); 1192 1193 size = skb_headlen(skb); 1194 data_len = skb->data_len; 1195 1196 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 1197 1198 tx_buffer = first; 1199 1200 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 1201 if (dma_mapping_error(tx_ring->dev, dma)) 1202 goto dma_error; 1203 1204 /* record length, and DMA address */ 1205 dma_unmap_len_set(tx_buffer, len, size); 1206 dma_unmap_addr_set(tx_buffer, dma, dma); 1207 1208 tx_desc->read.buffer_addr = cpu_to_le64(dma); 1209 1210 while (unlikely(size > IGC_MAX_DATA_PER_TXD)) { 1211 tx_desc->read.cmd_type_len = 1212 cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD); 1213 1214 i++; 1215 tx_desc++; 1216 if (i == tx_ring->count) { 1217 tx_desc = IGC_TX_DESC(tx_ring, 0); 1218 i = 0; 1219 } 1220 tx_desc->read.olinfo_status = 0; 1221 1222 dma += IGC_MAX_DATA_PER_TXD; 1223 size -= IGC_MAX_DATA_PER_TXD; 1224 1225 tx_desc->read.buffer_addr = cpu_to_le64(dma); 1226 } 1227 1228 if (likely(!data_len)) 1229 break; 1230 1231 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); 1232 1233 i++; 1234 tx_desc++; 1235 if (i == tx_ring->count) { 1236 tx_desc = IGC_TX_DESC(tx_ring, 0); 1237 i = 0; 1238 } 1239 tx_desc->read.olinfo_status = 0; 1240 1241 size = skb_frag_size(frag); 1242 data_len -= size; 1243 1244 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, 1245 size, DMA_TO_DEVICE); 1246 1247 tx_buffer = &tx_ring->tx_buffer_info[i]; 1248 } 1249 1250 /* write last descriptor with RS and EOP 
bits */ 1251 cmd_type |= size | IGC_TXD_DCMD; 1252 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); 1253 1254 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); 1255 1256 /* set the timestamp */ 1257 first->time_stamp = jiffies; 1258 1259 skb_tx_timestamp(skb); 1260 1261 /* Force memory writes to complete before letting h/w know there 1262 * are new descriptors to fetch. (Only applicable for weak-ordered 1263 * memory model archs, such as IA-64). 1264 * 1265 * We also need this memory barrier to make certain all of the 1266 * status bits have been updated before next_to_watch is written. 1267 */ 1268 wmb(); 1269 1270 /* set next_to_watch value indicating a packet is present */ 1271 first->next_to_watch = tx_desc; 1272 1273 i++; 1274 if (i == tx_ring->count) 1275 i = 0; 1276 1277 tx_ring->next_to_use = i; 1278 1279 /* Make sure there is space in the ring for the next send. */ 1280 igc_maybe_stop_tx(tx_ring, DESC_NEEDED); 1281 1282 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { 1283 writel(i, tx_ring->tail); 1284 } 1285 1286 return 0; 1287 dma_error: 1288 netdev_err(tx_ring->netdev, "TX DMA map failed\n"); 1289 tx_buffer = &tx_ring->tx_buffer_info[i]; 1290 1291 /* clear dma mappings for failed tx_buffer_info map */ 1292 while (tx_buffer != first) { 1293 if (dma_unmap_len(tx_buffer, len)) 1294 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 1295 1296 if (i-- == 0) 1297 i += tx_ring->count; 1298 tx_buffer = &tx_ring->tx_buffer_info[i]; 1299 } 1300 1301 if (dma_unmap_len(tx_buffer, len)) 1302 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 1303 1304 dev_kfree_skb_any(tx_buffer->skb); 1305 tx_buffer->skb = NULL; 1306 1307 tx_ring->next_to_use = i; 1308 1309 return -1; 1310 } 1311 1312 static int igc_tso(struct igc_ring *tx_ring, 1313 struct igc_tx_buffer *first, 1314 u8 *hdr_len) 1315 { 1316 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; 1317 struct sk_buff *skb = first->skb; 1318 union { 1319 struct iphdr *v4; 1320 struct ipv6hdr *v6; 1321 unsigned char *hdr; 1322 } ip; 1323 union { 1324 struct tcphdr *tcp; 1325 struct udphdr *udp; 1326 unsigned char *hdr; 1327 } l4; 1328 u32 paylen, l4_offset; 1329 int err; 1330 1331 if (skb->ip_summed != CHECKSUM_PARTIAL) 1332 return 0; 1333 1334 if (!skb_is_gso(skb)) 1335 return 0; 1336 1337 err = skb_cow_head(skb, 0); 1338 if (err < 0) 1339 return err; 1340 1341 ip.hdr = skb_network_header(skb); 1342 l4.hdr = skb_checksum_start(skb); 1343 1344 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 1345 type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; 1346 1347 /* initialize outer IP header fields */ 1348 if (ip.v4->version == 4) { 1349 unsigned char *csum_start = skb_checksum_start(skb); 1350 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); 1351 1352 /* IP header will have to cancel out any data that 1353 * is not a part of the outer IP header 1354 */ 1355 ip.v4->check = csum_fold(csum_partial(trans_start, 1356 csum_start - trans_start, 1357 0)); 1358 type_tucmd |= IGC_ADVTXD_TUCMD_IPV4; 1359 1360 ip.v4->tot_len = 0; 1361 first->tx_flags |= IGC_TX_FLAGS_TSO | 1362 IGC_TX_FLAGS_CSUM | 1363 IGC_TX_FLAGS_IPV4; 1364 } else { 1365 ip.v6->payload_len = 0; 1366 first->tx_flags |= IGC_TX_FLAGS_TSO | 1367 IGC_TX_FLAGS_CSUM; 1368 } 1369 1370 /* determine offset of inner transport header */ 1371 l4_offset = l4.hdr - skb->data; 1372 1373 /* remove payload length from inner checksum */ 1374 paylen = skb->len - l4_offset; 1375 if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) { 1376 /* compute length of segmentation header */ 1377 *hdr_len = (l4.tcp->doff * 4) + 
l4_offset; 1378 csum_replace_by_diff(&l4.tcp->check, 1379 (__force __wsum)htonl(paylen)); 1380 } else { 1381 /* compute length of segmentation header */ 1382 *hdr_len = sizeof(*l4.udp) + l4_offset; 1383 csum_replace_by_diff(&l4.udp->check, 1384 (__force __wsum)htonl(paylen)); 1385 } 1386 1387 /* update gso size and bytecount with header size */ 1388 first->gso_segs = skb_shinfo(skb)->gso_segs; 1389 first->bytecount += (first->gso_segs - 1) * *hdr_len; 1390 1391 /* MSS L4LEN IDX */ 1392 mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT; 1393 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT; 1394 1395 /* VLAN MACLEN IPLEN */ 1396 vlan_macip_lens = l4.hdr - ip.hdr; 1397 vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT; 1398 vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; 1399 1400 igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, 1401 type_tucmd, mss_l4len_idx); 1402 1403 return 1; 1404 } 1405 1406 static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, 1407 struct igc_ring *tx_ring) 1408 { 1409 u16 count = TXD_USE_COUNT(skb_headlen(skb)); 1410 __be16 protocol = vlan_get_protocol(skb); 1411 struct igc_tx_buffer *first; 1412 u32 tx_flags = 0; 1413 unsigned short f; 1414 u8 hdr_len = 0; 1415 int tso = 0; 1416 1417 /* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD, 1418 * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD, 1419 * + 2 desc gap to keep tail from touching head, 1420 * + 1 desc for context descriptor, 1421 * otherwise try next time 1422 */ 1423 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 1424 count += TXD_USE_COUNT(skb_frag_size( 1425 &skb_shinfo(skb)->frags[f])); 1426 1427 if (igc_maybe_stop_tx(tx_ring, count + 3)) { 1428 /* this is a hard error */ 1429 return NETDEV_TX_BUSY; 1430 } 1431 1432 /* record the location of the first descriptor for this packet */ 1433 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; 1434 first->type = IGC_TX_BUFFER_TYPE_SKB; 1435 first->skb = skb; 1436 first->bytecount = skb->len; 1437 first->gso_segs = 1; 1438 1439 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 1440 struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); 1441 1442 /* FIXME: add support for retrieving timestamps from 1443 * the other timer registers before skipping the 1444 * timestamping request. 
1445 */ 1446 if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && 1447 !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS, 1448 &adapter->state)) { 1449 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1450 tx_flags |= IGC_TX_FLAGS_TSTAMP; 1451 1452 adapter->ptp_tx_skb = skb_get(skb); 1453 adapter->ptp_tx_start = jiffies; 1454 } else { 1455 adapter->tx_hwtstamp_skipped++; 1456 } 1457 } 1458 1459 if (skb_vlan_tag_present(skb)) { 1460 tx_flags |= IGC_TX_FLAGS_VLAN; 1461 tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT); 1462 } 1463 1464 /* record initial flags and protocol */ 1465 first->tx_flags = tx_flags; 1466 first->protocol = protocol; 1467 1468 tso = igc_tso(tx_ring, first, &hdr_len); 1469 if (tso < 0) 1470 goto out_drop; 1471 else if (!tso) 1472 igc_tx_csum(tx_ring, first); 1473 1474 igc_tx_map(tx_ring, first, hdr_len); 1475 1476 return NETDEV_TX_OK; 1477 1478 out_drop: 1479 dev_kfree_skb_any(first->skb); 1480 first->skb = NULL; 1481 1482 return NETDEV_TX_OK; 1483 } 1484 1485 static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter, 1486 struct sk_buff *skb) 1487 { 1488 unsigned int r_idx = skb->queue_mapping; 1489 1490 if (r_idx >= adapter->num_tx_queues) 1491 r_idx = r_idx % adapter->num_tx_queues; 1492 1493 return adapter->tx_ring[r_idx]; 1494 } 1495 1496 static netdev_tx_t igc_xmit_frame(struct sk_buff *skb, 1497 struct net_device *netdev) 1498 { 1499 struct igc_adapter *adapter = netdev_priv(netdev); 1500 1501 /* The minimum packet size with TCTL.PSP set is 17 so pad the skb 1502 * in order to meet this minimum size requirement. 1503 */ 1504 if (skb->len < 17) { 1505 if (skb_padto(skb, 17)) 1506 return NETDEV_TX_OK; 1507 skb->len = 17; 1508 } 1509 1510 return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb)); 1511 } 1512 1513 static void igc_rx_checksum(struct igc_ring *ring, 1514 union igc_adv_rx_desc *rx_desc, 1515 struct sk_buff *skb) 1516 { 1517 skb_checksum_none_assert(skb); 1518 1519 /* Ignore Checksum bit is set */ 1520 if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM)) 1521 return; 1522 1523 /* Rx checksum disabled via ethtool */ 1524 if (!(ring->netdev->features & NETIF_F_RXCSUM)) 1525 return; 1526 1527 /* TCP/UDP checksum error bit is set */ 1528 if (igc_test_staterr(rx_desc, 1529 IGC_RXDEXT_STATERR_L4E | 1530 IGC_RXDEXT_STATERR_IPE)) { 1531 /* work around errata with sctp packets where the TCPE aka 1532 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) 1533 * packets (aka let the stack check the crc32c) 1534 */ 1535 if (!(skb->len == 60 && 1536 test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { 1537 u64_stats_update_begin(&ring->rx_syncp); 1538 ring->rx_stats.csum_err++; 1539 u64_stats_update_end(&ring->rx_syncp); 1540 } 1541 /* let the stack verify checksum errors */ 1542 return; 1543 } 1544 /* It must be a TCP or UDP packet with a valid checksum */ 1545 if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS | 1546 IGC_RXD_STAT_UDPCS)) 1547 skb->ip_summed = CHECKSUM_UNNECESSARY; 1548 1549 netdev_dbg(ring->netdev, "cksum success: bits %08X\n", 1550 le32_to_cpu(rx_desc->wb.upper.status_error)); 1551 } 1552 1553 static inline void igc_rx_hash(struct igc_ring *ring, 1554 union igc_adv_rx_desc *rx_desc, 1555 struct sk_buff *skb) 1556 { 1557 if (ring->netdev->features & NETIF_F_RXHASH) 1558 skb_set_hash(skb, 1559 le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), 1560 PKT_HASH_TYPE_L3); 1561 } 1562 1563 static void igc_rx_vlan(struct igc_ring *rx_ring, 1564 union igc_adv_rx_desc *rx_desc, 1565 struct sk_buff *skb) 1566 { 1567 
struct net_device *dev = rx_ring->netdev; 1568 u16 vid; 1569 1570 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && 1571 igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) { 1572 if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) && 1573 test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) 1574 vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan); 1575 else 1576 vid = le16_to_cpu(rx_desc->wb.upper.vlan); 1577 1578 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); 1579 } 1580 } 1581 1582 /** 1583 * igc_process_skb_fields - Populate skb header fields from Rx descriptor 1584 * @rx_ring: rx descriptor ring packet is being transacted on 1585 * @rx_desc: pointer to the EOP Rx descriptor 1586 * @skb: pointer to current skb being populated 1587 * 1588 * This function checks the ring, descriptor, and packet information in order 1589 * to populate the hash, checksum, VLAN, protocol, and other fields within the 1590 * skb. 1591 */ 1592 static void igc_process_skb_fields(struct igc_ring *rx_ring, 1593 union igc_adv_rx_desc *rx_desc, 1594 struct sk_buff *skb) 1595 { 1596 igc_rx_hash(rx_ring, rx_desc, skb); 1597 1598 igc_rx_checksum(rx_ring, rx_desc, skb); 1599 1600 igc_rx_vlan(rx_ring, rx_desc, skb); 1601 1602 skb_record_rx_queue(skb, rx_ring->queue_index); 1603 1604 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 1605 } 1606 1607 static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features) 1608 { 1609 bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); 1610 struct igc_adapter *adapter = netdev_priv(netdev); 1611 struct igc_hw *hw = &adapter->hw; 1612 u32 ctrl; 1613 1614 ctrl = rd32(IGC_CTRL); 1615 1616 if (enable) { 1617 /* enable VLAN tag insert/strip */ 1618 ctrl |= IGC_CTRL_VME; 1619 } else { 1620 /* disable VLAN tag insert/strip */ 1621 ctrl &= ~IGC_CTRL_VME; 1622 } 1623 wr32(IGC_CTRL, ctrl); 1624 } 1625 1626 static void igc_restore_vlan(struct igc_adapter *adapter) 1627 { 1628 igc_vlan_mode(adapter->netdev, adapter->netdev->features); 1629 } 1630 1631 static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring, 1632 const unsigned int size, 1633 int *rx_buffer_pgcnt) 1634 { 1635 struct igc_rx_buffer *rx_buffer; 1636 1637 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; 1638 *rx_buffer_pgcnt = 1639 #if (PAGE_SIZE < 8192) 1640 page_count(rx_buffer->page); 1641 #else 1642 0; 1643 #endif 1644 prefetchw(rx_buffer->page); 1645 1646 /* we are reusing so sync this buffer for CPU use */ 1647 dma_sync_single_range_for_cpu(rx_ring->dev, 1648 rx_buffer->dma, 1649 rx_buffer->page_offset, 1650 size, 1651 DMA_FROM_DEVICE); 1652 1653 rx_buffer->pagecnt_bias--; 1654 1655 return rx_buffer; 1656 } 1657 1658 static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer, 1659 unsigned int truesize) 1660 { 1661 #if (PAGE_SIZE < 8192) 1662 buffer->page_offset ^= truesize; 1663 #else 1664 buffer->page_offset += truesize; 1665 #endif 1666 } 1667 1668 static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring, 1669 unsigned int size) 1670 { 1671 unsigned int truesize; 1672 1673 #if (PAGE_SIZE < 8192) 1674 truesize = igc_rx_pg_size(ring) / 2; 1675 #else 1676 truesize = ring_uses_build_skb(ring) ? 
1677 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + 1678 SKB_DATA_ALIGN(IGC_SKB_PAD + size) : 1679 SKB_DATA_ALIGN(size); 1680 #endif 1681 return truesize; 1682 } 1683 1684 /** 1685 * igc_add_rx_frag - Add contents of Rx buffer to sk_buff 1686 * @rx_ring: rx descriptor ring to transact packets on 1687 * @rx_buffer: buffer containing page to add 1688 * @skb: sk_buff to place the data into 1689 * @size: size of buffer to be added 1690 * 1691 * This function will add the data contained in rx_buffer->page to the skb. 1692 */ 1693 static void igc_add_rx_frag(struct igc_ring *rx_ring, 1694 struct igc_rx_buffer *rx_buffer, 1695 struct sk_buff *skb, 1696 unsigned int size) 1697 { 1698 unsigned int truesize; 1699 1700 #if (PAGE_SIZE < 8192) 1701 truesize = igc_rx_pg_size(rx_ring) / 2; 1702 #else 1703 truesize = ring_uses_build_skb(rx_ring) ? 1704 SKB_DATA_ALIGN(IGC_SKB_PAD + size) : 1705 SKB_DATA_ALIGN(size); 1706 #endif 1707 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, 1708 rx_buffer->page_offset, size, truesize); 1709 1710 igc_rx_buffer_flip(rx_buffer, truesize); 1711 } 1712 1713 static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, 1714 struct igc_rx_buffer *rx_buffer, 1715 union igc_adv_rx_desc *rx_desc, 1716 unsigned int size) 1717 { 1718 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; 1719 unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); 1720 struct sk_buff *skb; 1721 1722 /* prefetch first cache line of first page */ 1723 net_prefetch(va); 1724 1725 /* build an skb around the page buffer */ 1726 skb = build_skb(va - IGC_SKB_PAD, truesize); 1727 if (unlikely(!skb)) 1728 return NULL; 1729 1730 /* update pointers within the skb to store the data */ 1731 skb_reserve(skb, IGC_SKB_PAD); 1732 __skb_put(skb, size); 1733 1734 igc_rx_buffer_flip(rx_buffer, truesize); 1735 return skb; 1736 } 1737 1738 static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, 1739 struct igc_rx_buffer *rx_buffer, 1740 struct xdp_buff *xdp, 1741 ktime_t timestamp) 1742 { 1743 unsigned int size = xdp->data_end - xdp->data; 1744 unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); 1745 void *va = xdp->data; 1746 unsigned int headlen; 1747 struct sk_buff *skb; 1748 1749 /* prefetch first cache line of first page */ 1750 net_prefetch(va); 1751 1752 /* allocate a skb to store the frags */ 1753 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN); 1754 if (unlikely(!skb)) 1755 return NULL; 1756 1757 if (timestamp) 1758 skb_hwtstamps(skb)->hwtstamp = timestamp; 1759 1760 /* Determine available headroom for copy */ 1761 headlen = size; 1762 if (headlen > IGC_RX_HDR_LEN) 1763 headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN); 1764 1765 /* align pull length to size of long to optimize memcpy performance */ 1766 memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); 1767 1768 /* update all of the pointers */ 1769 size -= headlen; 1770 if (size) { 1771 skb_add_rx_frag(skb, 0, rx_buffer->page, 1772 (va + headlen) - page_address(rx_buffer->page), 1773 size, truesize); 1774 igc_rx_buffer_flip(rx_buffer, truesize); 1775 } else { 1776 rx_buffer->pagecnt_bias++; 1777 } 1778 1779 return skb; 1780 } 1781 1782 /** 1783 * igc_reuse_rx_page - page flip buffer and store it back on the ring 1784 * @rx_ring: rx descriptor ring to store buffers on 1785 * @old_buff: donor buffer to have page reused 1786 * 1787 * Synchronizes page for reuse by the adapter 1788 */ 1789 static void igc_reuse_rx_page(struct igc_ring *rx_ring, 1790 struct 
igc_rx_buffer *old_buff) 1791 { 1792 u16 nta = rx_ring->next_to_alloc; 1793 struct igc_rx_buffer *new_buff; 1794 1795 new_buff = &rx_ring->rx_buffer_info[nta]; 1796 1797 /* update, and store next to alloc */ 1798 nta++; 1799 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; 1800 1801 /* Transfer page from old buffer to new buffer. 1802 * Move each member individually to avoid possible store 1803 * forwarding stalls. 1804 */ 1805 new_buff->dma = old_buff->dma; 1806 new_buff->page = old_buff->page; 1807 new_buff->page_offset = old_buff->page_offset; 1808 new_buff->pagecnt_bias = old_buff->pagecnt_bias; 1809 } 1810 1811 static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer, 1812 int rx_buffer_pgcnt) 1813 { 1814 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; 1815 struct page *page = rx_buffer->page; 1816 1817 /* avoid re-using remote and pfmemalloc pages */ 1818 if (!dev_page_is_reusable(page)) 1819 return false; 1820 1821 #if (PAGE_SIZE < 8192) 1822 /* if we are only owner of page we can reuse it */ 1823 if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) 1824 return false; 1825 #else 1826 #define IGC_LAST_OFFSET \ 1827 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048) 1828 1829 if (rx_buffer->page_offset > IGC_LAST_OFFSET) 1830 return false; 1831 #endif 1832 1833 /* If we have drained the page fragment pool we need to update 1834 * the pagecnt_bias and page count so that we fully restock the 1835 * number of references the driver holds. 1836 */ 1837 if (unlikely(pagecnt_bias == 1)) { 1838 page_ref_add(page, USHRT_MAX - 1); 1839 rx_buffer->pagecnt_bias = USHRT_MAX; 1840 } 1841 1842 return true; 1843 } 1844 1845 /** 1846 * igc_is_non_eop - process handling of non-EOP buffers 1847 * @rx_ring: Rx ring being processed 1848 * @rx_desc: Rx descriptor for current buffer 1849 * 1850 * This function updates next to clean. If the buffer is an EOP buffer 1851 * this function exits returning false, otherwise it will place the 1852 * sk_buff in the next buffer to be chained and return true indicating 1853 * that this is in fact a non-EOP buffer. 1854 */ 1855 static bool igc_is_non_eop(struct igc_ring *rx_ring, 1856 union igc_adv_rx_desc *rx_desc) 1857 { 1858 u32 ntc = rx_ring->next_to_clean + 1; 1859 1860 /* fetch, update, and store next to clean */ 1861 ntc = (ntc < rx_ring->count) ? ntc : 0; 1862 rx_ring->next_to_clean = ntc; 1863 1864 prefetch(IGC_RX_DESC(rx_ring, ntc)); 1865 1866 if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP))) 1867 return false; 1868 1869 return true; 1870 } 1871 1872 /** 1873 * igc_cleanup_headers - Correct corrupted or empty headers 1874 * @rx_ring: rx descriptor ring packet is being transacted on 1875 * @rx_desc: pointer to the EOP Rx descriptor 1876 * @skb: pointer to current skb being fixed 1877 * 1878 * Address the case where we are pulling data in on pages only 1879 * and as such no data is present in the skb header. 1880 * 1881 * In addition if skb is not at least 60 bytes we need to pad it so that 1882 * it is large enough to qualify as a valid Ethernet frame. 1883 * 1884 * Returns true if an error was encountered and skb was freed. 
1885 */ 1886 static bool igc_cleanup_headers(struct igc_ring *rx_ring, 1887 union igc_adv_rx_desc *rx_desc, 1888 struct sk_buff *skb) 1889 { 1890 /* XDP packets use error pointer so abort at this point */ 1891 if (IS_ERR(skb)) 1892 return true; 1893 1894 if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) { 1895 struct net_device *netdev = rx_ring->netdev; 1896 1897 if (!(netdev->features & NETIF_F_RXALL)) { 1898 dev_kfree_skb_any(skb); 1899 return true; 1900 } 1901 } 1902 1903 /* if eth_skb_pad returns an error the skb was freed */ 1904 if (eth_skb_pad(skb)) 1905 return true; 1906 1907 return false; 1908 } 1909 1910 static void igc_put_rx_buffer(struct igc_ring *rx_ring, 1911 struct igc_rx_buffer *rx_buffer, 1912 int rx_buffer_pgcnt) 1913 { 1914 if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { 1915 /* hand second half of page back to the ring */ 1916 igc_reuse_rx_page(rx_ring, rx_buffer); 1917 } else { 1918 /* We are not reusing the buffer so unmap it and free 1919 * any references we are holding to it 1920 */ 1921 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, 1922 igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE, 1923 IGC_RX_DMA_ATTR); 1924 __page_frag_cache_drain(rx_buffer->page, 1925 rx_buffer->pagecnt_bias); 1926 } 1927 1928 /* clear contents of rx_buffer */ 1929 rx_buffer->page = NULL; 1930 } 1931 1932 static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring) 1933 { 1934 struct igc_adapter *adapter = rx_ring->q_vector->adapter; 1935 1936 if (ring_uses_build_skb(rx_ring)) 1937 return IGC_SKB_PAD; 1938 if (igc_xdp_is_enabled(adapter)) 1939 return XDP_PACKET_HEADROOM; 1940 1941 return 0; 1942 } 1943 1944 static bool igc_alloc_mapped_page(struct igc_ring *rx_ring, 1945 struct igc_rx_buffer *bi) 1946 { 1947 struct page *page = bi->page; 1948 dma_addr_t dma; 1949 1950 /* since we are recycling buffers we should seldom need to alloc */ 1951 if (likely(page)) 1952 return true; 1953 1954 /* alloc new page for storage */ 1955 page = dev_alloc_pages(igc_rx_pg_order(rx_ring)); 1956 if (unlikely(!page)) { 1957 rx_ring->rx_stats.alloc_failed++; 1958 return false; 1959 } 1960 1961 /* map page for use */ 1962 dma = dma_map_page_attrs(rx_ring->dev, page, 0, 1963 igc_rx_pg_size(rx_ring), 1964 DMA_FROM_DEVICE, 1965 IGC_RX_DMA_ATTR); 1966 1967 /* if mapping failed free memory back to system since 1968 * there isn't much point in holding memory we can't use 1969 */ 1970 if (dma_mapping_error(rx_ring->dev, dma)) { 1971 __free_page(page); 1972 1973 rx_ring->rx_stats.alloc_failed++; 1974 return false; 1975 } 1976 1977 bi->dma = dma; 1978 bi->page = page; 1979 bi->page_offset = igc_rx_offset(rx_ring); 1980 page_ref_add(page, USHRT_MAX - 1); 1981 bi->pagecnt_bias = USHRT_MAX; 1982 1983 return true; 1984 } 1985 1986 /** 1987 * igc_alloc_rx_buffers - Replace used receive buffers; packet split 1988 * @rx_ring: rx descriptor ring 1989 * @cleaned_count: number of buffers to clean 1990 */ 1991 static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count) 1992 { 1993 union igc_adv_rx_desc *rx_desc; 1994 u16 i = rx_ring->next_to_use; 1995 struct igc_rx_buffer *bi; 1996 u16 bufsz; 1997 1998 /* nothing to do */ 1999 if (!cleaned_count) 2000 return; 2001 2002 rx_desc = IGC_RX_DESC(rx_ring, i); 2003 bi = &rx_ring->rx_buffer_info[i]; 2004 i -= rx_ring->count; 2005 2006 bufsz = igc_rx_bufsz(rx_ring); 2007 2008 do { 2009 if (!igc_alloc_mapped_page(rx_ring, bi)) 2010 break; 2011 2012 /* sync the buffer for use by the device */ 2013 dma_sync_single_range_for_device(rx_ring->dev, 
bi->dma, 2014 bi->page_offset, bufsz, 2015 DMA_FROM_DEVICE); 2016 2017 /* Refresh the desc even if buffer_addrs didn't change 2018 * because each write-back erases this info. 2019 */ 2020 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); 2021 2022 rx_desc++; 2023 bi++; 2024 i++; 2025 if (unlikely(!i)) { 2026 rx_desc = IGC_RX_DESC(rx_ring, 0); 2027 bi = rx_ring->rx_buffer_info; 2028 i -= rx_ring->count; 2029 } 2030 2031 /* clear the length for the next_to_use descriptor */ 2032 rx_desc->wb.upper.length = 0; 2033 2034 cleaned_count--; 2035 } while (cleaned_count); 2036 2037 i += rx_ring->count; 2038 2039 if (rx_ring->next_to_use != i) { 2040 /* record the next descriptor to use */ 2041 rx_ring->next_to_use = i; 2042 2043 /* update next to alloc since we have filled the ring */ 2044 rx_ring->next_to_alloc = i; 2045 2046 /* Force memory writes to complete before letting h/w 2047 * know there are new descriptors to fetch. (Only 2048 * applicable for weak-ordered memory model archs, 2049 * such as IA-64). 2050 */ 2051 wmb(); 2052 writel(i, rx_ring->tail); 2053 } 2054 } 2055 2056 static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count) 2057 { 2058 union igc_adv_rx_desc *desc; 2059 u16 i = ring->next_to_use; 2060 struct igc_rx_buffer *bi; 2061 dma_addr_t dma; 2062 bool ok = true; 2063 2064 if (!count) 2065 return ok; 2066 2067 desc = IGC_RX_DESC(ring, i); 2068 bi = &ring->rx_buffer_info[i]; 2069 i -= ring->count; 2070 2071 do { 2072 bi->xdp = xsk_buff_alloc(ring->xsk_pool); 2073 if (!bi->xdp) { 2074 ok = false; 2075 break; 2076 } 2077 2078 dma = xsk_buff_xdp_get_dma(bi->xdp); 2079 desc->read.pkt_addr = cpu_to_le64(dma); 2080 2081 desc++; 2082 bi++; 2083 i++; 2084 if (unlikely(!i)) { 2085 desc = IGC_RX_DESC(ring, 0); 2086 bi = ring->rx_buffer_info; 2087 i -= ring->count; 2088 } 2089 2090 /* Clear the length for the next_to_use descriptor. */ 2091 desc->wb.upper.length = 0; 2092 2093 count--; 2094 } while (count); 2095 2096 i += ring->count; 2097 2098 if (ring->next_to_use != i) { 2099 ring->next_to_use = i; 2100 2101 /* Force memory writes to complete before letting h/w 2102 * know there are new descriptors to fetch. (Only 2103 * applicable for weak-ordered memory model archs, 2104 * such as IA-64). 2105 */ 2106 wmb(); 2107 writel(i, ring->tail); 2108 } 2109 2110 return ok; 2111 } 2112 2113 static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer, 2114 struct xdp_frame *xdpf, 2115 struct igc_ring *ring) 2116 { 2117 dma_addr_t dma; 2118 2119 dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE); 2120 if (dma_mapping_error(ring->dev, dma)) { 2121 netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); 2122 return -ENOMEM; 2123 } 2124 2125 buffer->type = IGC_TX_BUFFER_TYPE_XDP; 2126 buffer->xdpf = xdpf; 2127 buffer->protocol = 0; 2128 buffer->bytecount = xdpf->len; 2129 buffer->gso_segs = 1; 2130 buffer->time_stamp = jiffies; 2131 dma_unmap_len_set(buffer, len, xdpf->len); 2132 dma_unmap_addr_set(buffer, dma, dma); 2133 return 0; 2134 } 2135 2136 /* This function requires __netif_tx_lock is held by the caller. 
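 * The descriptor and the tx_buffer_info slot are claimed from
 * ring->next_to_use without any locking of their own, so every caller,
 * such as the XDP_TX path in igc_xdp_xmit_back(), must serialize on the
 * netdev tx queue lock.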
*/ 2137 static int igc_xdp_init_tx_descriptor(struct igc_ring *ring, 2138 struct xdp_frame *xdpf) 2139 { 2140 struct igc_tx_buffer *buffer; 2141 union igc_adv_tx_desc *desc; 2142 u32 cmd_type, olinfo_status; 2143 int err; 2144 2145 if (!igc_desc_unused(ring)) 2146 return -EBUSY; 2147 2148 buffer = &ring->tx_buffer_info[ring->next_to_use]; 2149 err = igc_xdp_init_tx_buffer(buffer, xdpf, ring); 2150 if (err) 2151 return err; 2152 2153 cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | 2154 IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | 2155 buffer->bytecount; 2156 olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; 2157 2158 desc = IGC_TX_DESC(ring, ring->next_to_use); 2159 desc->read.cmd_type_len = cpu_to_le32(cmd_type); 2160 desc->read.olinfo_status = cpu_to_le32(olinfo_status); 2161 desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma)); 2162 2163 netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount); 2164 2165 buffer->next_to_watch = desc; 2166 2167 ring->next_to_use++; 2168 if (ring->next_to_use == ring->count) 2169 ring->next_to_use = 0; 2170 2171 return 0; 2172 } 2173 2174 static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter, 2175 int cpu) 2176 { 2177 int index = cpu; 2178 2179 if (unlikely(index < 0)) 2180 index = 0; 2181 2182 while (index >= adapter->num_tx_queues) 2183 index -= adapter->num_tx_queues; 2184 2185 return adapter->tx_ring[index]; 2186 } 2187 2188 static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp) 2189 { 2190 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); 2191 int cpu = smp_processor_id(); 2192 struct netdev_queue *nq; 2193 struct igc_ring *ring; 2194 int res; 2195 2196 if (unlikely(!xdpf)) 2197 return -EFAULT; 2198 2199 ring = igc_xdp_get_tx_ring(adapter, cpu); 2200 nq = txring_txq(ring); 2201 2202 __netif_tx_lock(nq, cpu); 2203 res = igc_xdp_init_tx_descriptor(ring, xdpf); 2204 __netif_tx_unlock(nq); 2205 return res; 2206 } 2207 2208 /* This function assumes rcu_read_lock() is held by the caller. */ 2209 static int __igc_xdp_run_prog(struct igc_adapter *adapter, 2210 struct bpf_prog *prog, 2211 struct xdp_buff *xdp) 2212 { 2213 u32 act = bpf_prog_run_xdp(prog, xdp); 2214 2215 switch (act) { 2216 case XDP_PASS: 2217 return IGC_XDP_PASS; 2218 case XDP_TX: 2219 if (igc_xdp_xmit_back(adapter, xdp) < 0) 2220 goto out_failure; 2221 return IGC_XDP_TX; 2222 case XDP_REDIRECT: 2223 if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0) 2224 goto out_failure; 2225 return IGC_XDP_REDIRECT; 2226 break; 2227 default: 2228 bpf_warn_invalid_xdp_action(act); 2229 fallthrough; 2230 case XDP_ABORTED: 2231 out_failure: 2232 trace_xdp_exception(adapter->netdev, prog, act); 2233 fallthrough; 2234 case XDP_DROP: 2235 return IGC_XDP_CONSUMED; 2236 } 2237 } 2238 2239 static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter, 2240 struct xdp_buff *xdp) 2241 { 2242 struct bpf_prog *prog; 2243 int res; 2244 2245 prog = READ_ONCE(adapter->xdp_prog); 2246 if (!prog) { 2247 res = IGC_XDP_PASS; 2248 goto out; 2249 } 2250 2251 res = __igc_xdp_run_prog(adapter, prog, xdp); 2252 2253 out: 2254 return ERR_PTR(-res); 2255 } 2256 2257 /* This function assumes __netif_tx_lock is held by the caller. */ 2258 static void igc_flush_tx_descriptors(struct igc_ring *ring) 2259 { 2260 /* Once tail pointer is updated, hardware can fetch the descriptors 2261 * any time so we issue a write membar here to ensure all memory 2262 * writes are complete before the tail pointer is updated. 
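 * Callers therefore batch descriptor setup and flush once per burst, see
 * igc_finalize_xdp() and igc_xdp_xmit_zc(), instead of writing the
 * doorbell for every frame.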
2263 */ 2264 wmb(); 2265 writel(ring->next_to_use, ring->tail); 2266 } 2267 2268 static void igc_finalize_xdp(struct igc_adapter *adapter, int status) 2269 { 2270 int cpu = smp_processor_id(); 2271 struct netdev_queue *nq; 2272 struct igc_ring *ring; 2273 2274 if (status & IGC_XDP_TX) { 2275 ring = igc_xdp_get_tx_ring(adapter, cpu); 2276 nq = txring_txq(ring); 2277 2278 __netif_tx_lock(nq, cpu); 2279 igc_flush_tx_descriptors(ring); 2280 __netif_tx_unlock(nq); 2281 } 2282 2283 if (status & IGC_XDP_REDIRECT) 2284 xdp_do_flush(); 2285 } 2286 2287 static void igc_update_rx_stats(struct igc_q_vector *q_vector, 2288 unsigned int packets, unsigned int bytes) 2289 { 2290 struct igc_ring *ring = q_vector->rx.ring; 2291 2292 u64_stats_update_begin(&ring->rx_syncp); 2293 ring->rx_stats.packets += packets; 2294 ring->rx_stats.bytes += bytes; 2295 u64_stats_update_end(&ring->rx_syncp); 2296 2297 q_vector->rx.total_packets += packets; 2298 q_vector->rx.total_bytes += bytes; 2299 } 2300 2301 static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) 2302 { 2303 unsigned int total_bytes = 0, total_packets = 0; 2304 struct igc_adapter *adapter = q_vector->adapter; 2305 struct igc_ring *rx_ring = q_vector->rx.ring; 2306 struct sk_buff *skb = rx_ring->skb; 2307 u16 cleaned_count = igc_desc_unused(rx_ring); 2308 int xdp_status = 0, rx_buffer_pgcnt; 2309 2310 while (likely(total_packets < budget)) { 2311 union igc_adv_rx_desc *rx_desc; 2312 struct igc_rx_buffer *rx_buffer; 2313 unsigned int size, truesize; 2314 ktime_t timestamp = 0; 2315 struct xdp_buff xdp; 2316 int pkt_offset = 0; 2317 void *pktbuf; 2318 2319 /* return some buffers to hardware, one at a time is too slow */ 2320 if (cleaned_count >= IGC_RX_BUFFER_WRITE) { 2321 igc_alloc_rx_buffers(rx_ring, cleaned_count); 2322 cleaned_count = 0; 2323 } 2324 2325 rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean); 2326 size = le16_to_cpu(rx_desc->wb.upper.length); 2327 if (!size) 2328 break; 2329 2330 /* This memory barrier is needed to keep us from reading 2331 * any other fields out of the rx_desc until we know the 2332 * descriptor has been written back 2333 */ 2334 dma_rmb(); 2335 2336 rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt); 2337 truesize = igc_get_rx_frame_truesize(rx_ring, size); 2338 2339 pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; 2340 2341 if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) { 2342 timestamp = igc_ptp_rx_pktstamp(q_vector->adapter, 2343 pktbuf); 2344 pkt_offset = IGC_TS_HDR_LEN; 2345 size -= IGC_TS_HDR_LEN; 2346 } 2347 2348 if (!skb) { 2349 xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq); 2350 xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring), 2351 igc_rx_offset(rx_ring) + pkt_offset, size, false); 2352 2353 skb = igc_xdp_run_prog(adapter, &xdp); 2354 } 2355 2356 if (IS_ERR(skb)) { 2357 unsigned int xdp_res = -PTR_ERR(skb); 2358 2359 switch (xdp_res) { 2360 case IGC_XDP_CONSUMED: 2361 rx_buffer->pagecnt_bias++; 2362 break; 2363 case IGC_XDP_TX: 2364 case IGC_XDP_REDIRECT: 2365 igc_rx_buffer_flip(rx_buffer, truesize); 2366 xdp_status |= xdp_res; 2367 break; 2368 } 2369 2370 total_packets++; 2371 total_bytes += size; 2372 } else if (skb) 2373 igc_add_rx_frag(rx_ring, rx_buffer, skb, size); 2374 else if (ring_uses_build_skb(rx_ring)) 2375 skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size); 2376 else 2377 skb = igc_construct_skb(rx_ring, rx_buffer, &xdp, 2378 timestamp); 2379 2380 /* exit if we failed to retrieve a buffer */ 2381 if (!skb) { 2382 
rx_ring->rx_stats.alloc_failed++; 2383 rx_buffer->pagecnt_bias++; 2384 break; 2385 } 2386 2387 igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt); 2388 cleaned_count++; 2389 2390 /* fetch next buffer in frame if non-eop */ 2391 if (igc_is_non_eop(rx_ring, rx_desc)) 2392 continue; 2393 2394 /* verify the packet layout is correct */ 2395 if (igc_cleanup_headers(rx_ring, rx_desc, skb)) { 2396 skb = NULL; 2397 continue; 2398 } 2399 2400 /* probably a little skewed due to removing CRC */ 2401 total_bytes += skb->len; 2402 2403 /* populate checksum, VLAN, and protocol */ 2404 igc_process_skb_fields(rx_ring, rx_desc, skb); 2405 2406 napi_gro_receive(&q_vector->napi, skb); 2407 2408 /* reset skb pointer */ 2409 skb = NULL; 2410 2411 /* update budget accounting */ 2412 total_packets++; 2413 } 2414 2415 if (xdp_status) 2416 igc_finalize_xdp(adapter, xdp_status); 2417 2418 /* place incomplete frames back on ring for completion */ 2419 rx_ring->skb = skb; 2420 2421 igc_update_rx_stats(q_vector, total_packets, total_bytes); 2422 2423 if (cleaned_count) 2424 igc_alloc_rx_buffers(rx_ring, cleaned_count); 2425 2426 return total_packets; 2427 } 2428 2429 static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring, 2430 struct xdp_buff *xdp) 2431 { 2432 unsigned int metasize = xdp->data - xdp->data_meta; 2433 unsigned int datasize = xdp->data_end - xdp->data; 2434 unsigned int totalsize = metasize + datasize; 2435 struct sk_buff *skb; 2436 2437 skb = __napi_alloc_skb(&ring->q_vector->napi, 2438 xdp->data_end - xdp->data_hard_start, 2439 GFP_ATOMIC | __GFP_NOWARN); 2440 if (unlikely(!skb)) 2441 return NULL; 2442 2443 skb_reserve(skb, xdp->data_meta - xdp->data_hard_start); 2444 memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize); 2445 if (metasize) 2446 skb_metadata_set(skb, metasize); 2447 2448 return skb; 2449 } 2450 2451 static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector, 2452 union igc_adv_rx_desc *desc, 2453 struct xdp_buff *xdp, 2454 ktime_t timestamp) 2455 { 2456 struct igc_ring *ring = q_vector->rx.ring; 2457 struct sk_buff *skb; 2458 2459 skb = igc_construct_skb_zc(ring, xdp); 2460 if (!skb) { 2461 ring->rx_stats.alloc_failed++; 2462 return; 2463 } 2464 2465 if (timestamp) 2466 skb_hwtstamps(skb)->hwtstamp = timestamp; 2467 2468 if (igc_cleanup_headers(ring, desc, skb)) 2469 return; 2470 2471 igc_process_skb_fields(ring, desc, skb); 2472 napi_gro_receive(&q_vector->napi, skb); 2473 } 2474 2475 static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) 2476 { 2477 struct igc_adapter *adapter = q_vector->adapter; 2478 struct igc_ring *ring = q_vector->rx.ring; 2479 u16 cleaned_count = igc_desc_unused(ring); 2480 int total_bytes = 0, total_packets = 0; 2481 u16 ntc = ring->next_to_clean; 2482 struct bpf_prog *prog; 2483 bool failure = false; 2484 int xdp_status = 0; 2485 2486 rcu_read_lock(); 2487 2488 prog = READ_ONCE(adapter->xdp_prog); 2489 2490 while (likely(total_packets < budget)) { 2491 union igc_adv_rx_desc *desc; 2492 struct igc_rx_buffer *bi; 2493 ktime_t timestamp = 0; 2494 unsigned int size; 2495 int res; 2496 2497 desc = IGC_RX_DESC(ring, ntc); 2498 size = le16_to_cpu(desc->wb.upper.length); 2499 if (!size) 2500 break; 2501 2502 /* This memory barrier is needed to keep us from reading 2503 * any other fields out of the rx_desc until we know the 2504 * descriptor has been written back 2505 */ 2506 dma_rmb(); 2507 2508 bi = &ring->rx_buffer_info[ntc]; 2509 2510 if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) { 2511 timestamp = 
igc_ptp_rx_pktstamp(q_vector->adapter, 2512 bi->xdp->data); 2513 2514 bi->xdp->data += IGC_TS_HDR_LEN; 2515 2516 /* HW timestamp has been copied into local variable. Metadata 2517 * length when XDP program is called should be 0. 2518 */ 2519 bi->xdp->data_meta += IGC_TS_HDR_LEN; 2520 size -= IGC_TS_HDR_LEN; 2521 } 2522 2523 bi->xdp->data_end = bi->xdp->data + size; 2524 xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool); 2525 2526 res = __igc_xdp_run_prog(adapter, prog, bi->xdp); 2527 switch (res) { 2528 case IGC_XDP_PASS: 2529 igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp); 2530 fallthrough; 2531 case IGC_XDP_CONSUMED: 2532 xsk_buff_free(bi->xdp); 2533 break; 2534 case IGC_XDP_TX: 2535 case IGC_XDP_REDIRECT: 2536 xdp_status |= res; 2537 break; 2538 } 2539 2540 bi->xdp = NULL; 2541 total_bytes += size; 2542 total_packets++; 2543 cleaned_count++; 2544 ntc++; 2545 if (ntc == ring->count) 2546 ntc = 0; 2547 } 2548 2549 ring->next_to_clean = ntc; 2550 rcu_read_unlock(); 2551 2552 if (cleaned_count >= IGC_RX_BUFFER_WRITE) 2553 failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count); 2554 2555 if (xdp_status) 2556 igc_finalize_xdp(adapter, xdp_status); 2557 2558 igc_update_rx_stats(q_vector, total_packets, total_bytes); 2559 2560 if (xsk_uses_need_wakeup(ring->xsk_pool)) { 2561 if (failure || ring->next_to_clean == ring->next_to_use) 2562 xsk_set_rx_need_wakeup(ring->xsk_pool); 2563 else 2564 xsk_clear_rx_need_wakeup(ring->xsk_pool); 2565 return total_packets; 2566 } 2567 2568 return failure ? budget : total_packets; 2569 } 2570 2571 static void igc_update_tx_stats(struct igc_q_vector *q_vector, 2572 unsigned int packets, unsigned int bytes) 2573 { 2574 struct igc_ring *ring = q_vector->tx.ring; 2575 2576 u64_stats_update_begin(&ring->tx_syncp); 2577 ring->tx_stats.bytes += bytes; 2578 ring->tx_stats.packets += packets; 2579 u64_stats_update_end(&ring->tx_syncp); 2580 2581 q_vector->tx.total_bytes += bytes; 2582 q_vector->tx.total_packets += packets; 2583 } 2584 2585 static void igc_xdp_xmit_zc(struct igc_ring *ring) 2586 { 2587 struct xsk_buff_pool *pool = ring->xsk_pool; 2588 struct netdev_queue *nq = txring_txq(ring); 2589 union igc_adv_tx_desc *tx_desc = NULL; 2590 int cpu = smp_processor_id(); 2591 u16 ntu = ring->next_to_use; 2592 struct xdp_desc xdp_desc; 2593 u16 budget; 2594 2595 if (!netif_carrier_ok(ring->netdev)) 2596 return; 2597 2598 __netif_tx_lock(nq, cpu); 2599 2600 budget = igc_desc_unused(ring); 2601 2602 while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) { 2603 u32 cmd_type, olinfo_status; 2604 struct igc_tx_buffer *bi; 2605 dma_addr_t dma; 2606 2607 cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | 2608 IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | 2609 xdp_desc.len; 2610 olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT; 2611 2612 dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr); 2613 xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len); 2614 2615 tx_desc = IGC_TX_DESC(ring, ntu); 2616 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); 2617 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 2618 tx_desc->read.buffer_addr = cpu_to_le64(dma); 2619 2620 bi = &ring->tx_buffer_info[ntu]; 2621 bi->type = IGC_TX_BUFFER_TYPE_XSK; 2622 bi->protocol = 0; 2623 bi->bytecount = xdp_desc.len; 2624 bi->gso_segs = 1; 2625 bi->time_stamp = jiffies; 2626 bi->next_to_watch = tx_desc; 2627 2628 netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len); 2629 2630 ntu++; 2631 if (ntu == ring->count) 2632 ntu = 0; 2633 } 2634 2635 ring->next_to_use = ntu; 2636 if (tx_desc) 
{ 2637 igc_flush_tx_descriptors(ring); 2638 xsk_tx_release(pool); 2639 } 2640 2641 __netif_tx_unlock(nq); 2642 } 2643 2644 /** 2645 * igc_clean_tx_irq - Reclaim resources after transmit completes 2646 * @q_vector: pointer to q_vector containing needed info 2647 * @napi_budget: Used to determine if we are in netpoll 2648 * 2649 * returns true if ring is completely cleaned 2650 */ 2651 static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) 2652 { 2653 struct igc_adapter *adapter = q_vector->adapter; 2654 unsigned int total_bytes = 0, total_packets = 0; 2655 unsigned int budget = q_vector->tx.work_limit; 2656 struct igc_ring *tx_ring = q_vector->tx.ring; 2657 unsigned int i = tx_ring->next_to_clean; 2658 struct igc_tx_buffer *tx_buffer; 2659 union igc_adv_tx_desc *tx_desc; 2660 u32 xsk_frames = 0; 2661 2662 if (test_bit(__IGC_DOWN, &adapter->state)) 2663 return true; 2664 2665 tx_buffer = &tx_ring->tx_buffer_info[i]; 2666 tx_desc = IGC_TX_DESC(tx_ring, i); 2667 i -= tx_ring->count; 2668 2669 do { 2670 union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; 2671 2672 /* if next_to_watch is not set then there is no work pending */ 2673 if (!eop_desc) 2674 break; 2675 2676 /* prevent any other reads prior to eop_desc */ 2677 smp_rmb(); 2678 2679 /* if DD is not set pending work has not been completed */ 2680 if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD))) 2681 break; 2682 2683 /* clear next_to_watch to prevent false hangs */ 2684 tx_buffer->next_to_watch = NULL; 2685 2686 /* update the statistics for this packet */ 2687 total_bytes += tx_buffer->bytecount; 2688 total_packets += tx_buffer->gso_segs; 2689 2690 switch (tx_buffer->type) { 2691 case IGC_TX_BUFFER_TYPE_XSK: 2692 xsk_frames++; 2693 break; 2694 case IGC_TX_BUFFER_TYPE_XDP: 2695 xdp_return_frame(tx_buffer->xdpf); 2696 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 2697 break; 2698 case IGC_TX_BUFFER_TYPE_SKB: 2699 napi_consume_skb(tx_buffer->skb, napi_budget); 2700 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 2701 break; 2702 default: 2703 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); 2704 break; 2705 } 2706 2707 /* clear last DMA location and unmap remaining buffers */ 2708 while (tx_desc != eop_desc) { 2709 tx_buffer++; 2710 tx_desc++; 2711 i++; 2712 if (unlikely(!i)) { 2713 i -= tx_ring->count; 2714 tx_buffer = tx_ring->tx_buffer_info; 2715 tx_desc = IGC_TX_DESC(tx_ring, 0); 2716 } 2717 2718 /* unmap any remaining paged data */ 2719 if (dma_unmap_len(tx_buffer, len)) 2720 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 2721 } 2722 2723 /* move us one more past the eop_desc for start of next pkt */ 2724 tx_buffer++; 2725 tx_desc++; 2726 i++; 2727 if (unlikely(!i)) { 2728 i -= tx_ring->count; 2729 tx_buffer = tx_ring->tx_buffer_info; 2730 tx_desc = IGC_TX_DESC(tx_ring, 0); 2731 } 2732 2733 /* issue prefetch for next Tx descriptor */ 2734 prefetch(tx_desc); 2735 2736 /* update budget accounting */ 2737 budget--; 2738 } while (likely(budget)); 2739 2740 netdev_tx_completed_queue(txring_txq(tx_ring), 2741 total_packets, total_bytes); 2742 2743 i += tx_ring->count; 2744 tx_ring->next_to_clean = i; 2745 2746 igc_update_tx_stats(q_vector, total_packets, total_bytes); 2747 2748 if (tx_ring->xsk_pool) { 2749 if (xsk_frames) 2750 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); 2751 if (xsk_uses_need_wakeup(tx_ring->xsk_pool)) 2752 xsk_set_tx_need_wakeup(tx_ring->xsk_pool); 2753 igc_xdp_xmit_zc(tx_ring); 2754 } 2755 2756 if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { 2757 struct 
igc_hw *hw = &adapter->hw; 2758 2759 /* Detect a transmit hang in hardware, this serializes the 2760 * check with the clearing of time_stamp and movement of i 2761 */ 2762 clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 2763 if (tx_buffer->next_to_watch && 2764 time_after(jiffies, tx_buffer->time_stamp + 2765 (adapter->tx_timeout_factor * HZ)) && 2766 !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) { 2767 /* detected Tx unit hang */ 2768 netdev_err(tx_ring->netdev, 2769 "Detected Tx Unit Hang\n" 2770 " Tx Queue <%d>\n" 2771 " TDH <%x>\n" 2772 " TDT <%x>\n" 2773 " next_to_use <%x>\n" 2774 " next_to_clean <%x>\n" 2775 "buffer_info[next_to_clean]\n" 2776 " time_stamp <%lx>\n" 2777 " next_to_watch <%p>\n" 2778 " jiffies <%lx>\n" 2779 " desc.status <%x>\n", 2780 tx_ring->queue_index, 2781 rd32(IGC_TDH(tx_ring->reg_idx)), 2782 readl(tx_ring->tail), 2783 tx_ring->next_to_use, 2784 tx_ring->next_to_clean, 2785 tx_buffer->time_stamp, 2786 tx_buffer->next_to_watch, 2787 jiffies, 2788 tx_buffer->next_to_watch->wb.status); 2789 netif_stop_subqueue(tx_ring->netdev, 2790 tx_ring->queue_index); 2791 2792 /* we are about to reset, no point in enabling stuff */ 2793 return true; 2794 } 2795 } 2796 2797 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 2798 if (unlikely(total_packets && 2799 netif_carrier_ok(tx_ring->netdev) && 2800 igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { 2801 /* Make sure that anybody stopping the queue after this 2802 * sees the new next_to_clean. 2803 */ 2804 smp_mb(); 2805 if (__netif_subqueue_stopped(tx_ring->netdev, 2806 tx_ring->queue_index) && 2807 !(test_bit(__IGC_DOWN, &adapter->state))) { 2808 netif_wake_subqueue(tx_ring->netdev, 2809 tx_ring->queue_index); 2810 2811 u64_stats_update_begin(&tx_ring->tx_syncp); 2812 tx_ring->tx_stats.restart_queue++; 2813 u64_stats_update_end(&tx_ring->tx_syncp); 2814 } 2815 } 2816 2817 return !!budget; 2818 } 2819 2820 static int igc_find_mac_filter(struct igc_adapter *adapter, 2821 enum igc_mac_filter_type type, const u8 *addr) 2822 { 2823 struct igc_hw *hw = &adapter->hw; 2824 int max_entries = hw->mac.rar_entry_count; 2825 u32 ral, rah; 2826 int i; 2827 2828 for (i = 0; i < max_entries; i++) { 2829 ral = rd32(IGC_RAL(i)); 2830 rah = rd32(IGC_RAH(i)); 2831 2832 if (!(rah & IGC_RAH_AV)) 2833 continue; 2834 if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type) 2835 continue; 2836 if ((rah & IGC_RAH_RAH_MASK) != 2837 le16_to_cpup((__le16 *)(addr + 4))) 2838 continue; 2839 if (ral != le32_to_cpup((__le32 *)(addr))) 2840 continue; 2841 2842 return i; 2843 } 2844 2845 return -1; 2846 } 2847 2848 static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter) 2849 { 2850 struct igc_hw *hw = &adapter->hw; 2851 int max_entries = hw->mac.rar_entry_count; 2852 u32 rah; 2853 int i; 2854 2855 for (i = 0; i < max_entries; i++) { 2856 rah = rd32(IGC_RAH(i)); 2857 2858 if (!(rah & IGC_RAH_AV)) 2859 return i; 2860 } 2861 2862 return -1; 2863 } 2864 2865 /** 2866 * igc_add_mac_filter() - Add MAC address filter 2867 * @adapter: Pointer to adapter where the filter should be added 2868 * @type: MAC address filter type (source or destination) 2869 * @addr: MAC address 2870 * @queue: If non-negative, queue assignment feature is enabled and frames 2871 * matching the filter are enqueued onto 'queue'. Otherwise, queue 2872 * assignment is disabled. 2873 * 2874 * Return: 0 in case of success, negative errno code otherwise. 
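 *
 * If a filter matching @type and @addr already exists, it is reprogrammed
 * with the new @queue assignment instead of taking up another RAR entry.
 * -ENOSPC is returned only when every receive address register is already
 * in use.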
2875 */ 2876 static int igc_add_mac_filter(struct igc_adapter *adapter, 2877 enum igc_mac_filter_type type, const u8 *addr, 2878 int queue) 2879 { 2880 struct net_device *dev = adapter->netdev; 2881 int index; 2882 2883 index = igc_find_mac_filter(adapter, type, addr); 2884 if (index >= 0) 2885 goto update_filter; 2886 2887 index = igc_get_avail_mac_filter_slot(adapter); 2888 if (index < 0) 2889 return -ENOSPC; 2890 2891 netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n", 2892 index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src", 2893 addr, queue); 2894 2895 update_filter: 2896 igc_set_mac_filter_hw(adapter, index, type, addr, queue); 2897 return 0; 2898 } 2899 2900 /** 2901 * igc_del_mac_filter() - Delete MAC address filter 2902 * @adapter: Pointer to adapter where the filter should be deleted from 2903 * @type: MAC address filter type (source or destination) 2904 * @addr: MAC address 2905 */ 2906 static void igc_del_mac_filter(struct igc_adapter *adapter, 2907 enum igc_mac_filter_type type, const u8 *addr) 2908 { 2909 struct net_device *dev = adapter->netdev; 2910 int index; 2911 2912 index = igc_find_mac_filter(adapter, type, addr); 2913 if (index < 0) 2914 return; 2915 2916 if (index == 0) { 2917 /* If this is the default filter, we don't actually delete it. 2918 * We just reset to its default value i.e. disable queue 2919 * assignment. 2920 */ 2921 netdev_dbg(dev, "Disable default MAC filter queue assignment"); 2922 2923 igc_set_mac_filter_hw(adapter, 0, type, addr, -1); 2924 } else { 2925 netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n", 2926 index, 2927 type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src", 2928 addr); 2929 2930 igc_clear_mac_filter_hw(adapter, index); 2931 } 2932 } 2933 2934 /** 2935 * igc_add_vlan_prio_filter() - Add VLAN priority filter 2936 * @adapter: Pointer to adapter where the filter should be added 2937 * @prio: VLAN priority value 2938 * @queue: Queue number which matching frames are assigned to 2939 * 2940 * Return: 0 in case of success, negative errno code otherwise. 
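 *
 * Each VLAN priority can be steered to only one queue at a time, so
 * -EEXIST is returned when the VLANPQF entry for @prio is already valid.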
2941 */ 2942 static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio, 2943 int queue) 2944 { 2945 struct net_device *dev = adapter->netdev; 2946 struct igc_hw *hw = &adapter->hw; 2947 u32 vlanpqf; 2948 2949 vlanpqf = rd32(IGC_VLANPQF); 2950 2951 if (vlanpqf & IGC_VLANPQF_VALID(prio)) { 2952 netdev_dbg(dev, "VLAN priority filter already in use\n"); 2953 return -EEXIST; 2954 } 2955 2956 vlanpqf |= IGC_VLANPQF_QSEL(prio, queue); 2957 vlanpqf |= IGC_VLANPQF_VALID(prio); 2958 2959 wr32(IGC_VLANPQF, vlanpqf); 2960 2961 netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n", 2962 prio, queue); 2963 return 0; 2964 } 2965 2966 /** 2967 * igc_del_vlan_prio_filter() - Delete VLAN priority filter 2968 * @adapter: Pointer to adapter where the filter should be deleted from 2969 * @prio: VLAN priority value 2970 */ 2971 static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio) 2972 { 2973 struct igc_hw *hw = &adapter->hw; 2974 u32 vlanpqf; 2975 2976 vlanpqf = rd32(IGC_VLANPQF); 2977 2978 vlanpqf &= ~IGC_VLANPQF_VALID(prio); 2979 vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK); 2980 2981 wr32(IGC_VLANPQF, vlanpqf); 2982 2983 netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n", 2984 prio); 2985 } 2986 2987 static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter) 2988 { 2989 struct igc_hw *hw = &adapter->hw; 2990 int i; 2991 2992 for (i = 0; i < MAX_ETYPE_FILTER; i++) { 2993 u32 etqf = rd32(IGC_ETQF(i)); 2994 2995 if (!(etqf & IGC_ETQF_FILTER_ENABLE)) 2996 return i; 2997 } 2998 2999 return -1; 3000 } 3001 3002 /** 3003 * igc_add_etype_filter() - Add ethertype filter 3004 * @adapter: Pointer to adapter where the filter should be added 3005 * @etype: Ethertype value 3006 * @queue: If non-negative, queue assignment feature is enabled and frames 3007 * matching the filter are enqueued onto 'queue'. Otherwise, queue 3008 * assignment is disabled. 3009 * 3010 * Return: 0 in case of success, negative errno code otherwise. 
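 *
 * The filter is written to the first ETQF register that is not yet
 * enabled; -ENOSPC is returned once all MAX_ETYPE_FILTER slots are in use.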
3011 */ 3012 static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype, 3013 int queue) 3014 { 3015 struct igc_hw *hw = &adapter->hw; 3016 int index; 3017 u32 etqf; 3018 3019 index = igc_get_avail_etype_filter_slot(adapter); 3020 if (index < 0) 3021 return -ENOSPC; 3022 3023 etqf = rd32(IGC_ETQF(index)); 3024 3025 etqf &= ~IGC_ETQF_ETYPE_MASK; 3026 etqf |= etype; 3027 3028 if (queue >= 0) { 3029 etqf &= ~IGC_ETQF_QUEUE_MASK; 3030 etqf |= (queue << IGC_ETQF_QUEUE_SHIFT); 3031 etqf |= IGC_ETQF_QUEUE_ENABLE; 3032 } 3033 3034 etqf |= IGC_ETQF_FILTER_ENABLE; 3035 3036 wr32(IGC_ETQF(index), etqf); 3037 3038 netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n", 3039 etype, queue); 3040 return 0; 3041 } 3042 3043 static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype) 3044 { 3045 struct igc_hw *hw = &adapter->hw; 3046 int i; 3047 3048 for (i = 0; i < MAX_ETYPE_FILTER; i++) { 3049 u32 etqf = rd32(IGC_ETQF(i)); 3050 3051 if ((etqf & IGC_ETQF_ETYPE_MASK) == etype) 3052 return i; 3053 } 3054 3055 return -1; 3056 } 3057 3058 /** 3059 * igc_del_etype_filter() - Delete ethertype filter 3060 * @adapter: Pointer to adapter where the filter should be deleted from 3061 * @etype: Ethertype value 3062 */ 3063 static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype) 3064 { 3065 struct igc_hw *hw = &adapter->hw; 3066 int index; 3067 3068 index = igc_find_etype_filter(adapter, etype); 3069 if (index < 0) 3070 return; 3071 3072 wr32(IGC_ETQF(index), 0); 3073 3074 netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n", 3075 etype); 3076 } 3077 3078 static int igc_flex_filter_select(struct igc_adapter *adapter, 3079 struct igc_flex_filter *input, 3080 u32 *fhft) 3081 { 3082 struct igc_hw *hw = &adapter->hw; 3083 u8 fhft_index; 3084 u32 fhftsl; 3085 3086 if (input->index >= MAX_FLEX_FILTER) { 3087 dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n"); 3088 return -EINVAL; 3089 } 3090 3091 /* Indirect table select register */ 3092 fhftsl = rd32(IGC_FHFTSL); 3093 fhftsl &= ~IGC_FHFTSL_FTSL_MASK; 3094 switch (input->index) { 3095 case 0 ... 7: 3096 fhftsl |= 0x00; 3097 break; 3098 case 8 ... 15: 3099 fhftsl |= 0x01; 3100 break; 3101 case 16 ... 23: 3102 fhftsl |= 0x02; 3103 break; 3104 case 24 ... 31: 3105 fhftsl |= 0x03; 3106 break; 3107 } 3108 wr32(IGC_FHFTSL, fhftsl); 3109 3110 /* Normalize index down to host table register */ 3111 fhft_index = input->index % 8; 3112 3113 *fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) : 3114 IGC_FHFT_EXT(fhft_index - 4); 3115 3116 return 0; 3117 } 3118 3119 static int igc_write_flex_filter_ll(struct igc_adapter *adapter, 3120 struct igc_flex_filter *input) 3121 { 3122 struct device *dev = &adapter->pdev->dev; 3123 struct igc_hw *hw = &adapter->hw; 3124 u8 *data = input->data; 3125 u8 *mask = input->mask; 3126 u32 queuing; 3127 u32 fhft; 3128 u32 wufc; 3129 int ret; 3130 int i; 3131 3132 /* Length has to be aligned to 8. Otherwise the filter will fail. Bail 3133 * out early to avoid surprises later. 3134 */ 3135 if (input->length % 8 != 0) { 3136 dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n"); 3137 return -EINVAL; 3138 } 3139 3140 /* Select corresponding flex filter register and get base for host table. */ 3141 ret = igc_flex_filter_select(adapter, input, &fhft); 3142 if (ret) 3143 return ret; 3144 3145 /* When adding a filter globally disable flex filter feature. That is 3146 * recommended within the datasheet. 
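 * The IGC_WUFC_FLEX_HQ bit is restored at the end of this function, after
 * the host filter table for this index has been programmed.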
3147 */ 3148 wufc = rd32(IGC_WUFC); 3149 wufc &= ~IGC_WUFC_FLEX_HQ; 3150 wr32(IGC_WUFC, wufc); 3151 3152 /* Configure filter */ 3153 queuing = input->length & IGC_FHFT_LENGTH_MASK; 3154 queuing |= (input->rx_queue << IGC_FHFT_QUEUE_SHIFT) & IGC_FHFT_QUEUE_MASK; 3155 queuing |= (input->prio << IGC_FHFT_PRIO_SHIFT) & IGC_FHFT_PRIO_MASK; 3156 3157 if (input->immediate_irq) 3158 queuing |= IGC_FHFT_IMM_INT; 3159 3160 if (input->drop) 3161 queuing |= IGC_FHFT_DROP; 3162 3163 wr32(fhft + 0xFC, queuing); 3164 3165 /* Write data (128 byte) and mask (128 bit) */ 3166 for (i = 0; i < 16; ++i) { 3167 const size_t data_idx = i * 8; 3168 const size_t row_idx = i * 16; 3169 u32 dw0 = 3170 (data[data_idx + 0] << 0) | 3171 (data[data_idx + 1] << 8) | 3172 (data[data_idx + 2] << 16) | 3173 (data[data_idx + 3] << 24); 3174 u32 dw1 = 3175 (data[data_idx + 4] << 0) | 3176 (data[data_idx + 5] << 8) | 3177 (data[data_idx + 6] << 16) | 3178 (data[data_idx + 7] << 24); 3179 u32 tmp; 3180 3181 /* Write row: dw0, dw1 and mask */ 3182 wr32(fhft + row_idx, dw0); 3183 wr32(fhft + row_idx + 4, dw1); 3184 3185 /* mask is only valid for MASK(7, 0) */ 3186 tmp = rd32(fhft + row_idx + 8); 3187 tmp &= ~GENMASK(7, 0); 3188 tmp |= mask[i]; 3189 wr32(fhft + row_idx + 8, tmp); 3190 } 3191 3192 /* Enable filter. */ 3193 wufc |= IGC_WUFC_FLEX_HQ; 3194 if (input->index > 8) { 3195 /* Filter 0-7 are enabled via WUFC. The other 24 filters are not. */ 3196 u32 wufc_ext = rd32(IGC_WUFC_EXT); 3197 3198 wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8)); 3199 3200 wr32(IGC_WUFC_EXT, wufc_ext); 3201 } else { 3202 wufc |= (IGC_WUFC_FLX0 << input->index); 3203 } 3204 wr32(IGC_WUFC, wufc); 3205 3206 dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n", 3207 input->index); 3208 3209 return 0; 3210 } 3211 3212 static void igc_flex_filter_add_field(struct igc_flex_filter *flex, 3213 const void *src, unsigned int offset, 3214 size_t len, const void *mask) 3215 { 3216 int i; 3217 3218 /* data */ 3219 memcpy(&flex->data[offset], src, len); 3220 3221 /* mask */ 3222 for (i = 0; i < len; ++i) { 3223 const unsigned int idx = i + offset; 3224 const u8 *ptr = mask; 3225 3226 if (mask) { 3227 if (ptr[i] & 0xff) 3228 flex->mask[idx / 8] |= BIT(idx % 8); 3229 3230 continue; 3231 } 3232 3233 flex->mask[idx / 8] |= BIT(idx % 8); 3234 } 3235 } 3236 3237 static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter) 3238 { 3239 struct igc_hw *hw = &adapter->hw; 3240 u32 wufc, wufc_ext; 3241 int i; 3242 3243 wufc = rd32(IGC_WUFC); 3244 wufc_ext = rd32(IGC_WUFC_EXT); 3245 3246 for (i = 0; i < MAX_FLEX_FILTER; i++) { 3247 if (i < 8) { 3248 if (!(wufc & (IGC_WUFC_FLX0 << i))) 3249 return i; 3250 } else { 3251 if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8)))) 3252 return i; 3253 } 3254 } 3255 3256 return -ENOSPC; 3257 } 3258 3259 static bool igc_flex_filter_in_use(struct igc_adapter *adapter) 3260 { 3261 struct igc_hw *hw = &adapter->hw; 3262 u32 wufc, wufc_ext; 3263 3264 wufc = rd32(IGC_WUFC); 3265 wufc_ext = rd32(IGC_WUFC_EXT); 3266 3267 if (wufc & IGC_WUFC_FILTER_MASK) 3268 return true; 3269 3270 if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK) 3271 return true; 3272 3273 return false; 3274 } 3275 3276 static int igc_add_flex_filter(struct igc_adapter *adapter, 3277 struct igc_nfc_rule *rule) 3278 { 3279 struct igc_flex_filter flex = { }; 3280 struct igc_nfc_filter *filter = &rule->filter; 3281 unsigned int eth_offset, user_offset; 3282 int ret, index; 3283 bool vlan; 3284 3285 index = igc_find_avail_flex_filter_slot(adapter); 3286 if (index < 0) 
3287 return -ENOSPC; 3288 3289 /* Construct the flex filter: 3290 * -> dest_mac [6] 3291 * -> src_mac [6] 3292 * -> tpid [2] 3293 * -> vlan tci [2] 3294 * -> ether type [2] 3295 * -> user data [8] 3296 * -> = 26 bytes => 32 length 3297 */ 3298 flex.index = index; 3299 flex.length = 32; 3300 flex.rx_queue = rule->action; 3301 3302 vlan = rule->filter.vlan_tci || rule->filter.vlan_etype; 3303 eth_offset = vlan ? 16 : 12; 3304 user_offset = vlan ? 18 : 14; 3305 3306 /* Add destination MAC */ 3307 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) 3308 igc_flex_filter_add_field(&flex, &filter->dst_addr, 0, 3309 ETH_ALEN, NULL); 3310 3311 /* Add source MAC */ 3312 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) 3313 igc_flex_filter_add_field(&flex, &filter->src_addr, 6, 3314 ETH_ALEN, NULL); 3315 3316 /* Add VLAN etype */ 3317 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) 3318 igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12, 3319 sizeof(filter->vlan_etype), 3320 NULL); 3321 3322 /* Add VLAN TCI */ 3323 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) 3324 igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14, 3325 sizeof(filter->vlan_tci), NULL); 3326 3327 /* Add Ether type */ 3328 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { 3329 __be16 etype = cpu_to_be16(filter->etype); 3330 3331 igc_flex_filter_add_field(&flex, &etype, eth_offset, 3332 sizeof(etype), NULL); 3333 } 3334 3335 /* Add user data */ 3336 if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) 3337 igc_flex_filter_add_field(&flex, &filter->user_data, 3338 user_offset, 3339 sizeof(filter->user_data), 3340 filter->user_mask); 3341 3342 /* Add it down to the hardware and enable it. */ 3343 ret = igc_write_flex_filter_ll(adapter, &flex); 3344 if (ret) 3345 return ret; 3346 3347 filter->flex_index = index; 3348 3349 return 0; 3350 } 3351 3352 static void igc_del_flex_filter(struct igc_adapter *adapter, 3353 u16 reg_index) 3354 { 3355 struct igc_hw *hw = &adapter->hw; 3356 u32 wufc; 3357 3358 /* Just disable the filter. The filter table itself is kept 3359 * intact. Another flex_filter_add() should override the "old" data 3360 * then. 
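 * Once no flex filter is left enabled, IGC_WUFC_FLEX_HQ is cleared as
 * well so the feature is switched off entirely.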
3361 */ 3362 if (reg_index > 8) { 3363 u32 wufc_ext = rd32(IGC_WUFC_EXT); 3364 3365 wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8)); 3366 wr32(IGC_WUFC_EXT, wufc_ext); 3367 } else { 3368 wufc = rd32(IGC_WUFC); 3369 3370 wufc &= ~(IGC_WUFC_FLX0 << reg_index); 3371 wr32(IGC_WUFC, wufc); 3372 } 3373 3374 if (igc_flex_filter_in_use(adapter)) 3375 return; 3376 3377 /* No filters are in use, we may disable flex filters */ 3378 wufc = rd32(IGC_WUFC); 3379 wufc &= ~IGC_WUFC_FLEX_HQ; 3380 wr32(IGC_WUFC, wufc); 3381 } 3382 3383 static int igc_enable_nfc_rule(struct igc_adapter *adapter, 3384 struct igc_nfc_rule *rule) 3385 { 3386 int err; 3387 3388 if (rule->flex) { 3389 return igc_add_flex_filter(adapter, rule); 3390 } 3391 3392 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { 3393 err = igc_add_etype_filter(adapter, rule->filter.etype, 3394 rule->action); 3395 if (err) 3396 return err; 3397 } 3398 3399 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { 3400 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, 3401 rule->filter.src_addr, rule->action); 3402 if (err) 3403 return err; 3404 } 3405 3406 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { 3407 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, 3408 rule->filter.dst_addr, rule->action); 3409 if (err) 3410 return err; 3411 } 3412 3413 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { 3414 int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> 3415 VLAN_PRIO_SHIFT; 3416 3417 err = igc_add_vlan_prio_filter(adapter, prio, rule->action); 3418 if (err) 3419 return err; 3420 } 3421 3422 return 0; 3423 } 3424 3425 static void igc_disable_nfc_rule(struct igc_adapter *adapter, 3426 const struct igc_nfc_rule *rule) 3427 { 3428 if (rule->flex) { 3429 igc_del_flex_filter(adapter, rule->filter.flex_index); 3430 return; 3431 } 3432 3433 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) 3434 igc_del_etype_filter(adapter, rule->filter.etype); 3435 3436 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { 3437 int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> 3438 VLAN_PRIO_SHIFT; 3439 3440 igc_del_vlan_prio_filter(adapter, prio); 3441 } 3442 3443 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) 3444 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, 3445 rule->filter.src_addr); 3446 3447 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) 3448 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, 3449 rule->filter.dst_addr); 3450 } 3451 3452 /** 3453 * igc_get_nfc_rule() - Get NFC rule 3454 * @adapter: Pointer to adapter 3455 * @location: Rule location 3456 * 3457 * Context: Expects adapter->nfc_rule_lock to be held by caller. 3458 * 3459 * Return: Pointer to NFC rule at @location. If not found, NULL. 3460 */ 3461 struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter, 3462 u32 location) 3463 { 3464 struct igc_nfc_rule *rule; 3465 3466 list_for_each_entry(rule, &adapter->nfc_rule_list, list) { 3467 if (rule->location == location) 3468 return rule; 3469 if (rule->location > location) 3470 break; 3471 } 3472 3473 return NULL; 3474 } 3475 3476 /** 3477 * igc_del_nfc_rule() - Delete NFC rule 3478 * @adapter: Pointer to adapter 3479 * @rule: Pointer to rule to be deleted 3480 * 3481 * Disable NFC rule in hardware and delete it from adapter. 3482 * 3483 * Context: Expects adapter->nfc_rule_lock to be held by caller. 
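 *
 * The rule is unlinked from adapter->nfc_rule_list and freed, so @rule
 * must not be dereferenced by the caller after this function returns.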
3484 */ 3485 void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) 3486 { 3487 igc_disable_nfc_rule(adapter, rule); 3488 3489 list_del(&rule->list); 3490 adapter->nfc_rule_count--; 3491 3492 kfree(rule); 3493 } 3494 3495 static void igc_flush_nfc_rules(struct igc_adapter *adapter) 3496 { 3497 struct igc_nfc_rule *rule, *tmp; 3498 3499 mutex_lock(&adapter->nfc_rule_lock); 3500 3501 list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list) 3502 igc_del_nfc_rule(adapter, rule); 3503 3504 mutex_unlock(&adapter->nfc_rule_lock); 3505 } 3506 3507 /** 3508 * igc_add_nfc_rule() - Add NFC rule 3509 * @adapter: Pointer to adapter 3510 * @rule: Pointer to rule to be added 3511 * 3512 * Enable NFC rule in hardware and add it to adapter. 3513 * 3514 * Context: Expects adapter->nfc_rule_lock to be held by caller. 3515 * 3516 * Return: 0 on success, negative errno on failure. 3517 */ 3518 int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) 3519 { 3520 struct igc_nfc_rule *pred, *cur; 3521 int err; 3522 3523 err = igc_enable_nfc_rule(adapter, rule); 3524 if (err) 3525 return err; 3526 3527 pred = NULL; 3528 list_for_each_entry(cur, &adapter->nfc_rule_list, list) { 3529 if (cur->location >= rule->location) 3530 break; 3531 pred = cur; 3532 } 3533 3534 list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list); 3535 adapter->nfc_rule_count++; 3536 return 0; 3537 } 3538 3539 static void igc_restore_nfc_rules(struct igc_adapter *adapter) 3540 { 3541 struct igc_nfc_rule *rule; 3542 3543 mutex_lock(&adapter->nfc_rule_lock); 3544 3545 list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list) 3546 igc_enable_nfc_rule(adapter, rule); 3547 3548 mutex_unlock(&adapter->nfc_rule_lock); 3549 } 3550 3551 static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr) 3552 { 3553 struct igc_adapter *adapter = netdev_priv(netdev); 3554 3555 return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1); 3556 } 3557 3558 static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr) 3559 { 3560 struct igc_adapter *adapter = netdev_priv(netdev); 3561 3562 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr); 3563 return 0; 3564 } 3565 3566 /** 3567 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set 3568 * @netdev: network interface device structure 3569 * 3570 * The set_rx_mode entry point is called whenever the unicast or multicast 3571 * address lists or the network interface flags are updated. This routine is 3572 * responsible for configuring the hardware for proper unicast, multicast, 3573 * promiscuous mode, and all-multi behavior. 
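 *
 * If the multicast or unicast address lists cannot be programmed into
 * hardware completely, the corresponding promiscuous mode (MPE or UPE) is
 * enabled instead so that no traffic is silently dropped.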
3574 */ 3575 static void igc_set_rx_mode(struct net_device *netdev) 3576 { 3577 struct igc_adapter *adapter = netdev_priv(netdev); 3578 struct igc_hw *hw = &adapter->hw; 3579 u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE; 3580 int count; 3581 3582 /* Check for Promiscuous and All Multicast modes */ 3583 if (netdev->flags & IFF_PROMISC) { 3584 rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE; 3585 } else { 3586 if (netdev->flags & IFF_ALLMULTI) { 3587 rctl |= IGC_RCTL_MPE; 3588 } else { 3589 /* Write addresses to the MTA, if the attempt fails 3590 * then we should just turn on promiscuous mode so 3591 * that we can at least receive multicast traffic 3592 */ 3593 count = igc_write_mc_addr_list(netdev); 3594 if (count < 0) 3595 rctl |= IGC_RCTL_MPE; 3596 } 3597 } 3598 3599 /* Write addresses to available RAR registers, if there is not 3600 * sufficient space to store all the addresses then enable 3601 * unicast promiscuous mode 3602 */ 3603 if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync)) 3604 rctl |= IGC_RCTL_UPE; 3605 3606 /* update state of unicast and multicast */ 3607 rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE); 3608 wr32(IGC_RCTL, rctl); 3609 3610 #if (PAGE_SIZE < 8192) 3611 if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB) 3612 rlpml = IGC_MAX_FRAME_BUILD_SKB; 3613 #endif 3614 wr32(IGC_RLPML, rlpml); 3615 } 3616 3617 /** 3618 * igc_configure - configure the hardware for RX and TX 3619 * @adapter: private board structure 3620 */ 3621 static void igc_configure(struct igc_adapter *adapter) 3622 { 3623 struct net_device *netdev = adapter->netdev; 3624 int i = 0; 3625 3626 igc_get_hw_control(adapter); 3627 igc_set_rx_mode(netdev); 3628 3629 igc_restore_vlan(adapter); 3630 3631 igc_setup_tctl(adapter); 3632 igc_setup_mrqc(adapter); 3633 igc_setup_rctl(adapter); 3634 3635 igc_set_default_mac_filter(adapter); 3636 igc_restore_nfc_rules(adapter); 3637 3638 igc_configure_tx(adapter); 3639 igc_configure_rx(adapter); 3640 3641 igc_rx_fifo_flush_base(&adapter->hw); 3642 3643 /* call igc_desc_unused which always leaves 3644 * at least 1 descriptor unused to make sure 3645 * next_to_use != next_to_clean 3646 */ 3647 for (i = 0; i < adapter->num_rx_queues; i++) { 3648 struct igc_ring *ring = adapter->rx_ring[i]; 3649 3650 if (ring->xsk_pool) 3651 igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring)); 3652 else 3653 igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); 3654 } 3655 } 3656 3657 /** 3658 * igc_write_ivar - configure ivar for given MSI-X vector 3659 * @hw: pointer to the HW structure 3660 * @msix_vector: vector number we are allocating to a given ring 3661 * @index: row index of IVAR register to write within IVAR table 3662 * @offset: column offset of in IVAR, should be multiple of 8 3663 * 3664 * The IVAR table consists of 2 columns, 3665 * each containing an cause allocation for an Rx and Tx ring, and a 3666 * variable number of rows depending on the number of queues supported. 
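 *
 * Each 32-bit IVAR entry therefore holds four 8-bit cause allocation
 * fields. As used by igc_assign_vector(), Rx queue 3 for example maps to
 * row 1 (3 >> 1) at column offset 16 ((3 & 0x1) << 4), and Tx queue 3 to
 * the same row at offset 24.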
3667 */ 3668 static void igc_write_ivar(struct igc_hw *hw, int msix_vector, 3669 int index, int offset) 3670 { 3671 u32 ivar = array_rd32(IGC_IVAR0, index); 3672 3673 /* clear any bits that are currently set */ 3674 ivar &= ~((u32)0xFF << offset); 3675 3676 /* write vector and valid bit */ 3677 ivar |= (msix_vector | IGC_IVAR_VALID) << offset; 3678 3679 array_wr32(IGC_IVAR0, index, ivar); 3680 } 3681 3682 static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector) 3683 { 3684 struct igc_adapter *adapter = q_vector->adapter; 3685 struct igc_hw *hw = &adapter->hw; 3686 int rx_queue = IGC_N0_QUEUE; 3687 int tx_queue = IGC_N0_QUEUE; 3688 3689 if (q_vector->rx.ring) 3690 rx_queue = q_vector->rx.ring->reg_idx; 3691 if (q_vector->tx.ring) 3692 tx_queue = q_vector->tx.ring->reg_idx; 3693 3694 switch (hw->mac.type) { 3695 case igc_i225: 3696 if (rx_queue > IGC_N0_QUEUE) 3697 igc_write_ivar(hw, msix_vector, 3698 rx_queue >> 1, 3699 (rx_queue & 0x1) << 4); 3700 if (tx_queue > IGC_N0_QUEUE) 3701 igc_write_ivar(hw, msix_vector, 3702 tx_queue >> 1, 3703 ((tx_queue & 0x1) << 4) + 8); 3704 q_vector->eims_value = BIT(msix_vector); 3705 break; 3706 default: 3707 WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n"); 3708 break; 3709 } 3710 3711 /* add q_vector eims value to global eims_enable_mask */ 3712 adapter->eims_enable_mask |= q_vector->eims_value; 3713 3714 /* configure q_vector to set itr on first interrupt */ 3715 q_vector->set_itr = 1; 3716 } 3717 3718 /** 3719 * igc_configure_msix - Configure MSI-X hardware 3720 * @adapter: Pointer to adapter structure 3721 * 3722 * igc_configure_msix sets up the hardware to properly 3723 * generate MSI-X interrupts. 3724 */ 3725 static void igc_configure_msix(struct igc_adapter *adapter) 3726 { 3727 struct igc_hw *hw = &adapter->hw; 3728 int i, vector = 0; 3729 u32 tmp; 3730 3731 adapter->eims_enable_mask = 0; 3732 3733 /* set vector for other causes, i.e. link changes */ 3734 switch (hw->mac.type) { 3735 case igc_i225: 3736 /* Turn on MSI-X capability first, or our settings 3737 * won't stick. And it will take days to debug. 
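 * Vector 0 is claimed just below for the "other" causes such as link
 * status changes via IGC_IVAR_MISC; the queue vectors are then assigned
 * starting from vector 1 by igc_assign_vector().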
3738 */ 3739 wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE | 3740 IGC_GPIE_PBA | IGC_GPIE_EIAME | 3741 IGC_GPIE_NSICR); 3742 3743 /* enable msix_other interrupt */ 3744 adapter->eims_other = BIT(vector); 3745 tmp = (vector++ | IGC_IVAR_VALID) << 8; 3746 3747 wr32(IGC_IVAR_MISC, tmp); 3748 break; 3749 default: 3750 /* do nothing, since nothing else supports MSI-X */ 3751 break; 3752 } /* switch (hw->mac.type) */ 3753 3754 adapter->eims_enable_mask |= adapter->eims_other; 3755 3756 for (i = 0; i < adapter->num_q_vectors; i++) 3757 igc_assign_vector(adapter->q_vector[i], vector++); 3758 3759 wrfl(); 3760 } 3761 3762 /** 3763 * igc_irq_enable - Enable default interrupt generation settings 3764 * @adapter: board private structure 3765 */ 3766 static void igc_irq_enable(struct igc_adapter *adapter) 3767 { 3768 struct igc_hw *hw = &adapter->hw; 3769 3770 if (adapter->msix_entries) { 3771 u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA; 3772 u32 regval = rd32(IGC_EIAC); 3773 3774 wr32(IGC_EIAC, regval | adapter->eims_enable_mask); 3775 regval = rd32(IGC_EIAM); 3776 wr32(IGC_EIAM, regval | adapter->eims_enable_mask); 3777 wr32(IGC_EIMS, adapter->eims_enable_mask); 3778 wr32(IGC_IMS, ims); 3779 } else { 3780 wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA); 3781 wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA); 3782 } 3783 } 3784 3785 /** 3786 * igc_irq_disable - Mask off interrupt generation on the NIC 3787 * @adapter: board private structure 3788 */ 3789 static void igc_irq_disable(struct igc_adapter *adapter) 3790 { 3791 struct igc_hw *hw = &adapter->hw; 3792 3793 if (adapter->msix_entries) { 3794 u32 regval = rd32(IGC_EIAM); 3795 3796 wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask); 3797 wr32(IGC_EIMC, adapter->eims_enable_mask); 3798 regval = rd32(IGC_EIAC); 3799 wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask); 3800 } 3801 3802 wr32(IGC_IAM, 0); 3803 wr32(IGC_IMC, ~0); 3804 wrfl(); 3805 3806 if (adapter->msix_entries) { 3807 int vector = 0, i; 3808 3809 synchronize_irq(adapter->msix_entries[vector++].vector); 3810 3811 for (i = 0; i < adapter->num_q_vectors; i++) 3812 synchronize_irq(adapter->msix_entries[vector++].vector); 3813 } else { 3814 synchronize_irq(adapter->pdev->irq); 3815 } 3816 } 3817 3818 void igc_set_flag_queue_pairs(struct igc_adapter *adapter, 3819 const u32 max_rss_queues) 3820 { 3821 /* Determine if we need to pair queues. */ 3822 /* If rss_queues > half of max_rss_queues, pair the queues in 3823 * order to conserve interrupts due to limited supply. 3824 */ 3825 if (adapter->rss_queues > (max_rss_queues / 2)) 3826 adapter->flags |= IGC_FLAG_QUEUE_PAIRS; 3827 else 3828 adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS; 3829 } 3830 3831 unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter) 3832 { 3833 return IGC_MAX_RX_QUEUES; 3834 } 3835 3836 static void igc_init_queue_configuration(struct igc_adapter *adapter) 3837 { 3838 u32 max_rss_queues; 3839 3840 max_rss_queues = igc_get_max_rss_queues(adapter); 3841 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); 3842 3843 igc_set_flag_queue_pairs(adapter, max_rss_queues); 3844 } 3845 3846 /** 3847 * igc_reset_q_vector - Reset config for interrupt vector 3848 * @adapter: board private structure to initialize 3849 * @v_idx: Index of vector to be reset 3850 * 3851 * If NAPI is enabled it will delete any references to the 3852 * NAPI struct. This is preparation for igc_free_q_vector. 
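 *
 * The adapter's tx_ring/rx_ring pointers that referenced this vector's
 * rings are cleared here as well, so the rings can no longer be reached
 * once the vector itself is freed.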
3853 */ 3854 static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx) 3855 { 3856 struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; 3857 3858 /* if we're coming from igc_set_interrupt_capability, the vectors are 3859 * not yet allocated 3860 */ 3861 if (!q_vector) 3862 return; 3863 3864 if (q_vector->tx.ring) 3865 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; 3866 3867 if (q_vector->rx.ring) 3868 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; 3869 3870 netif_napi_del(&q_vector->napi); 3871 } 3872 3873 /** 3874 * igc_free_q_vector - Free memory allocated for specific interrupt vector 3875 * @adapter: board private structure to initialize 3876 * @v_idx: Index of vector to be freed 3877 * 3878 * This function frees the memory allocated to the q_vector. 3879 */ 3880 static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx) 3881 { 3882 struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; 3883 3884 adapter->q_vector[v_idx] = NULL; 3885 3886 /* igc_get_stats64() might access the rings on this vector, 3887 * we must wait a grace period before freeing it. 3888 */ 3889 if (q_vector) 3890 kfree_rcu(q_vector, rcu); 3891 } 3892 3893 /** 3894 * igc_free_q_vectors - Free memory allocated for interrupt vectors 3895 * @adapter: board private structure to initialize 3896 * 3897 * This function frees the memory allocated to the q_vectors. In addition if 3898 * NAPI is enabled it will delete any references to the NAPI struct prior 3899 * to freeing the q_vector. 3900 */ 3901 static void igc_free_q_vectors(struct igc_adapter *adapter) 3902 { 3903 int v_idx = adapter->num_q_vectors; 3904 3905 adapter->num_tx_queues = 0; 3906 adapter->num_rx_queues = 0; 3907 adapter->num_q_vectors = 0; 3908 3909 while (v_idx--) { 3910 igc_reset_q_vector(adapter, v_idx); 3911 igc_free_q_vector(adapter, v_idx); 3912 } 3913 } 3914 3915 /** 3916 * igc_update_itr - update the dynamic ITR value based on statistics 3917 * @q_vector: pointer to q_vector 3918 * @ring_container: ring info to update the itr for 3919 * 3920 * Stores a new ITR value based on packets and byte 3921 * counts during the last interrupt. The advantage of per interrupt 3922 * computation is faster updates and more accurate ITR for the current 3923 * traffic pattern. Constants in this function were computed 3924 * based on theoretical maximum wire speed and thresholds were set based 3925 * on testing data as well as attempting to minimize response time 3926 * while increasing bulk throughput. 3927 * NOTE: These calculations are only valid when operating in a single- 3928 * queue environment. 
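 *
 * For example, a poll that averages more than 8000 bytes per packet (TSO
 * or jumbo traffic) moves a ring from lowest_latency or low_latency to
 * bulk_latency, while more than 35 packets totalling over 10000 bytes
 * with an average size below 1200 bytes moves low_latency back to
 * lowest_latency.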
3929 */ 3930 static void igc_update_itr(struct igc_q_vector *q_vector, 3931 struct igc_ring_container *ring_container) 3932 { 3933 unsigned int packets = ring_container->total_packets; 3934 unsigned int bytes = ring_container->total_bytes; 3935 u8 itrval = ring_container->itr; 3936 3937 /* no packets, exit with status unchanged */ 3938 if (packets == 0) 3939 return; 3940 3941 switch (itrval) { 3942 case lowest_latency: 3943 /* handle TSO and jumbo frames */ 3944 if (bytes / packets > 8000) 3945 itrval = bulk_latency; 3946 else if ((packets < 5) && (bytes > 512)) 3947 itrval = low_latency; 3948 break; 3949 case low_latency: /* 50 usec aka 20000 ints/s */ 3950 if (bytes > 10000) { 3951 /* this if handles the TSO accounting */ 3952 if (bytes / packets > 8000) 3953 itrval = bulk_latency; 3954 else if ((packets < 10) || ((bytes / packets) > 1200)) 3955 itrval = bulk_latency; 3956 else if ((packets > 35)) 3957 itrval = lowest_latency; 3958 } else if (bytes / packets > 2000) { 3959 itrval = bulk_latency; 3960 } else if (packets <= 2 && bytes < 512) { 3961 itrval = lowest_latency; 3962 } 3963 break; 3964 case bulk_latency: /* 250 usec aka 4000 ints/s */ 3965 if (bytes > 25000) { 3966 if (packets > 35) 3967 itrval = low_latency; 3968 } else if (bytes < 1500) { 3969 itrval = low_latency; 3970 } 3971 break; 3972 } 3973 3974 /* clear work counters since we have the values we need */ 3975 ring_container->total_bytes = 0; 3976 ring_container->total_packets = 0; 3977 3978 /* write updated itr to ring container */ 3979 ring_container->itr = itrval; 3980 } 3981 3982 static void igc_set_itr(struct igc_q_vector *q_vector) 3983 { 3984 struct igc_adapter *adapter = q_vector->adapter; 3985 u32 new_itr = q_vector->itr_val; 3986 u8 current_itr = 0; 3987 3988 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 3989 switch (adapter->link_speed) { 3990 case SPEED_10: 3991 case SPEED_100: 3992 current_itr = 0; 3993 new_itr = IGC_4K_ITR; 3994 goto set_itr_now; 3995 default: 3996 break; 3997 } 3998 3999 igc_update_itr(q_vector, &q_vector->tx); 4000 igc_update_itr(q_vector, &q_vector->rx); 4001 4002 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); 4003 4004 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 4005 if (current_itr == lowest_latency && 4006 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || 4007 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) 4008 current_itr = low_latency; 4009 4010 switch (current_itr) { 4011 /* counts and packets in update_itr are dependent on these numbers */ 4012 case lowest_latency: 4013 new_itr = IGC_70K_ITR; /* 70,000 ints/sec */ 4014 break; 4015 case low_latency: 4016 new_itr = IGC_20K_ITR; /* 20,000 ints/sec */ 4017 break; 4018 case bulk_latency: 4019 new_itr = IGC_4K_ITR; /* 4,000 ints/sec */ 4020 break; 4021 default: 4022 break; 4023 } 4024 4025 set_itr_now: 4026 if (new_itr != q_vector->itr_val) { 4027 /* this attempts to bias the interrupt rate towards Bulk 4028 * by adding intermediate steps when interrupt rate is 4029 * increasing 4030 */ 4031 new_itr = new_itr > q_vector->itr_val ? 4032 max((new_itr * q_vector->itr_val) / 4033 (new_itr + (q_vector->itr_val >> 2)), 4034 new_itr) : new_itr; 4035 /* Don't write the value here; it resets the adapter's 4036 * internal timer, and causes us to delay far longer than 4037 * we should between interrupts. Instead, we write the ITR 4038 * value at the beginning of the next interrupt so the timing 4039 * ends up being correct. 
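 * q_vector->set_itr is the marker for that deferred update: it is set
 * below together with the new itr_val and consumed the next time this
 * vector's interrupt is serviced.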
4040 */ 4041 q_vector->itr_val = new_itr; 4042 q_vector->set_itr = 1; 4043 } 4044 } 4045 4046 static void igc_reset_interrupt_capability(struct igc_adapter *adapter) 4047 { 4048 int v_idx = adapter->num_q_vectors; 4049 4050 if (adapter->msix_entries) { 4051 pci_disable_msix(adapter->pdev); 4052 kfree(adapter->msix_entries); 4053 adapter->msix_entries = NULL; 4054 } else if (adapter->flags & IGC_FLAG_HAS_MSI) { 4055 pci_disable_msi(adapter->pdev); 4056 } 4057 4058 while (v_idx--) 4059 igc_reset_q_vector(adapter, v_idx); 4060 } 4061 4062 /** 4063 * igc_set_interrupt_capability - set MSI or MSI-X if supported 4064 * @adapter: Pointer to adapter structure 4065 * @msix: boolean value for MSI-X capability 4066 * 4067 * Attempt to configure interrupts using the best available 4068 * capabilities of the hardware and kernel. 4069 */ 4070 static void igc_set_interrupt_capability(struct igc_adapter *adapter, 4071 bool msix) 4072 { 4073 int numvecs, i; 4074 int err; 4075 4076 if (!msix) 4077 goto msi_only; 4078 adapter->flags |= IGC_FLAG_HAS_MSIX; 4079 4080 /* Number of supported queues. */ 4081 adapter->num_rx_queues = adapter->rss_queues; 4082 4083 adapter->num_tx_queues = adapter->rss_queues; 4084 4085 /* start with one vector for every Rx queue */ 4086 numvecs = adapter->num_rx_queues; 4087 4088 /* if Tx handler is separate add 1 for every Tx queue */ 4089 if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) 4090 numvecs += adapter->num_tx_queues; 4091 4092 /* store the number of vectors reserved for queues */ 4093 adapter->num_q_vectors = numvecs; 4094 4095 /* add 1 vector for link status interrupts */ 4096 numvecs++; 4097 4098 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), 4099 GFP_KERNEL); 4100 4101 if (!adapter->msix_entries) 4102 return; 4103 4104 /* populate entry values */ 4105 for (i = 0; i < numvecs; i++) 4106 adapter->msix_entries[i].entry = i; 4107 4108 err = pci_enable_msix_range(adapter->pdev, 4109 adapter->msix_entries, 4110 numvecs, 4111 numvecs); 4112 if (err > 0) 4113 return; 4114 4115 kfree(adapter->msix_entries); 4116 adapter->msix_entries = NULL; 4117 4118 igc_reset_interrupt_capability(adapter); 4119 4120 msi_only: 4121 adapter->flags &= ~IGC_FLAG_HAS_MSIX; 4122 4123 adapter->rss_queues = 1; 4124 adapter->flags |= IGC_FLAG_QUEUE_PAIRS; 4125 adapter->num_rx_queues = 1; 4126 adapter->num_tx_queues = 1; 4127 adapter->num_q_vectors = 1; 4128 if (!pci_enable_msi(adapter->pdev)) 4129 adapter->flags |= IGC_FLAG_HAS_MSI; 4130 } 4131 4132 /** 4133 * igc_update_ring_itr - update the dynamic ITR value based on packet size 4134 * @q_vector: pointer to q_vector 4135 * 4136 * Stores a new ITR value based on strictly on packet size. This 4137 * algorithm is less sophisticated than that used in igc_update_itr, 4138 * due to the difficulty of synchronizing statistics across multiple 4139 * receive rings. The divisors and thresholds used by this function 4140 * were determined based on theoretical maximum wire speed and testing 4141 * data, in order to minimize response time while increasing bulk 4142 * throughput. 4143 * NOTE: This function is called only when operating in a multiqueue 4144 * receive environment. 4145 */ 4146 static void igc_update_ring_itr(struct igc_q_vector *q_vector) 4147 { 4148 struct igc_adapter *adapter = q_vector->adapter; 4149 int new_val = q_vector->itr_val; 4150 int avg_wire_size = 0; 4151 unsigned int packets; 4152 4153 /* For non-gigabit speeds, just fix the interrupt rate at 4000 4154 * ints/sec - ITR timer value of 120 ticks. 
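 * For higher speeds the value is derived below from the average wire
 * size; as a purely illustrative example, an average frame of 600 bytes
 * plus the 24 bytes of overhead added below gives 624, which falls in the
 * 300..1200 "mid-size" range and results in new_val = 624 / 3 = 208.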
4155 */ 4156 switch (adapter->link_speed) { 4157 case SPEED_10: 4158 case SPEED_100: 4159 new_val = IGC_4K_ITR; 4160 goto set_itr_val; 4161 default: 4162 break; 4163 } 4164 4165 packets = q_vector->rx.total_packets; 4166 if (packets) 4167 avg_wire_size = q_vector->rx.total_bytes / packets; 4168 4169 packets = q_vector->tx.total_packets; 4170 if (packets) 4171 avg_wire_size = max_t(u32, avg_wire_size, 4172 q_vector->tx.total_bytes / packets); 4173 4174 /* if avg_wire_size isn't set no work was done */ 4175 if (!avg_wire_size) 4176 goto clear_counts; 4177 4178 /* Add 24 bytes to size to account for CRC, preamble, and gap */ 4179 avg_wire_size += 24; 4180 4181 /* Don't starve jumbo frames */ 4182 avg_wire_size = min(avg_wire_size, 3000); 4183 4184 /* Give a little boost to mid-size frames */ 4185 if (avg_wire_size > 300 && avg_wire_size < 1200) 4186 new_val = avg_wire_size / 3; 4187 else 4188 new_val = avg_wire_size / 2; 4189 4190 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 4191 if (new_val < IGC_20K_ITR && 4192 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || 4193 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) 4194 new_val = IGC_20K_ITR; 4195 4196 set_itr_val: 4197 if (new_val != q_vector->itr_val) { 4198 q_vector->itr_val = new_val; 4199 q_vector->set_itr = 1; 4200 } 4201 clear_counts: 4202 q_vector->rx.total_bytes = 0; 4203 q_vector->rx.total_packets = 0; 4204 q_vector->tx.total_bytes = 0; 4205 q_vector->tx.total_packets = 0; 4206 } 4207 4208 static void igc_ring_irq_enable(struct igc_q_vector *q_vector) 4209 { 4210 struct igc_adapter *adapter = q_vector->adapter; 4211 struct igc_hw *hw = &adapter->hw; 4212 4213 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || 4214 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { 4215 if (adapter->num_q_vectors == 1) 4216 igc_set_itr(q_vector); 4217 else 4218 igc_update_ring_itr(q_vector); 4219 } 4220 4221 if (!test_bit(__IGC_DOWN, &adapter->state)) { 4222 if (adapter->msix_entries) 4223 wr32(IGC_EIMS, q_vector->eims_value); 4224 else 4225 igc_irq_enable(adapter); 4226 } 4227 } 4228 4229 static void igc_add_ring(struct igc_ring *ring, 4230 struct igc_ring_container *head) 4231 { 4232 head->ring = ring; 4233 head->count++; 4234 } 4235 4236 /** 4237 * igc_cache_ring_register - Descriptor ring to register mapping 4238 * @adapter: board private structure to initialize 4239 * 4240 * Once we know the feature-set enabled for the device, we'll cache 4241 * the register offset the descriptor ring is assigned to. 4242 */ 4243 static void igc_cache_ring_register(struct igc_adapter *adapter) 4244 { 4245 int i = 0, j = 0; 4246 4247 switch (adapter->hw.mac.type) { 4248 case igc_i225: 4249 default: 4250 for (; i < adapter->num_rx_queues; i++) 4251 adapter->rx_ring[i]->reg_idx = i; 4252 for (; j < adapter->num_tx_queues; j++) 4253 adapter->tx_ring[j]->reg_idx = j; 4254 break; 4255 } 4256 } 4257 4258 /** 4259 * igc_poll - NAPI Rx polling callback 4260 * @napi: napi polling structure 4261 * @budget: count of how many packets we should handle 4262 */ 4263 static int igc_poll(struct napi_struct *napi, int budget) 4264 { 4265 struct igc_q_vector *q_vector = container_of(napi, 4266 struct igc_q_vector, 4267 napi); 4268 struct igc_ring *rx_ring = q_vector->rx.ring; 4269 bool clean_complete = true; 4270 int work_done = 0; 4271 4272 if (q_vector->tx.ring) 4273 clean_complete = igc_clean_tx_irq(q_vector, budget); 4274 4275 if (rx_ring) { 4276 int cleaned = rx_ring->xsk_pool ? 
4277 igc_clean_rx_irq_zc(q_vector, budget) : 4278 igc_clean_rx_irq(q_vector, budget); 4279 4280 work_done += cleaned; 4281 if (cleaned >= budget) 4282 clean_complete = false; 4283 } 4284 4285 /* If all work not completed, return budget and keep polling */ 4286 if (!clean_complete) 4287 return budget; 4288 4289 /* Exit the polling mode, but don't re-enable interrupts if stack might 4290 * poll us due to busy-polling 4291 */ 4292 if (likely(napi_complete_done(napi, work_done))) 4293 igc_ring_irq_enable(q_vector); 4294 4295 return min(work_done, budget - 1); 4296 } 4297 4298 /** 4299 * igc_alloc_q_vector - Allocate memory for a single interrupt vector 4300 * @adapter: board private structure to initialize 4301 * @v_count: q_vectors allocated on adapter, used for ring interleaving 4302 * @v_idx: index of vector in adapter struct 4303 * @txr_count: total number of Tx rings to allocate 4304 * @txr_idx: index of first Tx ring to allocate 4305 * @rxr_count: total number of Rx rings to allocate 4306 * @rxr_idx: index of first Rx ring to allocate 4307 * 4308 * We allocate one q_vector. If allocation fails we return -ENOMEM. 4309 */ 4310 static int igc_alloc_q_vector(struct igc_adapter *adapter, 4311 unsigned int v_count, unsigned int v_idx, 4312 unsigned int txr_count, unsigned int txr_idx, 4313 unsigned int rxr_count, unsigned int rxr_idx) 4314 { 4315 struct igc_q_vector *q_vector; 4316 struct igc_ring *ring; 4317 int ring_count; 4318 4319 /* igc only supports 1 Tx and/or 1 Rx queue per vector */ 4320 if (txr_count > 1 || rxr_count > 1) 4321 return -ENOMEM; 4322 4323 ring_count = txr_count + rxr_count; 4324 4325 /* allocate q_vector and rings */ 4326 q_vector = adapter->q_vector[v_idx]; 4327 if (!q_vector) 4328 q_vector = kzalloc(struct_size(q_vector, ring, ring_count), 4329 GFP_KERNEL); 4330 else 4331 memset(q_vector, 0, struct_size(q_vector, ring, ring_count)); 4332 if (!q_vector) 4333 return -ENOMEM; 4334 4335 /* initialize NAPI */ 4336 netif_napi_add(adapter->netdev, &q_vector->napi, 4337 igc_poll, 64); 4338 4339 /* tie q_vector and adapter together */ 4340 adapter->q_vector[v_idx] = q_vector; 4341 q_vector->adapter = adapter; 4342 4343 /* initialize work limits */ 4344 q_vector->tx.work_limit = adapter->tx_work_limit; 4345 4346 /* initialize ITR configuration */ 4347 q_vector->itr_register = adapter->io_addr + IGC_EITR(0); 4348 q_vector->itr_val = IGC_START_ITR; 4349 4350 /* initialize pointer to rings */ 4351 ring = q_vector->ring; 4352 4353 /* initialize ITR */ 4354 if (rxr_count) { 4355 /* rx or rx/tx vector */ 4356 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) 4357 q_vector->itr_val = adapter->rx_itr_setting; 4358 } else { 4359 /* tx only vector */ 4360 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) 4361 q_vector->itr_val = adapter->tx_itr_setting; 4362 } 4363 4364 if (txr_count) { 4365 /* assign generic ring traits */ 4366 ring->dev = &adapter->pdev->dev; 4367 ring->netdev = adapter->netdev; 4368 4369 /* configure backlink on ring */ 4370 ring->q_vector = q_vector; 4371 4372 /* update q_vector Tx values */ 4373 igc_add_ring(ring, &q_vector->tx); 4374 4375 /* apply Tx specific ring traits */ 4376 ring->count = adapter->tx_ring_count; 4377 ring->queue_index = txr_idx; 4378 4379 /* assign ring to adapter */ 4380 adapter->tx_ring[txr_idx] = ring; 4381 4382 /* push pointer to next ring */ 4383 ring++; 4384 } 4385 4386 if (rxr_count) { 4387 /* assign generic ring traits */ 4388 ring->dev = &adapter->pdev->dev; 4389 ring->netdev = adapter->netdev; 4390 4391 /* 
configure backlink on ring */ 4392 ring->q_vector = q_vector; 4393 4394 /* update q_vector Rx values */ 4395 igc_add_ring(ring, &q_vector->rx); 4396 4397 /* apply Rx specific ring traits */ 4398 ring->count = adapter->rx_ring_count; 4399 ring->queue_index = rxr_idx; 4400 4401 /* assign ring to adapter */ 4402 adapter->rx_ring[rxr_idx] = ring; 4403 } 4404 4405 return 0; 4406 } 4407 4408 /** 4409 * igc_alloc_q_vectors - Allocate memory for interrupt vectors 4410 * @adapter: board private structure to initialize 4411 * 4412 * We allocate one q_vector per queue interrupt. If allocation fails we 4413 * return -ENOMEM. 4414 */ 4415 static int igc_alloc_q_vectors(struct igc_adapter *adapter) 4416 { 4417 int rxr_remaining = adapter->num_rx_queues; 4418 int txr_remaining = adapter->num_tx_queues; 4419 int rxr_idx = 0, txr_idx = 0, v_idx = 0; 4420 int q_vectors = adapter->num_q_vectors; 4421 int err; 4422 4423 if (q_vectors >= (rxr_remaining + txr_remaining)) { 4424 for (; rxr_remaining; v_idx++) { 4425 err = igc_alloc_q_vector(adapter, q_vectors, v_idx, 4426 0, 0, 1, rxr_idx); 4427 4428 if (err) 4429 goto err_out; 4430 4431 /* update counts and index */ 4432 rxr_remaining--; 4433 rxr_idx++; 4434 } 4435 } 4436 4437 for (; v_idx < q_vectors; v_idx++) { 4438 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); 4439 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); 4440 4441 err = igc_alloc_q_vector(adapter, q_vectors, v_idx, 4442 tqpv, txr_idx, rqpv, rxr_idx); 4443 4444 if (err) 4445 goto err_out; 4446 4447 /* update counts and index */ 4448 rxr_remaining -= rqpv; 4449 txr_remaining -= tqpv; 4450 rxr_idx++; 4451 txr_idx++; 4452 } 4453 4454 return 0; 4455 4456 err_out: 4457 adapter->num_tx_queues = 0; 4458 adapter->num_rx_queues = 0; 4459 adapter->num_q_vectors = 0; 4460 4461 while (v_idx--) 4462 igc_free_q_vector(adapter, v_idx); 4463 4464 return -ENOMEM; 4465 } 4466 4467 /** 4468 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors 4469 * @adapter: Pointer to adapter structure 4470 * @msix: boolean for MSI-X capability 4471 * 4472 * This function initializes the interrupts and allocates all of the queues. 4473 */ 4474 static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix) 4475 { 4476 struct net_device *dev = adapter->netdev; 4477 int err = 0; 4478 4479 igc_set_interrupt_capability(adapter, msix); 4480 4481 err = igc_alloc_q_vectors(adapter); 4482 if (err) { 4483 netdev_err(dev, "Unable to allocate memory for vectors\n"); 4484 goto err_alloc_q_vectors; 4485 } 4486 4487 igc_cache_ring_register(adapter); 4488 4489 return 0; 4490 4491 err_alloc_q_vectors: 4492 igc_reset_interrupt_capability(adapter); 4493 return err; 4494 } 4495 4496 /** 4497 * igc_sw_init - Initialize general software structures (struct igc_adapter) 4498 * @adapter: board private structure to initialize 4499 * 4500 * igc_sw_init initializes the Adapter private data structure. 4501 * Fields are initialized based on PCI device information and 4502 * OS network device settings (MTU size). 
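 * Default ring sizes, ITR settings and the Tx work limit are also set up
 * here, and igc_init_interrupt_scheme() is invoked, which may reduce the
 * number of queues if MSI-X vectors cannot be allocated.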
4503 */ 4504 static int igc_sw_init(struct igc_adapter *adapter) 4505 { 4506 struct net_device *netdev = adapter->netdev; 4507 struct pci_dev *pdev = adapter->pdev; 4508 struct igc_hw *hw = &adapter->hw; 4509 4510 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); 4511 4512 /* set default ring sizes */ 4513 adapter->tx_ring_count = IGC_DEFAULT_TXD; 4514 adapter->rx_ring_count = IGC_DEFAULT_RXD; 4515 4516 /* set default ITR values */ 4517 adapter->rx_itr_setting = IGC_DEFAULT_ITR; 4518 adapter->tx_itr_setting = IGC_DEFAULT_ITR; 4519 4520 /* set default work limits */ 4521 adapter->tx_work_limit = IGC_DEFAULT_TX_WORK; 4522 4523 /* adjust max frame to be at least the size of a standard frame */ 4524 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + 4525 VLAN_HLEN; 4526 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 4527 4528 mutex_init(&adapter->nfc_rule_lock); 4529 INIT_LIST_HEAD(&adapter->nfc_rule_list); 4530 adapter->nfc_rule_count = 0; 4531 4532 spin_lock_init(&adapter->stats64_lock); 4533 /* Assume MSI-X interrupts, will be checked during IRQ allocation */ 4534 adapter->flags |= IGC_FLAG_HAS_MSIX; 4535 4536 igc_init_queue_configuration(adapter); 4537 4538 /* This call may decrease the number of queues */ 4539 if (igc_init_interrupt_scheme(adapter, true)) { 4540 netdev_err(netdev, "Unable to allocate memory for queues\n"); 4541 return -ENOMEM; 4542 } 4543 4544 /* Explicitly disable IRQ since the NIC can be in any state. */ 4545 igc_irq_disable(adapter); 4546 4547 set_bit(__IGC_DOWN, &adapter->state); 4548 4549 return 0; 4550 } 4551 4552 /** 4553 * igc_up - Open the interface and prepare it to handle traffic 4554 * @adapter: board private structure 4555 */ 4556 void igc_up(struct igc_adapter *adapter) 4557 { 4558 struct igc_hw *hw = &adapter->hw; 4559 int i = 0; 4560 4561 /* hardware has been reset, we need to reload some things */ 4562 igc_configure(adapter); 4563 4564 clear_bit(__IGC_DOWN, &adapter->state); 4565 4566 for (i = 0; i < adapter->num_q_vectors; i++) 4567 napi_enable(&adapter->q_vector[i]->napi); 4568 4569 if (adapter->msix_entries) 4570 igc_configure_msix(adapter); 4571 else 4572 igc_assign_vector(adapter->q_vector[0], 0); 4573 4574 /* Clear any pending interrupts. */ 4575 rd32(IGC_ICR); 4576 igc_irq_enable(adapter); 4577 4578 netif_tx_start_all_queues(adapter->netdev); 4579 4580 /* start the watchdog. */ 4581 hw->mac.get_link_status = true; 4582 schedule_work(&adapter->watchdog_task); 4583 } 4584 4585 /** 4586 * igc_update_stats - Update the board statistics counters 4587 * @adapter: board private structure 4588 */ 4589 void igc_update_stats(struct igc_adapter *adapter) 4590 { 4591 struct rtnl_link_stats64 *net_stats = &adapter->stats64; 4592 struct pci_dev *pdev = adapter->pdev; 4593 struct igc_hw *hw = &adapter->hw; 4594 u64 _bytes, _packets; 4595 u64 bytes, packets; 4596 unsigned int start; 4597 u32 mpc; 4598 int i; 4599 4600 /* Prevent stats update while adapter is being reset, or if the pci 4601 * connection is down. 
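 * A link_speed of zero is used as the "adapter is down or resetting"
 * indicator here; it is cleared in igc_down() before the reset runs.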
4602 */ 4603 if (adapter->link_speed == 0) 4604 return; 4605 if (pci_channel_offline(pdev)) 4606 return; 4607 4608 packets = 0; 4609 bytes = 0; 4610 4611 rcu_read_lock(); 4612 for (i = 0; i < adapter->num_rx_queues; i++) { 4613 struct igc_ring *ring = adapter->rx_ring[i]; 4614 u32 rqdpc = rd32(IGC_RQDPC(i)); 4615 4616 if (hw->mac.type >= igc_i225) 4617 wr32(IGC_RQDPC(i), 0); 4618 4619 if (rqdpc) { 4620 ring->rx_stats.drops += rqdpc; 4621 net_stats->rx_fifo_errors += rqdpc; 4622 } 4623 4624 do { 4625 start = u64_stats_fetch_begin_irq(&ring->rx_syncp); 4626 _bytes = ring->rx_stats.bytes; 4627 _packets = ring->rx_stats.packets; 4628 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); 4629 bytes += _bytes; 4630 packets += _packets; 4631 } 4632 4633 net_stats->rx_bytes = bytes; 4634 net_stats->rx_packets = packets; 4635 4636 packets = 0; 4637 bytes = 0; 4638 for (i = 0; i < adapter->num_tx_queues; i++) { 4639 struct igc_ring *ring = adapter->tx_ring[i]; 4640 4641 do { 4642 start = u64_stats_fetch_begin_irq(&ring->tx_syncp); 4643 _bytes = ring->tx_stats.bytes; 4644 _packets = ring->tx_stats.packets; 4645 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); 4646 bytes += _bytes; 4647 packets += _packets; 4648 } 4649 net_stats->tx_bytes = bytes; 4650 net_stats->tx_packets = packets; 4651 rcu_read_unlock(); 4652 4653 /* read stats registers */ 4654 adapter->stats.crcerrs += rd32(IGC_CRCERRS); 4655 adapter->stats.gprc += rd32(IGC_GPRC); 4656 adapter->stats.gorc += rd32(IGC_GORCL); 4657 rd32(IGC_GORCH); /* clear GORCL */ 4658 adapter->stats.bprc += rd32(IGC_BPRC); 4659 adapter->stats.mprc += rd32(IGC_MPRC); 4660 adapter->stats.roc += rd32(IGC_ROC); 4661 4662 adapter->stats.prc64 += rd32(IGC_PRC64); 4663 adapter->stats.prc127 += rd32(IGC_PRC127); 4664 adapter->stats.prc255 += rd32(IGC_PRC255); 4665 adapter->stats.prc511 += rd32(IGC_PRC511); 4666 adapter->stats.prc1023 += rd32(IGC_PRC1023); 4667 adapter->stats.prc1522 += rd32(IGC_PRC1522); 4668 adapter->stats.tlpic += rd32(IGC_TLPIC); 4669 adapter->stats.rlpic += rd32(IGC_RLPIC); 4670 adapter->stats.hgptc += rd32(IGC_HGPTC); 4671 4672 mpc = rd32(IGC_MPC); 4673 adapter->stats.mpc += mpc; 4674 net_stats->rx_fifo_errors += mpc; 4675 adapter->stats.scc += rd32(IGC_SCC); 4676 adapter->stats.ecol += rd32(IGC_ECOL); 4677 adapter->stats.mcc += rd32(IGC_MCC); 4678 adapter->stats.latecol += rd32(IGC_LATECOL); 4679 adapter->stats.dc += rd32(IGC_DC); 4680 adapter->stats.rlec += rd32(IGC_RLEC); 4681 adapter->stats.xonrxc += rd32(IGC_XONRXC); 4682 adapter->stats.xontxc += rd32(IGC_XONTXC); 4683 adapter->stats.xoffrxc += rd32(IGC_XOFFRXC); 4684 adapter->stats.xofftxc += rd32(IGC_XOFFTXC); 4685 adapter->stats.fcruc += rd32(IGC_FCRUC); 4686 adapter->stats.gptc += rd32(IGC_GPTC); 4687 adapter->stats.gotc += rd32(IGC_GOTCL); 4688 rd32(IGC_GOTCH); /* clear GOTCL */ 4689 adapter->stats.rnbc += rd32(IGC_RNBC); 4690 adapter->stats.ruc += rd32(IGC_RUC); 4691 adapter->stats.rfc += rd32(IGC_RFC); 4692 adapter->stats.rjc += rd32(IGC_RJC); 4693 adapter->stats.tor += rd32(IGC_TORH); 4694 adapter->stats.tot += rd32(IGC_TOTH); 4695 adapter->stats.tpr += rd32(IGC_TPR); 4696 4697 adapter->stats.ptc64 += rd32(IGC_PTC64); 4698 adapter->stats.ptc127 += rd32(IGC_PTC127); 4699 adapter->stats.ptc255 += rd32(IGC_PTC255); 4700 adapter->stats.ptc511 += rd32(IGC_PTC511); 4701 adapter->stats.ptc1023 += rd32(IGC_PTC1023); 4702 adapter->stats.ptc1522 += rd32(IGC_PTC1522); 4703 4704 adapter->stats.mptc += rd32(IGC_MPTC); 4705 adapter->stats.bptc += rd32(IGC_BPTC); 4706 4707 
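	/* remaining totals: packet, collision, error and interrupt
	 * assertion counters
	 */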
adapter->stats.tpt += rd32(IGC_TPT); 4708 adapter->stats.colc += rd32(IGC_COLC); 4709 adapter->stats.colc += rd32(IGC_RERC); 4710 4711 adapter->stats.algnerrc += rd32(IGC_ALGNERRC); 4712 4713 adapter->stats.tsctc += rd32(IGC_TSCTC); 4714 4715 adapter->stats.iac += rd32(IGC_IAC); 4716 4717 /* Fill out the OS statistics structure */ 4718 net_stats->multicast = adapter->stats.mprc; 4719 net_stats->collisions = adapter->stats.colc; 4720 4721 /* Rx Errors */ 4722 4723 /* RLEC on some newer hardware can be incorrect so build 4724 * our own version based on RUC and ROC 4725 */ 4726 net_stats->rx_errors = adapter->stats.rxerrc + 4727 adapter->stats.crcerrs + adapter->stats.algnerrc + 4728 adapter->stats.ruc + adapter->stats.roc + 4729 adapter->stats.cexterr; 4730 net_stats->rx_length_errors = adapter->stats.ruc + 4731 adapter->stats.roc; 4732 net_stats->rx_crc_errors = adapter->stats.crcerrs; 4733 net_stats->rx_frame_errors = adapter->stats.algnerrc; 4734 net_stats->rx_missed_errors = adapter->stats.mpc; 4735 4736 /* Tx Errors */ 4737 net_stats->tx_errors = adapter->stats.ecol + 4738 adapter->stats.latecol; 4739 net_stats->tx_aborted_errors = adapter->stats.ecol; 4740 net_stats->tx_window_errors = adapter->stats.latecol; 4741 net_stats->tx_carrier_errors = adapter->stats.tncrs; 4742 4743 /* Tx Dropped needs to be maintained elsewhere */ 4744 4745 /* Management Stats */ 4746 adapter->stats.mgptc += rd32(IGC_MGTPTC); 4747 adapter->stats.mgprc += rd32(IGC_MGTPRC); 4748 adapter->stats.mgpdc += rd32(IGC_MGTPDC); 4749 } 4750 4751 /** 4752 * igc_down - Close the interface 4753 * @adapter: board private structure 4754 */ 4755 void igc_down(struct igc_adapter *adapter) 4756 { 4757 struct net_device *netdev = adapter->netdev; 4758 struct igc_hw *hw = &adapter->hw; 4759 u32 tctl, rctl; 4760 int i = 0; 4761 4762 set_bit(__IGC_DOWN, &adapter->state); 4763 4764 igc_ptp_suspend(adapter); 4765 4766 /* disable receives in the hardware */ 4767 rctl = rd32(IGC_RCTL); 4768 wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); 4769 /* flush and sleep below */ 4770 4771 /* set trans_start so we don't get spurious watchdogs during reset */ 4772 netif_trans_update(netdev); 4773 4774 netif_carrier_off(netdev); 4775 netif_tx_stop_all_queues(netdev); 4776 4777 /* disable transmits in the hardware */ 4778 tctl = rd32(IGC_TCTL); 4779 tctl &= ~IGC_TCTL_EN; 4780 wr32(IGC_TCTL, tctl); 4781 /* flush both disables and wait for them to finish */ 4782 wrfl(); 4783 usleep_range(10000, 20000); 4784 4785 igc_irq_disable(adapter); 4786 4787 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; 4788 4789 for (i = 0; i < adapter->num_q_vectors; i++) { 4790 if (adapter->q_vector[i]) { 4791 napi_synchronize(&adapter->q_vector[i]->napi); 4792 napi_disable(&adapter->q_vector[i]->napi); 4793 } 4794 } 4795 4796 del_timer_sync(&adapter->watchdog_timer); 4797 del_timer_sync(&adapter->phy_info_timer); 4798 4799 /* record the stats before reset*/ 4800 spin_lock(&adapter->stats64_lock); 4801 igc_update_stats(adapter); 4802 spin_unlock(&adapter->stats64_lock); 4803 4804 adapter->link_speed = 0; 4805 adapter->link_duplex = 0; 4806 4807 if (!pci_channel_offline(adapter->pdev)) 4808 igc_reset(adapter); 4809 4810 /* clear VLAN promisc flag so VFTA will be updated if necessary */ 4811 adapter->flags &= ~IGC_FLAG_VLAN_PROMISC; 4812 4813 igc_clean_all_tx_rings(adapter); 4814 igc_clean_all_rx_rings(adapter); 4815 } 4816 4817 void igc_reinit_locked(struct igc_adapter *adapter) 4818 { 4819 while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) 4820 usleep_range(1000, 2000); 4821 
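	/* __IGC_RESETTING is now held by this context; restart the
	 * interface and release the bit when done.
	 */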
igc_down(adapter); 4822 igc_up(adapter); 4823 clear_bit(__IGC_RESETTING, &adapter->state); 4824 } 4825 4826 static void igc_reset_task(struct work_struct *work) 4827 { 4828 struct igc_adapter *adapter; 4829 4830 adapter = container_of(work, struct igc_adapter, reset_task); 4831 4832 rtnl_lock(); 4833 /* If we're already down or resetting, just bail */ 4834 if (test_bit(__IGC_DOWN, &adapter->state) || 4835 test_bit(__IGC_RESETTING, &adapter->state)) { 4836 rtnl_unlock(); 4837 return; 4838 } 4839 4840 igc_rings_dump(adapter); 4841 igc_regs_dump(adapter); 4842 netdev_err(adapter->netdev, "Reset adapter\n"); 4843 igc_reinit_locked(adapter); 4844 rtnl_unlock(); 4845 } 4846 4847 /** 4848 * igc_change_mtu - Change the Maximum Transfer Unit 4849 * @netdev: network interface device structure 4850 * @new_mtu: new value for maximum frame size 4851 * 4852 * Returns 0 on success, negative on failure 4853 */ 4854 static int igc_change_mtu(struct net_device *netdev, int new_mtu) 4855 { 4856 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 4857 struct igc_adapter *adapter = netdev_priv(netdev); 4858 4859 if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) { 4860 netdev_dbg(netdev, "Jumbo frames not supported with XDP"); 4861 return -EINVAL; 4862 } 4863 4864 /* adjust max frame to be at least the size of a standard frame */ 4865 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) 4866 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; 4867 4868 while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) 4869 usleep_range(1000, 2000); 4870 4871 /* igc_down has a dependency on max_frame_size */ 4872 adapter->max_frame_size = max_frame; 4873 4874 if (netif_running(netdev)) 4875 igc_down(adapter); 4876 4877 netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); 4878 netdev->mtu = new_mtu; 4879 4880 if (netif_running(netdev)) 4881 igc_up(adapter); 4882 else 4883 igc_reset(adapter); 4884 4885 clear_bit(__IGC_RESETTING, &adapter->state); 4886 4887 return 0; 4888 } 4889 4890 /** 4891 * igc_get_stats64 - Get System Network Statistics 4892 * @netdev: network interface device structure 4893 * @stats: rtnl_link_stats64 pointer 4894 * 4895 * Returns the address of the device statistics structure. 4896 * The statistics are updated here and also from the timer callback. 4897 */ 4898 static void igc_get_stats64(struct net_device *netdev, 4899 struct rtnl_link_stats64 *stats) 4900 { 4901 struct igc_adapter *adapter = netdev_priv(netdev); 4902 4903 spin_lock(&adapter->stats64_lock); 4904 if (!test_bit(__IGC_RESETTING, &adapter->state)) 4905 igc_update_stats(adapter); 4906 memcpy(stats, &adapter->stats64, sizeof(*stats)); 4907 spin_unlock(&adapter->stats64_lock); 4908 } 4909 4910 static netdev_features_t igc_fix_features(struct net_device *netdev, 4911 netdev_features_t features) 4912 { 4913 /* Since there is no support for separate Rx/Tx vlan accel 4914 * enable/disable make sure Tx flag is always in same state as Rx. 
4915 */ 4916 if (features & NETIF_F_HW_VLAN_CTAG_RX) 4917 features |= NETIF_F_HW_VLAN_CTAG_TX; 4918 else 4919 features &= ~NETIF_F_HW_VLAN_CTAG_TX; 4920 4921 return features; 4922 } 4923 4924 static int igc_set_features(struct net_device *netdev, 4925 netdev_features_t features) 4926 { 4927 netdev_features_t changed = netdev->features ^ features; 4928 struct igc_adapter *adapter = netdev_priv(netdev); 4929 4930 if (changed & NETIF_F_HW_VLAN_CTAG_RX) 4931 igc_vlan_mode(netdev, features); 4932 4933 /* Add VLAN support */ 4934 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) 4935 return 0; 4936 4937 if (!(features & NETIF_F_NTUPLE)) 4938 igc_flush_nfc_rules(adapter); 4939 4940 netdev->features = features; 4941 4942 if (netif_running(netdev)) 4943 igc_reinit_locked(adapter); 4944 else 4945 igc_reset(adapter); 4946 4947 return 1; 4948 } 4949 4950 static netdev_features_t 4951 igc_features_check(struct sk_buff *skb, struct net_device *dev, 4952 netdev_features_t features) 4953 { 4954 unsigned int network_hdr_len, mac_hdr_len; 4955 4956 /* Make certain the headers can be described by a context descriptor */ 4957 mac_hdr_len = skb_network_header(skb) - skb->data; 4958 if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN)) 4959 return features & ~(NETIF_F_HW_CSUM | 4960 NETIF_F_SCTP_CRC | 4961 NETIF_F_HW_VLAN_CTAG_TX | 4962 NETIF_F_TSO | 4963 NETIF_F_TSO6); 4964 4965 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); 4966 if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN)) 4967 return features & ~(NETIF_F_HW_CSUM | 4968 NETIF_F_SCTP_CRC | 4969 NETIF_F_TSO | 4970 NETIF_F_TSO6); 4971 4972 /* We can only support IPv4 TSO in tunnels if we can mangle the 4973 * inner IP ID field, so strip TSO if MANGLEID is not supported. 4974 */ 4975 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) 4976 features &= ~NETIF_F_TSO; 4977 4978 return features; 4979 } 4980 4981 static void igc_tsync_interrupt(struct igc_adapter *adapter) 4982 { 4983 u32 ack, tsauxc, sec, nsec, tsicr; 4984 struct igc_hw *hw = &adapter->hw; 4985 struct ptp_clock_event event; 4986 struct timespec64 ts; 4987 4988 tsicr = rd32(IGC_TSICR); 4989 ack = 0; 4990 4991 if (tsicr & IGC_TSICR_SYS_WRAP) { 4992 event.type = PTP_CLOCK_PPS; 4993 if (adapter->ptp_caps.pps) 4994 ptp_clock_event(adapter->ptp_clock, &event); 4995 ack |= IGC_TSICR_SYS_WRAP; 4996 } 4997 4998 if (tsicr & IGC_TSICR_TXTS) { 4999 /* retrieve hardware timestamp */ 5000 schedule_work(&adapter->ptp_tx_work); 5001 ack |= IGC_TSICR_TXTS; 5002 } 5003 5004 if (tsicr & IGC_TSICR_TT0) { 5005 spin_lock(&adapter->tmreg_lock); 5006 ts = timespec64_add(adapter->perout[0].start, 5007 adapter->perout[0].period); 5008 wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); 5009 wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec); 5010 tsauxc = rd32(IGC_TSAUXC); 5011 tsauxc |= IGC_TSAUXC_EN_TT0; 5012 wr32(IGC_TSAUXC, tsauxc); 5013 adapter->perout[0].start = ts; 5014 spin_unlock(&adapter->tmreg_lock); 5015 ack |= IGC_TSICR_TT0; 5016 } 5017 5018 if (tsicr & IGC_TSICR_TT1) { 5019 spin_lock(&adapter->tmreg_lock); 5020 ts = timespec64_add(adapter->perout[1].start, 5021 adapter->perout[1].period); 5022 wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); 5023 wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec); 5024 tsauxc = rd32(IGC_TSAUXC); 5025 tsauxc |= IGC_TSAUXC_EN_TT1; 5026 wr32(IGC_TSAUXC, tsauxc); 5027 adapter->perout[1].start = ts; 5028 spin_unlock(&adapter->tmreg_lock); 5029 ack |= IGC_TSICR_TT1; 5030 } 5031 5032 if (tsicr & IGC_TSICR_AUTT0) { 5033 nsec = rd32(IGC_AUXSTMPL0); 5034 
sec = rd32(IGC_AUXSTMPH0); 5035 event.type = PTP_CLOCK_EXTTS; 5036 event.index = 0; 5037 event.timestamp = sec * NSEC_PER_SEC + nsec; 5038 ptp_clock_event(adapter->ptp_clock, &event); 5039 ack |= IGC_TSICR_AUTT0; 5040 } 5041 5042 if (tsicr & IGC_TSICR_AUTT1) { 5043 nsec = rd32(IGC_AUXSTMPL1); 5044 sec = rd32(IGC_AUXSTMPH1); 5045 event.type = PTP_CLOCK_EXTTS; 5046 event.index = 1; 5047 event.timestamp = sec * NSEC_PER_SEC + nsec; 5048 ptp_clock_event(adapter->ptp_clock, &event); 5049 ack |= IGC_TSICR_AUTT1; 5050 } 5051 5052 /* acknowledge the interrupts */ 5053 wr32(IGC_TSICR, ack); 5054 } 5055 5056 /** 5057 * igc_msix_other - msix other interrupt handler 5058 * @irq: interrupt number 5059 * @data: pointer to a q_vector 5060 */ 5061 static irqreturn_t igc_msix_other(int irq, void *data) 5062 { 5063 struct igc_adapter *adapter = data; 5064 struct igc_hw *hw = &adapter->hw; 5065 u32 icr = rd32(IGC_ICR); 5066 5067 /* reading ICR causes bit 31 of EICR to be cleared */ 5068 if (icr & IGC_ICR_DRSTA) 5069 schedule_work(&adapter->reset_task); 5070 5071 if (icr & IGC_ICR_DOUTSYNC) { 5072 /* HW is reporting DMA is out of sync */ 5073 adapter->stats.doosync++; 5074 } 5075 5076 if (icr & IGC_ICR_LSC) { 5077 hw->mac.get_link_status = true; 5078 /* guard against interrupt when we're going down */ 5079 if (!test_bit(__IGC_DOWN, &adapter->state)) 5080 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5081 } 5082 5083 if (icr & IGC_ICR_TS) 5084 igc_tsync_interrupt(adapter); 5085 5086 wr32(IGC_EIMS, adapter->eims_other); 5087 5088 return IRQ_HANDLED; 5089 } 5090 5091 static void igc_write_itr(struct igc_q_vector *q_vector) 5092 { 5093 u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK; 5094 5095 if (!q_vector->set_itr) 5096 return; 5097 5098 if (!itr_val) 5099 itr_val = IGC_ITR_VAL_MASK; 5100 5101 itr_val |= IGC_EITR_CNT_IGNR; 5102 5103 writel(itr_val, q_vector->itr_register); 5104 q_vector->set_itr = 0; 5105 } 5106 5107 static irqreturn_t igc_msix_ring(int irq, void *data) 5108 { 5109 struct igc_q_vector *q_vector = data; 5110 5111 /* Write the ITR value calculated from the previous interrupt. */ 5112 igc_write_itr(q_vector); 5113 5114 napi_schedule(&q_vector->napi); 5115 5116 return IRQ_HANDLED; 5117 } 5118 5119 /** 5120 * igc_request_msix - Initialize MSI-X interrupts 5121 * @adapter: Pointer to adapter structure 5122 * 5123 * igc_request_msix allocates MSI-X vectors and requests interrupts from the 5124 * kernel. 
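 * Vector 0 is used for the "other" interrupt (link status changes,
 * timesync and other non-queue events); the remaining vectors are
 * requested one per queue vector and named after the netdev and the
 * queue they service.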
5125 */ 5126 static int igc_request_msix(struct igc_adapter *adapter) 5127 { 5128 unsigned int num_q_vectors = adapter->num_q_vectors; 5129 int i = 0, err = 0, vector = 0, free_vector = 0; 5130 struct net_device *netdev = adapter->netdev; 5131 5132 err = request_irq(adapter->msix_entries[vector].vector, 5133 &igc_msix_other, 0, netdev->name, adapter); 5134 if (err) 5135 goto err_out; 5136 5137 if (num_q_vectors > MAX_Q_VECTORS) { 5138 num_q_vectors = MAX_Q_VECTORS; 5139 dev_warn(&adapter->pdev->dev, 5140 "The number of queue vectors (%d) is higher than max allowed (%d)\n", 5141 adapter->num_q_vectors, MAX_Q_VECTORS); 5142 } 5143 for (i = 0; i < num_q_vectors; i++) { 5144 struct igc_q_vector *q_vector = adapter->q_vector[i]; 5145 5146 vector++; 5147 5148 q_vector->itr_register = adapter->io_addr + IGC_EITR(vector); 5149 5150 if (q_vector->rx.ring && q_vector->tx.ring) 5151 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, 5152 q_vector->rx.ring->queue_index); 5153 else if (q_vector->tx.ring) 5154 sprintf(q_vector->name, "%s-tx-%u", netdev->name, 5155 q_vector->tx.ring->queue_index); 5156 else if (q_vector->rx.ring) 5157 sprintf(q_vector->name, "%s-rx-%u", netdev->name, 5158 q_vector->rx.ring->queue_index); 5159 else 5160 sprintf(q_vector->name, "%s-unused", netdev->name); 5161 5162 err = request_irq(adapter->msix_entries[vector].vector, 5163 igc_msix_ring, 0, q_vector->name, 5164 q_vector); 5165 if (err) 5166 goto err_free; 5167 } 5168 5169 igc_configure_msix(adapter); 5170 return 0; 5171 5172 err_free: 5173 /* free already assigned IRQs */ 5174 free_irq(adapter->msix_entries[free_vector++].vector, adapter); 5175 5176 vector--; 5177 for (i = 0; i < vector; i++) { 5178 free_irq(adapter->msix_entries[free_vector++].vector, 5179 adapter->q_vector[i]); 5180 } 5181 err_out: 5182 return err; 5183 } 5184 5185 /** 5186 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts 5187 * @adapter: Pointer to adapter structure 5188 * 5189 * This function resets the device so that it has 0 rx queues, tx queues, and 5190 * MSI-X interrupts allocated. 5191 */ 5192 static void igc_clear_interrupt_scheme(struct igc_adapter *adapter) 5193 { 5194 igc_free_q_vectors(adapter); 5195 igc_reset_interrupt_capability(adapter); 5196 } 5197 5198 /* Need to wait a few seconds after link up to get diagnostic information from 5199 * the phy 5200 */ 5201 static void igc_update_phy_info(struct timer_list *t) 5202 { 5203 struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer); 5204 5205 igc_get_phy_info(&adapter->hw); 5206 } 5207 5208 /** 5209 * igc_has_link - check shared code for link and determine up/down 5210 * @adapter: pointer to driver private info 5211 */ 5212 bool igc_has_link(struct igc_adapter *adapter) 5213 { 5214 struct igc_hw *hw = &adapter->hw; 5215 bool link_active = false; 5216 5217 /* get_link_status is set on LSC (link status) interrupt or 5218 * rx sequence error interrupt. 
get_link_status will stay 5219 * false until the igc_check_for_link establishes link 5220 * for copper adapters ONLY 5221 */ 5222 if (!hw->mac.get_link_status) 5223 return true; 5224 hw->mac.ops.check_for_link(hw); 5225 link_active = !hw->mac.get_link_status; 5226 5227 if (hw->mac.type == igc_i225) { 5228 if (!netif_carrier_ok(adapter->netdev)) { 5229 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; 5230 } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { 5231 adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE; 5232 adapter->link_check_timeout = jiffies; 5233 } 5234 } 5235 5236 return link_active; 5237 } 5238 5239 /** 5240 * igc_watchdog - Timer Call-back 5241 * @t: timer for the watchdog 5242 */ 5243 static void igc_watchdog(struct timer_list *t) 5244 { 5245 struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer); 5246 /* Do the rest outside of interrupt context */ 5247 schedule_work(&adapter->watchdog_task); 5248 } 5249 5250 static void igc_watchdog_task(struct work_struct *work) 5251 { 5252 struct igc_adapter *adapter = container_of(work, 5253 struct igc_adapter, 5254 watchdog_task); 5255 struct net_device *netdev = adapter->netdev; 5256 struct igc_hw *hw = &adapter->hw; 5257 struct igc_phy_info *phy = &hw->phy; 5258 u16 phy_data, retry_count = 20; 5259 u32 link; 5260 int i; 5261 5262 link = igc_has_link(adapter); 5263 5264 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) { 5265 if (time_after(jiffies, (adapter->link_check_timeout + HZ))) 5266 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; 5267 else 5268 link = false; 5269 } 5270 5271 if (link) { 5272 /* Cancel scheduled suspend requests. */ 5273 pm_runtime_resume(netdev->dev.parent); 5274 5275 if (!netif_carrier_ok(netdev)) { 5276 u32 ctrl; 5277 5278 hw->mac.ops.get_speed_and_duplex(hw, 5279 &adapter->link_speed, 5280 &adapter->link_duplex); 5281 5282 ctrl = rd32(IGC_CTRL); 5283 /* Link status message must follow this format */ 5284 netdev_info(netdev, 5285 "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", 5286 adapter->link_speed, 5287 adapter->link_duplex == FULL_DUPLEX ? 5288 "Full" : "Half", 5289 (ctrl & IGC_CTRL_TFCE) && 5290 (ctrl & IGC_CTRL_RFCE) ? "RX/TX" : 5291 (ctrl & IGC_CTRL_RFCE) ? "RX" : 5292 (ctrl & IGC_CTRL_TFCE) ? "TX" : "None"); 5293 5294 /* disable EEE if enabled */ 5295 if ((adapter->flags & IGC_FLAG_EEE) && 5296 adapter->link_duplex == HALF_DUPLEX) { 5297 netdev_info(netdev, 5298 "EEE Disabled: unsupported at half duplex. 
Re-enable using ethtool when at full duplex\n"); 5299 adapter->hw.dev_spec._base.eee_enable = false; 5300 adapter->flags &= ~IGC_FLAG_EEE; 5301 } 5302 5303 /* check if SmartSpeed worked */ 5304 igc_check_downshift(hw); 5305 if (phy->speed_downgraded) 5306 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); 5307 5308 /* adjust timeout factor according to speed/duplex */ 5309 adapter->tx_timeout_factor = 1; 5310 switch (adapter->link_speed) { 5311 case SPEED_10: 5312 adapter->tx_timeout_factor = 14; 5313 break; 5314 case SPEED_100: 5315 case SPEED_1000: 5316 case SPEED_2500: 5317 adapter->tx_timeout_factor = 7; 5318 break; 5319 } 5320 5321 if (adapter->link_speed != SPEED_1000) 5322 goto no_wait; 5323 5324 /* wait for Remote receiver status OK */ 5325 retry_read_status: 5326 if (!igc_read_phy_reg(hw, PHY_1000T_STATUS, 5327 &phy_data)) { 5328 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && 5329 retry_count) { 5330 msleep(100); 5331 retry_count--; 5332 goto retry_read_status; 5333 } else if (!retry_count) { 5334 netdev_err(netdev, "exceed max 2 second\n"); 5335 } 5336 } else { 5337 netdev_err(netdev, "read 1000Base-T Status Reg\n"); 5338 } 5339 no_wait: 5340 netif_carrier_on(netdev); 5341 5342 /* link state has changed, schedule phy info update */ 5343 if (!test_bit(__IGC_DOWN, &adapter->state)) 5344 mod_timer(&adapter->phy_info_timer, 5345 round_jiffies(jiffies + 2 * HZ)); 5346 } 5347 } else { 5348 if (netif_carrier_ok(netdev)) { 5349 adapter->link_speed = 0; 5350 adapter->link_duplex = 0; 5351 5352 /* Links status message must follow this format */ 5353 netdev_info(netdev, "NIC Link is Down\n"); 5354 netif_carrier_off(netdev); 5355 5356 /* link state has changed, schedule phy info update */ 5357 if (!test_bit(__IGC_DOWN, &adapter->state)) 5358 mod_timer(&adapter->phy_info_timer, 5359 round_jiffies(jiffies + 2 * HZ)); 5360 5361 /* link is down, time to check for alternate media */ 5362 if (adapter->flags & IGC_FLAG_MAS_ENABLE) { 5363 if (adapter->flags & IGC_FLAG_MEDIA_RESET) { 5364 schedule_work(&adapter->reset_task); 5365 /* return immediately */ 5366 return; 5367 } 5368 } 5369 pm_schedule_suspend(netdev->dev.parent, 5370 MSEC_PER_SEC * 5); 5371 5372 /* also check for alternate media here */ 5373 } else if (!netif_carrier_ok(netdev) && 5374 (adapter->flags & IGC_FLAG_MAS_ENABLE)) { 5375 if (adapter->flags & IGC_FLAG_MEDIA_RESET) { 5376 schedule_work(&adapter->reset_task); 5377 /* return immediately */ 5378 return; 5379 } 5380 } 5381 } 5382 5383 spin_lock(&adapter->stats64_lock); 5384 igc_update_stats(adapter); 5385 spin_unlock(&adapter->stats64_lock); 5386 5387 for (i = 0; i < adapter->num_tx_queues; i++) { 5388 struct igc_ring *tx_ring = adapter->tx_ring[i]; 5389 5390 if (!netif_carrier_ok(netdev)) { 5391 /* We've lost link, so the controller stops DMA, 5392 * but we've got queued Tx work that's never going 5393 * to get done, so reset controller to flush Tx. 5394 * (Do the reset outside of interrupt context). 
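 * A ring is treated as still having queued work whenever
 * igc_desc_unused() shows it is not completely empty.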
5395 */ 5396 if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) { 5397 adapter->tx_timeout_count++; 5398 schedule_work(&adapter->reset_task); 5399 /* return immediately since reset is imminent */ 5400 return; 5401 } 5402 } 5403 5404 /* Force detection of hung controller every watchdog period */ 5405 set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 5406 } 5407 5408 /* Cause software interrupt to ensure Rx ring is cleaned */ 5409 if (adapter->flags & IGC_FLAG_HAS_MSIX) { 5410 u32 eics = 0; 5411 5412 for (i = 0; i < adapter->num_q_vectors; i++) 5413 eics |= adapter->q_vector[i]->eims_value; 5414 wr32(IGC_EICS, eics); 5415 } else { 5416 wr32(IGC_ICS, IGC_ICS_RXDMT0); 5417 } 5418 5419 igc_ptp_tx_hang(adapter); 5420 5421 /* Reset the timer */ 5422 if (!test_bit(__IGC_DOWN, &adapter->state)) { 5423 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) 5424 mod_timer(&adapter->watchdog_timer, 5425 round_jiffies(jiffies + HZ)); 5426 else 5427 mod_timer(&adapter->watchdog_timer, 5428 round_jiffies(jiffies + 2 * HZ)); 5429 } 5430 } 5431 5432 /** 5433 * igc_intr_msi - Interrupt Handler 5434 * @irq: interrupt number 5435 * @data: pointer to a network interface device structure 5436 */ 5437 static irqreturn_t igc_intr_msi(int irq, void *data) 5438 { 5439 struct igc_adapter *adapter = data; 5440 struct igc_q_vector *q_vector = adapter->q_vector[0]; 5441 struct igc_hw *hw = &adapter->hw; 5442 /* read ICR disables interrupts using IAM */ 5443 u32 icr = rd32(IGC_ICR); 5444 5445 igc_write_itr(q_vector); 5446 5447 if (icr & IGC_ICR_DRSTA) 5448 schedule_work(&adapter->reset_task); 5449 5450 if (icr & IGC_ICR_DOUTSYNC) { 5451 /* HW is reporting DMA is out of sync */ 5452 adapter->stats.doosync++; 5453 } 5454 5455 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { 5456 hw->mac.get_link_status = true; 5457 if (!test_bit(__IGC_DOWN, &adapter->state)) 5458 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5459 } 5460 5461 napi_schedule(&q_vector->napi); 5462 5463 return IRQ_HANDLED; 5464 } 5465 5466 /** 5467 * igc_intr - Legacy Interrupt Handler 5468 * @irq: interrupt number 5469 * @data: pointer to a network interface device structure 5470 */ 5471 static irqreturn_t igc_intr(int irq, void *data) 5472 { 5473 struct igc_adapter *adapter = data; 5474 struct igc_q_vector *q_vector = adapter->q_vector[0]; 5475 struct igc_hw *hw = &adapter->hw; 5476 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. 
No 5477 * need for the IMC write 5478 */ 5479 u32 icr = rd32(IGC_ICR); 5480 5481 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is 5482 * not set, then the adapter didn't send an interrupt 5483 */ 5484 if (!(icr & IGC_ICR_INT_ASSERTED)) 5485 return IRQ_NONE; 5486 5487 igc_write_itr(q_vector); 5488 5489 if (icr & IGC_ICR_DRSTA) 5490 schedule_work(&adapter->reset_task); 5491 5492 if (icr & IGC_ICR_DOUTSYNC) { 5493 /* HW is reporting DMA is out of sync */ 5494 adapter->stats.doosync++; 5495 } 5496 5497 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { 5498 hw->mac.get_link_status = true; 5499 /* guard against interrupt when we're going down */ 5500 if (!test_bit(__IGC_DOWN, &adapter->state)) 5501 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5502 } 5503 5504 napi_schedule(&q_vector->napi); 5505 5506 return IRQ_HANDLED; 5507 } 5508 5509 static void igc_free_irq(struct igc_adapter *adapter) 5510 { 5511 if (adapter->msix_entries) { 5512 int vector = 0, i; 5513 5514 free_irq(adapter->msix_entries[vector++].vector, adapter); 5515 5516 for (i = 0; i < adapter->num_q_vectors; i++) 5517 free_irq(adapter->msix_entries[vector++].vector, 5518 adapter->q_vector[i]); 5519 } else { 5520 free_irq(adapter->pdev->irq, adapter); 5521 } 5522 } 5523 5524 /** 5525 * igc_request_irq - initialize interrupts 5526 * @adapter: Pointer to adapter structure 5527 * 5528 * Attempts to configure interrupts using the best available 5529 * capabilities of the hardware and kernel. 5530 */ 5531 static int igc_request_irq(struct igc_adapter *adapter) 5532 { 5533 struct net_device *netdev = adapter->netdev; 5534 struct pci_dev *pdev = adapter->pdev; 5535 int err = 0; 5536 5537 if (adapter->flags & IGC_FLAG_HAS_MSIX) { 5538 err = igc_request_msix(adapter); 5539 if (!err) 5540 goto request_done; 5541 /* fall back to MSI */ 5542 igc_free_all_tx_resources(adapter); 5543 igc_free_all_rx_resources(adapter); 5544 5545 igc_clear_interrupt_scheme(adapter); 5546 err = igc_init_interrupt_scheme(adapter, false); 5547 if (err) 5548 goto request_done; 5549 igc_setup_all_tx_resources(adapter); 5550 igc_setup_all_rx_resources(adapter); 5551 igc_configure(adapter); 5552 } 5553 5554 igc_assign_vector(adapter->q_vector[0], 0); 5555 5556 if (adapter->flags & IGC_FLAG_HAS_MSI) { 5557 err = request_irq(pdev->irq, &igc_intr_msi, 0, 5558 netdev->name, adapter); 5559 if (!err) 5560 goto request_done; 5561 5562 /* fall back to legacy interrupts */ 5563 igc_reset_interrupt_capability(adapter); 5564 adapter->flags &= ~IGC_FLAG_HAS_MSI; 5565 } 5566 5567 err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, 5568 netdev->name, adapter); 5569 5570 if (err) 5571 netdev_err(netdev, "Error %d getting interrupt\n", err); 5572 5573 request_done: 5574 return err; 5575 } 5576 5577 /** 5578 * __igc_open - Called when a network interface is made active 5579 * @netdev: network interface device structure 5580 * @resuming: boolean indicating if the device is resuming 5581 * 5582 * Returns 0 on success, negative value on failure 5583 * 5584 * The open entry point is called when a network interface is made 5585 * active by the system (IFF_UP). At this point all resources needed 5586 * for transmit and receive operations are allocated, the interrupt 5587 * handler is registered with the OS, the watchdog timer is started, 5588 * and the stack is notified that the interface is ready. 
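 * When called from the resume path (@resuming == true) the runtime PM
 * get/put pair below is skipped, as the device is already being brought
 * back up by the PM core.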
5589 */ 5590 static int __igc_open(struct net_device *netdev, bool resuming) 5591 { 5592 struct igc_adapter *adapter = netdev_priv(netdev); 5593 struct pci_dev *pdev = adapter->pdev; 5594 struct igc_hw *hw = &adapter->hw; 5595 int err = 0; 5596 int i = 0; 5597 5598 /* disallow open during test */ 5599 5600 if (test_bit(__IGC_TESTING, &adapter->state)) { 5601 WARN_ON(resuming); 5602 return -EBUSY; 5603 } 5604 5605 if (!resuming) 5606 pm_runtime_get_sync(&pdev->dev); 5607 5608 netif_carrier_off(netdev); 5609 5610 /* allocate transmit descriptors */ 5611 err = igc_setup_all_tx_resources(adapter); 5612 if (err) 5613 goto err_setup_tx; 5614 5615 /* allocate receive descriptors */ 5616 err = igc_setup_all_rx_resources(adapter); 5617 if (err) 5618 goto err_setup_rx; 5619 5620 igc_power_up_link(adapter); 5621 5622 igc_configure(adapter); 5623 5624 err = igc_request_irq(adapter); 5625 if (err) 5626 goto err_req_irq; 5627 5628 /* Notify the stack of the actual queue counts. */ 5629 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); 5630 if (err) 5631 goto err_set_queues; 5632 5633 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); 5634 if (err) 5635 goto err_set_queues; 5636 5637 clear_bit(__IGC_DOWN, &adapter->state); 5638 5639 for (i = 0; i < adapter->num_q_vectors; i++) 5640 napi_enable(&adapter->q_vector[i]->napi); 5641 5642 /* Clear any pending interrupts. */ 5643 rd32(IGC_ICR); 5644 igc_irq_enable(adapter); 5645 5646 if (!resuming) 5647 pm_runtime_put(&pdev->dev); 5648 5649 netif_tx_start_all_queues(netdev); 5650 5651 /* start the watchdog. */ 5652 hw->mac.get_link_status = true; 5653 schedule_work(&adapter->watchdog_task); 5654 5655 return IGC_SUCCESS; 5656 5657 err_set_queues: 5658 igc_free_irq(adapter); 5659 err_req_irq: 5660 igc_release_hw_control(adapter); 5661 igc_power_down_phy_copper_base(&adapter->hw); 5662 igc_free_all_rx_resources(adapter); 5663 err_setup_rx: 5664 igc_free_all_tx_resources(adapter); 5665 err_setup_tx: 5666 igc_reset(adapter); 5667 if (!resuming) 5668 pm_runtime_put(&pdev->dev); 5669 5670 return err; 5671 } 5672 5673 int igc_open(struct net_device *netdev) 5674 { 5675 return __igc_open(netdev, false); 5676 } 5677 5678 /** 5679 * __igc_close - Disables a network interface 5680 * @netdev: network interface device structure 5681 * @suspending: boolean indicating the device is suspending 5682 * 5683 * Returns 0, this is not allowed to fail 5684 * 5685 * The close entry point is called when an interface is de-activated 5686 * by the OS. The hardware is still under the driver's control, but 5687 * needs to be disabled. A global MAC reset is issued to stop the 5688 * hardware, and all transmit and receive resources are freed. 
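 * Callers are expected not to race this with a reset; the
 * WARN_ON(__IGC_RESETTING) check below asserts that expectation.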
5689 */ 5690 static int __igc_close(struct net_device *netdev, bool suspending) 5691 { 5692 struct igc_adapter *adapter = netdev_priv(netdev); 5693 struct pci_dev *pdev = adapter->pdev; 5694 5695 WARN_ON(test_bit(__IGC_RESETTING, &adapter->state)); 5696 5697 if (!suspending) 5698 pm_runtime_get_sync(&pdev->dev); 5699 5700 igc_down(adapter); 5701 5702 igc_release_hw_control(adapter); 5703 5704 igc_free_irq(adapter); 5705 5706 igc_free_all_tx_resources(adapter); 5707 igc_free_all_rx_resources(adapter); 5708 5709 if (!suspending) 5710 pm_runtime_put_sync(&pdev->dev); 5711 5712 return 0; 5713 } 5714 5715 int igc_close(struct net_device *netdev) 5716 { 5717 if (netif_device_present(netdev) || netdev->dismantle) 5718 return __igc_close(netdev, false); 5719 return 0; 5720 } 5721 5722 /** 5723 * igc_ioctl - Access the hwtstamp interface 5724 * @netdev: network interface device structure 5725 * @ifr: interface request data 5726 * @cmd: ioctl command 5727 **/ 5728 static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 5729 { 5730 switch (cmd) { 5731 case SIOCGHWTSTAMP: 5732 return igc_ptp_get_ts_config(netdev, ifr); 5733 case SIOCSHWTSTAMP: 5734 return igc_ptp_set_ts_config(netdev, ifr); 5735 default: 5736 return -EOPNOTSUPP; 5737 } 5738 } 5739 5740 static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue, 5741 bool enable) 5742 { 5743 struct igc_ring *ring; 5744 int i; 5745 5746 if (queue < 0 || queue >= adapter->num_tx_queues) 5747 return -EINVAL; 5748 5749 ring = adapter->tx_ring[queue]; 5750 ring->launchtime_enable = enable; 5751 5752 if (adapter->base_time) 5753 return 0; 5754 5755 adapter->cycle_time = NSEC_PER_SEC; 5756 5757 for (i = 0; i < adapter->num_tx_queues; i++) { 5758 ring = adapter->tx_ring[i]; 5759 ring->start_time = 0; 5760 ring->end_time = NSEC_PER_SEC; 5761 } 5762 5763 return 0; 5764 } 5765 5766 static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now) 5767 { 5768 struct timespec64 b; 5769 5770 b = ktime_to_timespec64(base_time); 5771 5772 return timespec64_compare(now, &b) > 0; 5773 } 5774 5775 static bool validate_schedule(struct igc_adapter *adapter, 5776 const struct tc_taprio_qopt_offload *qopt) 5777 { 5778 int queue_uses[IGC_MAX_TX_QUEUES] = { }; 5779 struct timespec64 now; 5780 size_t n; 5781 5782 if (qopt->cycle_time_extension) 5783 return false; 5784 5785 igc_ptp_read(adapter, &now); 5786 5787 /* If we program the controller's BASET registers with a time 5788 * in the future, it will hold all the packets until that 5789 * time, causing a lot of TX Hangs, so to avoid that, we 5790 * reject schedules that would start in the future. 5791 */ 5792 if (!is_base_time_past(qopt->base_time, &now)) 5793 return false; 5794 5795 for (n = 0; n < qopt->num_entries; n++) { 5796 const struct tc_taprio_sched_entry *e; 5797 int i; 5798 5799 e = &qopt->entries[n]; 5800 5801 /* i225 only supports "global" frame preemption 5802 * settings. 
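 * Because of that, every schedule entry must be a plain set-gates
 * command; anything else is rejected below.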
5803 */ 5804 if (e->command != TC_TAPRIO_CMD_SET_GATES) 5805 return false; 5806 5807 for (i = 0; i < IGC_MAX_TX_QUEUES; i++) { 5808 if (e->gate_mask & BIT(i)) 5809 queue_uses[i]++; 5810 5811 if (queue_uses[i] > 1) 5812 return false; 5813 } 5814 } 5815 5816 return true; 5817 } 5818 5819 static int igc_tsn_enable_launchtime(struct igc_adapter *adapter, 5820 struct tc_etf_qopt_offload *qopt) 5821 { 5822 struct igc_hw *hw = &adapter->hw; 5823 int err; 5824 5825 if (hw->mac.type != igc_i225) 5826 return -EOPNOTSUPP; 5827 5828 err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable); 5829 if (err) 5830 return err; 5831 5832 return igc_tsn_offload_apply(adapter); 5833 } 5834 5835 static int igc_save_qbv_schedule(struct igc_adapter *adapter, 5836 struct tc_taprio_qopt_offload *qopt) 5837 { 5838 u32 start_time = 0, end_time = 0; 5839 size_t n; 5840 5841 if (!qopt->enable) { 5842 adapter->base_time = 0; 5843 return 0; 5844 } 5845 5846 if (adapter->base_time) 5847 return -EALREADY; 5848 5849 if (!validate_schedule(adapter, qopt)) 5850 return -EINVAL; 5851 5852 adapter->cycle_time = qopt->cycle_time; 5853 adapter->base_time = qopt->base_time; 5854 5855 /* FIXME: be a little smarter about cases when the gate for a 5856 * queue stays open for more than one entry. 5857 */ 5858 for (n = 0; n < qopt->num_entries; n++) { 5859 struct tc_taprio_sched_entry *e = &qopt->entries[n]; 5860 int i; 5861 5862 end_time += e->interval; 5863 5864 for (i = 0; i < IGC_MAX_TX_QUEUES; i++) { 5865 struct igc_ring *ring = adapter->tx_ring[i]; 5866 5867 if (!(e->gate_mask & BIT(i))) 5868 continue; 5869 5870 ring->start_time = start_time; 5871 ring->end_time = end_time; 5872 } 5873 5874 start_time += e->interval; 5875 } 5876 5877 return 0; 5878 } 5879 5880 static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter, 5881 struct tc_taprio_qopt_offload *qopt) 5882 { 5883 struct igc_hw *hw = &adapter->hw; 5884 int err; 5885 5886 if (hw->mac.type != igc_i225) 5887 return -EOPNOTSUPP; 5888 5889 err = igc_save_qbv_schedule(adapter, qopt); 5890 if (err) 5891 return err; 5892 5893 return igc_tsn_offload_apply(adapter); 5894 } 5895 5896 static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type, 5897 void *type_data) 5898 { 5899 struct igc_adapter *adapter = netdev_priv(dev); 5900 5901 switch (type) { 5902 case TC_SETUP_QDISC_TAPRIO: 5903 return igc_tsn_enable_qbv_scheduling(adapter, type_data); 5904 5905 case TC_SETUP_QDISC_ETF: 5906 return igc_tsn_enable_launchtime(adapter, type_data); 5907 5908 default: 5909 return -EOPNOTSUPP; 5910 } 5911 } 5912 5913 static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf) 5914 { 5915 struct igc_adapter *adapter = netdev_priv(dev); 5916 5917 switch (bpf->command) { 5918 case XDP_SETUP_PROG: 5919 return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack); 5920 case XDP_SETUP_XSK_POOL: 5921 return igc_xdp_setup_pool(adapter, bpf->xsk.pool, 5922 bpf->xsk.queue_id); 5923 default: 5924 return -EOPNOTSUPP; 5925 } 5926 } 5927 5928 static int igc_xdp_xmit(struct net_device *dev, int num_frames, 5929 struct xdp_frame **frames, u32 flags) 5930 { 5931 struct igc_adapter *adapter = netdev_priv(dev); 5932 int cpu = smp_processor_id(); 5933 struct netdev_queue *nq; 5934 struct igc_ring *ring; 5935 int i, drops; 5936 5937 if (unlikely(test_bit(__IGC_DOWN, &adapter->state))) 5938 return -ENETDOWN; 5939 5940 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 5941 return -EINVAL; 5942 5943 ring = igc_xdp_get_tx_ring(adapter, cpu); 5944 nq = txring_txq(ring); 5945 5946 
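	/* XDP transmit shares the ring with the regular xmit path, so take
	 * the netdev queue lock to serialize access while queueing frames.
	 */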
__netif_tx_lock(nq, cpu); 5947 5948 drops = 0; 5949 for (i = 0; i < num_frames; i++) { 5950 int err; 5951 struct xdp_frame *xdpf = frames[i]; 5952 5953 err = igc_xdp_init_tx_descriptor(ring, xdpf); 5954 if (err) { 5955 xdp_return_frame_rx_napi(xdpf); 5956 drops++; 5957 } 5958 } 5959 5960 if (flags & XDP_XMIT_FLUSH) 5961 igc_flush_tx_descriptors(ring); 5962 5963 __netif_tx_unlock(nq); 5964 5965 return num_frames - drops; 5966 } 5967 5968 static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter, 5969 struct igc_q_vector *q_vector) 5970 { 5971 struct igc_hw *hw = &adapter->hw; 5972 u32 eics = 0; 5973 5974 eics |= q_vector->eims_value; 5975 wr32(IGC_EICS, eics); 5976 } 5977 5978 int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) 5979 { 5980 struct igc_adapter *adapter = netdev_priv(dev); 5981 struct igc_q_vector *q_vector; 5982 struct igc_ring *ring; 5983 5984 if (test_bit(__IGC_DOWN, &adapter->state)) 5985 return -ENETDOWN; 5986 5987 if (!igc_xdp_is_enabled(adapter)) 5988 return -ENXIO; 5989 5990 if (queue_id >= adapter->num_rx_queues) 5991 return -EINVAL; 5992 5993 ring = adapter->rx_ring[queue_id]; 5994 5995 if (!ring->xsk_pool) 5996 return -ENXIO; 5997 5998 q_vector = adapter->q_vector[queue_id]; 5999 if (!napi_if_scheduled_mark_missed(&q_vector->napi)) 6000 igc_trigger_rxtxq_interrupt(adapter, q_vector); 6001 6002 return 0; 6003 } 6004 6005 static const struct net_device_ops igc_netdev_ops = { 6006 .ndo_open = igc_open, 6007 .ndo_stop = igc_close, 6008 .ndo_start_xmit = igc_xmit_frame, 6009 .ndo_set_rx_mode = igc_set_rx_mode, 6010 .ndo_set_mac_address = igc_set_mac, 6011 .ndo_change_mtu = igc_change_mtu, 6012 .ndo_get_stats64 = igc_get_stats64, 6013 .ndo_fix_features = igc_fix_features, 6014 .ndo_set_features = igc_set_features, 6015 .ndo_features_check = igc_features_check, 6016 .ndo_eth_ioctl = igc_ioctl, 6017 .ndo_setup_tc = igc_setup_tc, 6018 .ndo_bpf = igc_bpf, 6019 .ndo_xdp_xmit = igc_xdp_xmit, 6020 .ndo_xsk_wakeup = igc_xsk_wakeup, 6021 }; 6022 6023 /* PCIe configuration access */ 6024 void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) 6025 { 6026 struct igc_adapter *adapter = hw->back; 6027 6028 pci_read_config_word(adapter->pdev, reg, value); 6029 } 6030 6031 void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) 6032 { 6033 struct igc_adapter *adapter = hw->back; 6034 6035 pci_write_config_word(adapter->pdev, reg, *value); 6036 } 6037 6038 s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) 6039 { 6040 struct igc_adapter *adapter = hw->back; 6041 6042 if (!pci_is_pcie(adapter->pdev)) 6043 return -IGC_ERR_CONFIG; 6044 6045 pcie_capability_read_word(adapter->pdev, reg, value); 6046 6047 return IGC_SUCCESS; 6048 } 6049 6050 s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) 6051 { 6052 struct igc_adapter *adapter = hw->back; 6053 6054 if (!pci_is_pcie(adapter->pdev)) 6055 return -IGC_ERR_CONFIG; 6056 6057 pcie_capability_write_word(adapter->pdev, reg, *value); 6058 6059 return IGC_SUCCESS; 6060 } 6061 6062 u32 igc_rd32(struct igc_hw *hw, u32 reg) 6063 { 6064 struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw); 6065 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); 6066 u32 value = 0; 6067 6068 value = readl(&hw_addr[reg]); 6069 6070 /* reads should not return all F's */ 6071 if (!(~value) && (!reg || !(~readl(hw_addr)))) { 6072 struct net_device *netdev = igc->netdev; 6073 6074 hw->hw_addr = NULL; 6075 netif_device_detach(netdev); 6076 netdev_err(netdev, "PCIe link lost, device now detached\n"); 
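		/* Only warn when the device is still present; an all-ones
		 * read is expected after surprise removal.
		 */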
		WARN(pci_device_is_present(igc->pdev),
		     "igc: Failed to read reg 0x%x!\n", reg);
	}

	return value;
}

int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx)
{
	struct igc_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = false;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work
	 */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = true;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
		goto err_inval;
	case SPEED_2500 + DUPLEX_FULL:
		mac->autoneg = true;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
		break;
	case SPEED_2500 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}

	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
	adapter->hw.phy.mdix = AUTO_ALL_MODES;

	return 0;

err_inval:
	netdev_err(adapter->netdev, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}

/**
 * igc_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igc_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igc_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring the adapter private structure,
 * and a hardware reset occur.
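 * On failure, the error labels at the end of the function release any
 * resources acquired up to that point, in reverse order.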
 */
static int igc_probe(struct pci_dev *pdev,
		     const struct pci_device_id *ent)
{
	struct igc_adapter *adapter;
	struct net_device *netdev;
	struct igc_hw *hw;
	const struct igc_info *ei = igc_info_tbl[ent->driver_data];
	int err, pci_using_dac;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_dma;
		}
	}

	err = pci_request_mem_regions(pdev, igc_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igc_adapter),
				   IGC_MAX_TX_QUEUES);

	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->port_num = hw->bus.func;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	err = pci_save_state(pdev);
	if (err)
		goto err_ioremap;

	err = -EIO;
	adapter->io_addr = ioremap(pci_resource_start(pdev, 0),
				   pci_resource_len(pdev, 0));
	if (!adapter->io_addr)
		goto err_ioremap;

	/* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */
	hw->hw_addr = adapter->io_addr;

	netdev->netdev_ops = &igc_netdev_ops;
	igc_ethtool_set_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	netdev->mem_start = pci_resource_start(pdev, 0);
	netdev->mem_end = pci_resource_end(pdev, 0);

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Copy the default MAC and PHY function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));

	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

	/* Add supported features to the features list */
	netdev->features |= NETIF_F_SG;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	netdev->features |= NETIF_F_TSO_ECN;
	netdev->features |= NETIF_F_RXCSUM;
	netdev->features |= NETIF_F_HW_CSUM;
	netdev->features |= NETIF_F_SCTP_CRC;
	netdev->features |= NETIF_F_HW_TC;

#define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
				  NETIF_F_GSO_GRE_CSUM | \
				  NETIF_F_GSO_IPXIP4 | \
				  NETIF_F_GSO_IPXIP6 | \
				  NETIF_F_GSO_UDP_TUNNEL | \
				  NETIF_F_GSO_UDP_TUNNEL_CSUM)

	netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES;
	netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES;

	/* setup the private structure */
	err = igc_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= NETIF_F_NTUPLE;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->hw_features |= netdev->features;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= netdev->features;

	/* MTU range: 68 - 9216 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state
	 */
	hw->mac.ops.reset_hw(hw);

	if (igc_get_flash_presence_i225(hw)) {
		if (hw->nvm.ops.validate(hw) < 0) {
			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_eeprom;
		}
	}

	if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
		/* copy the MAC address out of the NVM */
		if (hw->mac.ops.read_mac_addr(hw))
			dev_err(&pdev->dev, "NVM Read Error\n");
	}

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* configure RXPBSIZE and TXPBSIZE */
	wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
	wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);

	timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
	timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);

	INIT_WORK(&adapter->reset_task, igc_reset_task);
	INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0xaf;

	hw->fc.requested_mode = igc_fc_default;
	hw->fc.current_mode = igc_fc_default;

	/* By default, support wake on port A */
	adapter->flags |= IGC_FLAG_WOL_SUPPORTED;

	/* initialize the wol settings based on the eeprom settings */
	if (adapter->flags & IGC_FLAG_WOL_SUPPORTED)
		adapter->wol |= IGC_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev,
				 adapter->flags & IGC_FLAG_WOL_SUPPORTED);

	igc_ptp_init(adapter);

	/* reset the hardware with the new settings */
	igc_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);

	strncpy(netdev->name, "eth%d", IFNAMSIZ);
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	/* keep a copy of the board-specific info for later use */
	adapter->ei = *ei;

	/* print pcie link status and MAC address */
	pcie_print_link_status(pdev);
	netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);

	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
	/* Disable EEE for internal PHY devices */
	hw->dev_spec._base.eee_enable = false;
	adapter->flags &= ~IGC_FLAG_EEE;
	igc_set_eee_i225(hw, false, false, false);

	pm_runtime_put_noidle(&pdev->dev);

	return 0;

err_register:
	igc_release_hw_control(adapter);
err_eeprom:
	if (!igc_check_reset_block(hw))
		igc_reset_phy(hw);
err_sw_init:
	igc_clear_interrupt_scheme(adapter);
	iounmap(adapter->io_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * igc_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igc_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
static void igc_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	pm_runtime_get_noresume(&pdev->dev);

	igc_flush_nfc_rules(adapter);

	igc_ptp_stop(adapter);

	set_bit(__IGC_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	igc_release_hw_control(adapter);
	unregister_netdev(netdev);

	igc_clear_interrupt_scheme(adapter);
	pci_iounmap(pdev, adapter->io_addr);
	pci_release_mem_regions(pdev);

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

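/* Common power-down path for suspend, runtime suspend and shutdown: detach
 * the netdev, stop the interface if it is running and, when wake-up is
 * requested, program the wake-up filters before handing control back to
 * the firmware.
 */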
static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
			  bool runtime)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol;
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	bool wake;

	rtnl_lock();
	netif_device_detach(netdev);

	if (netif_running(netdev))
		__igc_close(netdev, true);

	igc_ptp_suspend(adapter);

	igc_clear_interrupt_scheme(adapter);
	rtnl_unlock();

	status = rd32(IGC_STATUS);
	if (status & IGC_STATUS_LU)
		wufc &= ~IGC_WUFC_LNKC;

	if (wufc) {
		igc_setup_rctl(adapter);
		igc_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & IGC_WUFC_MC) {
			rctl = rd32(IGC_RCTL);
			rctl |= IGC_RCTL_MPE;
			wr32(IGC_RCTL, rctl);
		}

		ctrl = rd32(IGC_CTRL);
		ctrl |= IGC_CTRL_ADVD3WUC;
		wr32(IGC_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igc_disable_pcie_master(hw);

		wr32(IGC_WUC, IGC_WUC_PME_EN);
		wr32(IGC_WUFC, wufc);
	} else {
		wr32(IGC_WUC, 0);
		wr32(IGC_WUFC, 0);
	}

	wake = wufc || adapter->en_mng_pt;
	if (!wake)
		igc_power_down_phy_copper_base(&adapter->hw);
	else
		igc_power_up_link(adapter);

	if (enable_wake)
		*enable_wake = wake;

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	igc_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int __maybe_unused igc_runtime_suspend(struct device *dev)
{
	return __igc_shutdown(to_pci_dev(dev), NULL, 1);
}

static void igc_deliver_wake_packet(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	u32 wupl;

	wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK;

	/* WUPM stores only the first 128 bytes of the wake packet.
	 * Read the packet only if we have the whole thing.
	 */
	if (wupl == 0 || wupl > IGC_WUPM_BYTES)
		return;

	skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES);
	if (!skb)
		return;

	skb_put(skb, wupl);

	/* Ensure reads are 32-bit aligned */
	wupl = roundup(wupl, 4);

	memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl);

	skb->protocol = eth_type_trans(skb, netdev);
	netif_rx(skb);
}

static int __maybe_unused igc_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	u32 err, val;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!pci_device_is_present(pdev))
		return -ENODEV;
	err = pci_enable_device_mem(pdev);
	if (err) {
		netdev_err(netdev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igc_init_interrupt_scheme(adapter, true)) {
		netdev_err(netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igc_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);

	val = rd32(IGC_WUS);
	if (val & WAKE_PKT_WUS)
		igc_deliver_wake_packet(netdev);

	wr32(IGC_WUS, ~0);

	rtnl_lock();
	if (!err && netif_running(netdev))
		err = __igc_open(netdev, true);

	if (!err)
		netif_device_attach(netdev);
	rtnl_unlock();

	return err;
}

static int __maybe_unused igc_runtime_resume(struct device *dev)
{
	return igc_resume(dev);
}

static int __maybe_unused igc_suspend(struct device *dev)
{
	return __igc_shutdown(to_pci_dev(dev), NULL, 0);
}

static int __maybe_unused igc_runtime_idle(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	if (!igc_has_link(adapter))
		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);

	return -EBUSY;
}
#endif /* CONFIG_PM */

static void igc_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igc_shutdown(pdev, &wake, 0);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

/**
 * igc_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igc_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igc_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igc_resume routine.
 **/
static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	pci_ers_result_t result;

	if (pci_enable_device_mem(pdev)) {
		netdev_err(netdev, "Could not re-enable PCI device after reset\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		/* In case of PCI error, adapter loses its HW address
		 * so we should re-assign it here.
		 */
		hw->hw_addr = adapter->io_addr;

		igc_reset(adapter);
		wr32(IGC_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	return result;
}

/**
 * igc_io_resume - called when traffic can start to flow again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igc_resume routine.
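 * The rtnl lock is held while the interface is brought back up.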
 */
static void igc_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	rtnl_lock();
	if (netif_running(netdev)) {
		if (igc_open(netdev)) {
			netdev_err(netdev, "igc_open failed after reset\n");
			rtnl_unlock();
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);
	rtnl_unlock();
}

static const struct pci_error_handlers igc_err_handler = {
	.error_detected = igc_io_error_detected,
	.slot_reset = igc_io_slot_reset,
	.resume = igc_io_resume,
};

#ifdef CONFIG_PM
static const struct dev_pm_ops igc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
	SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume,
			   igc_runtime_idle)
};
#endif

static struct pci_driver igc_driver = {
	.name = igc_driver_name,
	.id_table = igc_pci_tbl,
	.probe = igc_probe,
	.remove = igc_remove,
#ifdef CONFIG_PM
	.driver.pm = &igc_pm_ops,
#endif
	.shutdown = igc_shutdown,
	.err_handler = &igc_err_handler,
};

/**
 * igc_reinit_queues - reinitialize the device's queues
 * @adapter: pointer to adapter structure
 *
 * Returns 0 on success, negative on failure.
 */
int igc_reinit_queues(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;

	if (netif_running(netdev))
		igc_close(netdev);

	igc_reset_interrupt_capability(adapter);

	if (igc_init_interrupt_scheme(adapter, true)) {
		netdev_err(netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	if (netif_running(netdev))
		err = igc_open(netdev);

	return err;
}

/**
 * igc_get_hw_dev - return device
 * @hw: pointer to hardware structure
 *
 * used by hardware layer to print debugging information
 */
struct net_device *igc_get_hw_dev(struct igc_hw *hw)
{
	struct igc_adapter *adapter = hw->back;

	return adapter->netdev;
}

static void igc_disable_rx_ring_hw(struct igc_ring *ring)
{
	struct igc_hw *hw = &ring->q_vector->adapter->hw;
	u8 idx = ring->reg_idx;
	u32 rxdctl;

	rxdctl = rd32(IGC_RXDCTL(idx));
	rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE;
	rxdctl |= IGC_RXDCTL_SWFLUSH;
	wr32(IGC_RXDCTL(idx), rxdctl);
}

void igc_disable_rx_ring(struct igc_ring *ring)
{
	igc_disable_rx_ring_hw(ring);
	igc_clean_rx_ring(ring);
}

void igc_enable_rx_ring(struct igc_ring *ring)
{
	struct igc_adapter *adapter = ring->q_vector->adapter;

	igc_configure_rx_ring(adapter, ring);

	if (ring->xsk_pool)
		igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
	else
		igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
}

static void igc_disable_tx_ring_hw(struct igc_ring *ring)
{
	struct igc_hw *hw = &ring->q_vector->adapter->hw;
	u8 idx = ring->reg_idx;
	u32 txdctl;

	txdctl = rd32(IGC_TXDCTL(idx));
	txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
	txdctl |= IGC_TXDCTL_SWFLUSH;
	wr32(IGC_TXDCTL(idx), txdctl);
}

void igc_disable_tx_ring(struct igc_ring *ring)
{
	igc_disable_tx_ring_hw(ring);
	igc_clean_tx_ring(ring);
}

void igc_enable_tx_ring(struct igc_ring *ring)
{
	struct igc_adapter *adapter = ring->q_vector->adapter;

	igc_configure_tx_ring(adapter, ring);
}

/**
 * igc_init_module - Driver Registration Routine
 *
 * igc_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init igc_init_module(void)
{
	int ret;

	pr_info("%s\n", igc_driver_string);
	pr_info("%s\n", igc_copyright);

	ret = pci_register_driver(&igc_driver);
	return ret;
}

module_init(igc_init_module);

/**
 * igc_exit_module - Driver Exit Cleanup Routine
 *
 * igc_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit igc_exit_module(void)
{
	pci_unregister_driver(&igc_driver);
}

module_exit(igc_exit_module);
/* igc_main.c */