Lines Matching +full:disable +full:- +full:eop

1 // SPDX-License-Identifier: GPL-2.0
33 static int debug = -1;
81 struct net_device *dev = adapter->netdev; in igc_reset()
82 struct igc_hw *hw = &adapter->hw; in igc_reset()
83 struct igc_fc_info *fc = &hw->fc; in igc_reset()
95 * - the full Rx FIFO size minus one full Tx plus one full Rx frame in igc_reset()
97 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); in igc_reset()
99 fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ in igc_reset()
100 fc->low_water = fc->high_water - 16; in igc_reset()
101 fc->pause_time = 0xFFFF; in igc_reset()
102 fc->send_xon = 1; in igc_reset()
103 fc->current_mode = fc->requested_mode; in igc_reset()
105 hw->mac.ops.reset_hw(hw); in igc_reset()
107 if (hw->mac.ops.init_hw(hw)) in igc_reset()
110 /* Re-establish EEE setting */ in igc_reset()
113 if (!netif_running(adapter->netdev)) in igc_reset()
114 igc_power_down_phy_copper_base(&adapter->hw); in igc_reset()
119 /* Re-enable PTP, where applicable. */ in igc_reset()
122 /* Re-enable TSN offloading, where applicable. */ in igc_reset()
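The igc_reset() lines above size the flow-control watermarks from the packet buffer allocation (pba is in KB, hence the << 10). A minimal sketch of that arithmetic, with hypothetical locals standing in for the adapter and fc state:

        /* leave room for one outgoing and one incoming maximum-size frame,
         * then round the high watermark down to 16-byte granularity; the
         * low watermark sits 16 bytes below it
         */
        u32 hwm = (pba << 10) - (max_frame_size + MAX_JUMBO_FRAME_SIZE);

        fc->high_water = hwm & 0xFFFFFFF0;      /* 16-byte granularity */
        fc->low_water  = fc->high_water - 16;
        fc->pause_time = 0xFFFF;                /* longest allowed pause */
        fc->send_xon   = 1;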
129 * igc_power_up_link - Power up the phy link
134 igc_reset_phy(&adapter->hw); in igc_power_up_link()
136 igc_power_up_phy_copper(&adapter->hw); in igc_power_up_link()
138 igc_setup_link(&adapter->hw); in igc_power_up_link()
142 * igc_release_hw_control - release control of the h/w to f/w
151 struct igc_hw *hw = &adapter->hw; in igc_release_hw_control()
154 if (!pci_device_is_present(adapter->pdev)) in igc_release_hw_control()
164 * igc_get_hw_control - get control of the h/w from f/w
173 struct igc_hw *hw = &adapter->hw; in igc_get_hw_control()
191 * igc_clean_tx_ring - Free Tx Buffers
196 u16 i = tx_ring->next_to_clean; in igc_clean_tx_ring()
197 struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_clean_tx_ring()
200 while (i != tx_ring->next_to_use) { in igc_clean_tx_ring()
203 switch (tx_buffer->type) { in igc_clean_tx_ring()
208 xdp_return_frame(tx_buffer->xdpf); in igc_clean_tx_ring()
209 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_ring()
212 dev_kfree_skb_any(tx_buffer->skb); in igc_clean_tx_ring()
213 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_ring()
216 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); in igc_clean_tx_ring()
221 eop_desc = tx_buffer->next_to_watch; in igc_clean_tx_ring()
229 if (unlikely(i == tx_ring->count)) { in igc_clean_tx_ring()
231 tx_buffer = tx_ring->tx_buffer_info; in igc_clean_tx_ring()
237 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_ring()
240 tx_buffer->next_to_watch = NULL; in igc_clean_tx_ring()
245 if (unlikely(i == tx_ring->count)) { in igc_clean_tx_ring()
247 tx_buffer = tx_ring->tx_buffer_info; in igc_clean_tx_ring()
251 if (tx_ring->xsk_pool && xsk_frames) in igc_clean_tx_ring()
252 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); in igc_clean_tx_ring()
258 memset(tx_ring->tx_buffer_info, 0, in igc_clean_tx_ring()
259 sizeof(*tx_ring->tx_buffer_info) * tx_ring->count); in igc_clean_tx_ring()
262 memset(tx_ring->desc, 0, tx_ring->size); in igc_clean_tx_ring()
265 tx_ring->next_to_use = 0; in igc_clean_tx_ring()
266 tx_ring->next_to_clean = 0; in igc_clean_tx_ring()
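Taken together, the igc_clean_tx_ring() hits sketch a walk from next_to_clean up to next_to_use that releases whatever each slot holds and wraps at ring->count. A condensed reconstruction of that loop, using only the types and helpers visible above (the real function additionally unwinds the extra data descriptors up to next_to_watch and reports completed AF_XDP frames via xsk_tx_completed()):

        u16 i = tx_ring->next_to_clean;
        struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];

        while (i != tx_ring->next_to_use) {
                switch (tx_buffer->type) {
                case IGC_TX_BUFFER_TYPE_XDP:
                        xdp_return_frame(tx_buffer->xdpf);
                        igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
                        break;
                case IGC_TX_BUFFER_TYPE_SKB:
                        dev_kfree_skb_any(tx_buffer->skb);
                        igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
                        break;
                default:
                        netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
                        break;
                }

                tx_buffer->next_to_watch = NULL;

                /* advance with wrap-around */
                tx_buffer++;
                i++;
                if (unlikely(i == tx_ring->count)) {
                        i = 0;
                        tx_buffer = tx_ring->tx_buffer_info;
                }
        }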
270 * igc_free_tx_resources - Free Tx Resources per Queue
279 vfree(tx_ring->tx_buffer_info); in igc_free_tx_resources()
280 tx_ring->tx_buffer_info = NULL; in igc_free_tx_resources()
283 if (!tx_ring->desc) in igc_free_tx_resources()
286 dma_free_coherent(tx_ring->dev, tx_ring->size, in igc_free_tx_resources()
287 tx_ring->desc, tx_ring->dma); in igc_free_tx_resources()
289 tx_ring->desc = NULL; in igc_free_tx_resources()
293 * igc_free_all_tx_resources - Free Tx Resources for All Queues
302 for (i = 0; i < adapter->num_tx_queues; i++) in igc_free_all_tx_resources()
303 igc_free_tx_resources(adapter->tx_ring[i]); in igc_free_all_tx_resources()
307 * igc_clean_all_tx_rings - Free Tx Buffers for all queues
314 for (i = 0; i < adapter->num_tx_queues; i++) in igc_clean_all_tx_rings()
315 if (adapter->tx_ring[i]) in igc_clean_all_tx_rings()
316 igc_clean_tx_ring(adapter->tx_ring[i]); in igc_clean_all_tx_rings()
321 struct igc_hw *hw = &ring->q_vector->adapter->hw; in igc_disable_tx_ring_hw()
322 u8 idx = ring->reg_idx; in igc_disable_tx_ring_hw()
332 * igc_disable_all_tx_rings_hw - Disable all transmit queue operation
339 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_disable_all_tx_rings_hw()
340 struct igc_ring *tx_ring = adapter->tx_ring[i]; in igc_disable_all_tx_rings_hw()
347 * igc_setup_tx_resources - allocate Tx resources (Descriptors)
354 struct net_device *ndev = tx_ring->netdev; in igc_setup_tx_resources()
355 struct device *dev = tx_ring->dev; in igc_setup_tx_resources()
358 size = sizeof(struct igc_tx_buffer) * tx_ring->count; in igc_setup_tx_resources()
359 tx_ring->tx_buffer_info = vzalloc(size); in igc_setup_tx_resources()
360 if (!tx_ring->tx_buffer_info) in igc_setup_tx_resources()
364 tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc); in igc_setup_tx_resources()
365 tx_ring->size = ALIGN(tx_ring->size, 4096); in igc_setup_tx_resources()
367 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in igc_setup_tx_resources()
368 &tx_ring->dma, GFP_KERNEL); in igc_setup_tx_resources()
370 if (!tx_ring->desc) in igc_setup_tx_resources()
373 tx_ring->next_to_use = 0; in igc_setup_tx_resources()
374 tx_ring->next_to_clean = 0; in igc_setup_tx_resources()
379 vfree(tx_ring->tx_buffer_info); in igc_setup_tx_resources()
381 return -ENOMEM; in igc_setup_tx_resources()
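igc_setup_tx_resources() follows the driver's usual two-allocation pattern: a vzalloc()'d software buffer_info array plus a DMA-coherent descriptor ring whose size is padded to a 4 KiB multiple, with the software array freed again if the descriptor allocation fails. A compact sketch assembled from the lines above:

        int size = sizeof(struct igc_tx_buffer) * tx_ring->count;

        tx_ring->tx_buffer_info = vzalloc(size);
        if (!tx_ring->tx_buffer_info)
                return -ENOMEM;

        /* round the descriptor ring up to the nearest 4 KiB */
        tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
        tx_ring->size = ALIGN(tx_ring->size, 4096);

        tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
                                           &tx_ring->dma, GFP_KERNEL);
        if (!tx_ring->desc) {
                vfree(tx_ring->tx_buffer_info);
                tx_ring->tx_buffer_info = NULL;
                return -ENOMEM;
        }

        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
        return 0;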
385 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
392 struct net_device *dev = adapter->netdev; in igc_setup_all_tx_resources()
395 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_setup_all_tx_resources()
396 err = igc_setup_tx_resources(adapter->tx_ring[i]); in igc_setup_all_tx_resources()
399 for (i--; i >= 0; i--) in igc_setup_all_tx_resources()
400 igc_free_tx_resources(adapter->tx_ring[i]); in igc_setup_all_tx_resources()
410 u16 i = rx_ring->next_to_clean; in igc_clean_rx_ring_page_shared()
412 dev_kfree_skb(rx_ring->skb); in igc_clean_rx_ring_page_shared()
413 rx_ring->skb = NULL; in igc_clean_rx_ring_page_shared()
416 while (i != rx_ring->next_to_alloc) { in igc_clean_rx_ring_page_shared()
417 struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; in igc_clean_rx_ring_page_shared()
422 dma_sync_single_range_for_cpu(rx_ring->dev, in igc_clean_rx_ring_page_shared()
423 buffer_info->dma, in igc_clean_rx_ring_page_shared()
424 buffer_info->page_offset, in igc_clean_rx_ring_page_shared()
429 dma_unmap_page_attrs(rx_ring->dev, in igc_clean_rx_ring_page_shared()
430 buffer_info->dma, in igc_clean_rx_ring_page_shared()
434 __page_frag_cache_drain(buffer_info->page, in igc_clean_rx_ring_page_shared()
435 buffer_info->pagecnt_bias); in igc_clean_rx_ring_page_shared()
438 if (i == rx_ring->count) in igc_clean_rx_ring_page_shared()
448 for (i = 0; i < ring->count; i++) { in igc_clean_rx_ring_xsk_pool()
449 bi = &ring->rx_buffer_info[i]; in igc_clean_rx_ring_xsk_pool()
450 if (!bi->xdp) in igc_clean_rx_ring_xsk_pool()
453 xsk_buff_free(bi->xdp); in igc_clean_rx_ring_xsk_pool()
454 bi->xdp = NULL; in igc_clean_rx_ring_xsk_pool()
459 * igc_clean_rx_ring - Free Rx Buffers per Queue
464 if (ring->xsk_pool) in igc_clean_rx_ring()
471 ring->next_to_alloc = 0; in igc_clean_rx_ring()
472 ring->next_to_clean = 0; in igc_clean_rx_ring()
473 ring->next_to_use = 0; in igc_clean_rx_ring()
477 * igc_clean_all_rx_rings - Free Rx Buffers for all queues
484 for (i = 0; i < adapter->num_rx_queues; i++) in igc_clean_all_rx_rings()
485 if (adapter->rx_ring[i]) in igc_clean_all_rx_rings()
486 igc_clean_rx_ring(adapter->rx_ring[i]); in igc_clean_all_rx_rings()
490 * igc_free_rx_resources - Free Rx Resources
499 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in igc_free_rx_resources()
501 vfree(rx_ring->rx_buffer_info); in igc_free_rx_resources()
502 rx_ring->rx_buffer_info = NULL; in igc_free_rx_resources()
505 if (!rx_ring->desc) in igc_free_rx_resources()
508 dma_free_coherent(rx_ring->dev, rx_ring->size, in igc_free_rx_resources()
509 rx_ring->desc, rx_ring->dma); in igc_free_rx_resources()
511 rx_ring->desc = NULL; in igc_free_rx_resources()
515 * igc_free_all_rx_resources - Free Rx Resources for All Queues
524 for (i = 0; i < adapter->num_rx_queues; i++) in igc_free_all_rx_resources()
525 igc_free_rx_resources(adapter->rx_ring[i]); in igc_free_all_rx_resources()
529 * igc_setup_rx_resources - allocate Rx resources (Descriptors)
536 struct net_device *ndev = rx_ring->netdev; in igc_setup_rx_resources()
537 struct device *dev = rx_ring->dev; in igc_setup_rx_resources()
538 u8 index = rx_ring->queue_index; in igc_setup_rx_resources()
541 /* XDP RX-queue info */ in igc_setup_rx_resources()
542 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) in igc_setup_rx_resources()
543 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in igc_setup_rx_resources()
544 res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index, in igc_setup_rx_resources()
545 rx_ring->q_vector->napi.napi_id); in igc_setup_rx_resources()
552 size = sizeof(struct igc_rx_buffer) * rx_ring->count; in igc_setup_rx_resources()
553 rx_ring->rx_buffer_info = vzalloc(size); in igc_setup_rx_resources()
554 if (!rx_ring->rx_buffer_info) in igc_setup_rx_resources()
560 rx_ring->size = rx_ring->count * desc_len; in igc_setup_rx_resources()
561 rx_ring->size = ALIGN(rx_ring->size, 4096); in igc_setup_rx_resources()
563 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in igc_setup_rx_resources()
564 &rx_ring->dma, GFP_KERNEL); in igc_setup_rx_resources()
566 if (!rx_ring->desc) in igc_setup_rx_resources()
569 rx_ring->next_to_alloc = 0; in igc_setup_rx_resources()
570 rx_ring->next_to_clean = 0; in igc_setup_rx_resources()
571 rx_ring->next_to_use = 0; in igc_setup_rx_resources()
576 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in igc_setup_rx_resources()
577 vfree(rx_ring->rx_buffer_info); in igc_setup_rx_resources()
578 rx_ring->rx_buffer_info = NULL; in igc_setup_rx_resources()
580 return -ENOMEM; in igc_setup_rx_resources()
584 * igc_setup_all_rx_resources - wrapper to allocate Rx resources
592 struct net_device *dev = adapter->netdev; in igc_setup_all_rx_resources()
595 for (i = 0; i < adapter->num_rx_queues; i++) { in igc_setup_all_rx_resources()
596 err = igc_setup_rx_resources(adapter->rx_ring[i]); in igc_setup_all_rx_resources()
599 for (i--; i >= 0; i--) in igc_setup_all_rx_resources()
600 igc_free_rx_resources(adapter->rx_ring[i]); in igc_setup_all_rx_resources()
612 !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags)) in igc_get_xsk_pool()
615 return xsk_get_pool_from_qid(ring->netdev, ring->queue_index); in igc_get_xsk_pool()
619 * igc_configure_rx_ring - Configure a receive ring after Reset
628 struct igc_hw *hw = &adapter->hw; in igc_configure_rx_ring()
630 int reg_idx = ring->reg_idx; in igc_configure_rx_ring()
632 u64 rdba = ring->dma; in igc_configure_rx_ring()
635 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); in igc_configure_rx_ring()
636 ring->xsk_pool = igc_get_xsk_pool(adapter, ring); in igc_configure_rx_ring()
637 if (ring->xsk_pool) { in igc_configure_rx_ring()
638 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, in igc_configure_rx_ring()
641 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); in igc_configure_rx_ring()
643 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, in igc_configure_rx_ring()
651 /* disable the queue */ in igc_configure_rx_ring()
659 ring->count * sizeof(union igc_adv_rx_desc)); in igc_configure_rx_ring()
662 ring->tail = adapter->io_addr + IGC_RDT(reg_idx); in igc_configure_rx_ring()
664 writel(0, ring->tail); in igc_configure_rx_ring()
666 /* reset next-to-use/clean to place SW in sync with hardware */ in igc_configure_rx_ring()
667 ring->next_to_clean = 0; in igc_configure_rx_ring()
668 ring->next_to_use = 0; in igc_configure_rx_ring()
670 if (ring->xsk_pool) in igc_configure_rx_ring()
671 buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool); in igc_configure_rx_ring()
691 memset(ring->rx_buffer_info, 0, in igc_configure_rx_ring()
692 sizeof(struct igc_rx_buffer) * ring->count); in igc_configure_rx_ring()
696 rx_desc->wb.upper.length = 0; in igc_configure_rx_ring()
705 * igc_configure_rx - Configure receive Unit after Reset
717 for (i = 0; i < adapter->num_rx_queues; i++) in igc_configure_rx()
718 igc_configure_rx_ring(adapter, adapter->rx_ring[i]); in igc_configure_rx()
722 * igc_configure_tx_ring - Configure transmit ring after Reset
731 struct igc_hw *hw = &adapter->hw; in igc_configure_tx_ring()
732 int reg_idx = ring->reg_idx; in igc_configure_tx_ring()
733 u64 tdba = ring->dma; in igc_configure_tx_ring()
736 ring->xsk_pool = igc_get_xsk_pool(adapter, ring); in igc_configure_tx_ring()
738 /* disable the queue */ in igc_configure_tx_ring()
743 ring->count * sizeof(union igc_adv_tx_desc)); in igc_configure_tx_ring()
748 ring->tail = adapter->io_addr + IGC_TDT(reg_idx); in igc_configure_tx_ring()
750 writel(0, ring->tail); in igc_configure_tx_ring()
761 * igc_configure_tx - Configure transmit Unit after Reset
770 for (i = 0; i < adapter->num_tx_queues; i++) in igc_configure_tx()
771 igc_configure_tx_ring(adapter, adapter->tx_ring[i]); in igc_configure_tx()
775 * igc_setup_mrqc - configure the multiple receive queue control registers
780 struct igc_hw *hw = &adapter->hw; in igc_setup_mrqc()
789 num_rx_queues = adapter->rss_queues; in igc_setup_mrqc()
791 if (adapter->rss_indir_tbl_init != num_rx_queues) { in igc_setup_mrqc()
793 adapter->rss_indir_tbl[j] = in igc_setup_mrqc()
795 adapter->rss_indir_tbl_init = num_rx_queues; in igc_setup_mrqc()
799 /* Disable raw packet checksumming so that RSS hash is placed in in igc_setup_mrqc()
821 if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) in igc_setup_mrqc()
823 if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) in igc_setup_mrqc()
832 * igc_setup_rctl - configure the receive control registers
837 struct igc_hw *hw = &adapter->hw; in igc_setup_rctl()
846 (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT); in igc_setup_rctl()
853 /* disable store bad packets and clear size bits. */ in igc_setup_rctl()
859 /* disable queue 0 to prevent tail write w/o re-config */ in igc_setup_rctl()
863 if (adapter->netdev->features & NETIF_F_RXALL) { in igc_setup_rctl()
872 IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */ in igc_setup_rctl()
879 * igc_setup_tctl - configure the transmit control registers
884 struct igc_hw *hw = &adapter->hw; in igc_setup_tctl()
887 /* disable queue 0 which could be enabled by default */ in igc_setup_tctl()
903 * igc_set_mac_filter_hw() - Set MAC address filter in hardware
908 * @queue: If non-negative, queue assignment feature is enabled and frames
916 struct net_device *dev = adapter->netdev; in igc_set_mac_filter_hw()
917 struct igc_hw *hw = &adapter->hw; in igc_set_mac_filter_hw()
920 if (WARN_ON(index >= hw->mac.rar_entry_count)) in igc_set_mac_filter_hw()
946 * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware
952 struct net_device *dev = adapter->netdev; in igc_clear_mac_filter_hw()
953 struct igc_hw *hw = &adapter->hw; in igc_clear_mac_filter_hw()
955 if (WARN_ON(index >= hw->mac.rar_entry_count)) in igc_clear_mac_filter_hw()
967 struct net_device *dev = adapter->netdev; in igc_set_default_mac_filter()
968 u8 *addr = adapter->hw.mac.addr; in igc_set_default_mac_filter()
972 igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1); in igc_set_default_mac_filter()
976 * igc_set_mac - Change the Ethernet Address of the NIC
985 struct igc_hw *hw = &adapter->hw; in igc_set_mac()
988 if (!is_valid_ether_addr(addr->sa_data)) in igc_set_mac()
989 return -EADDRNOTAVAIL; in igc_set_mac()
991 eth_hw_addr_set(netdev, addr->sa_data); in igc_set_mac()
992 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); in igc_set_mac()
1001 * igc_write_mc_addr_list - write multicast addresses to MTA
1005 * Returns: -ENOMEM on failure
1012 struct igc_hw *hw = &adapter->hw; in igc_write_mc_addr_list()
1025 return -ENOMEM; in igc_write_mc_addr_list()
1030 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); in igc_write_mc_addr_list()
1041 struct igc_adapter *adapter = netdev_priv(ring->netdev); in igc_tx_launchtime()
1042 ktime_t cycle_time = adapter->cycle_time; in igc_tx_launchtime()
1043 ktime_t base_time = adapter->base_time; in igc_tx_launchtime()
1055 if (baset_est != ring->last_ff_cycle) { in igc_tx_launchtime()
1057 ring->last_ff_cycle = baset_est; in igc_tx_launchtime()
1059 if (ktime_compare(end_of_cycle, ring->last_tx_cycle) > 0) in igc_tx_launchtime()
1070 netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n", in igc_tx_launchtime()
1073 ring->last_tx_cycle = end_of_cycle; in igc_tx_launchtime()
1093 dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE); in igc_init_empty_frame()
1094 if (dma_mapping_error(ring->dev, dma)) { in igc_init_empty_frame()
1095 netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); in igc_init_empty_frame()
1096 return -ENOMEM; in igc_init_empty_frame()
1099 buffer->type = IGC_TX_BUFFER_TYPE_SKB; in igc_init_empty_frame()
1100 buffer->skb = skb; in igc_init_empty_frame()
1101 buffer->protocol = 0; in igc_init_empty_frame()
1102 buffer->bytecount = skb->len; in igc_init_empty_frame()
1103 buffer->gso_segs = 1; in igc_init_empty_frame()
1104 buffer->time_stamp = jiffies; in igc_init_empty_frame()
1105 dma_unmap_len_set(buffer, len, skb->len); in igc_init_empty_frame()
1120 return -EBUSY; in igc_init_tx_empty_descriptor()
1128 first->bytecount; in igc_init_tx_empty_descriptor()
1129 olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; in igc_init_tx_empty_descriptor()
1131 desc = IGC_TX_DESC(ring, ring->next_to_use); in igc_init_tx_empty_descriptor()
1132 desc->read.cmd_type_len = cpu_to_le32(cmd_type); in igc_init_tx_empty_descriptor()
1133 desc->read.olinfo_status = cpu_to_le32(olinfo_status); in igc_init_tx_empty_descriptor()
1134 desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma)); in igc_init_tx_empty_descriptor()
1136 netdev_tx_sent_queue(txring_txq(ring), skb->len); in igc_init_tx_empty_descriptor()
1138 first->next_to_watch = desc; in igc_init_tx_empty_descriptor()
1140 ring->next_to_use++; in igc_init_tx_empty_descriptor()
1141 if (ring->next_to_use == ring->count) in igc_init_tx_empty_descriptor()
1142 ring->next_to_use = 0; in igc_init_tx_empty_descriptor()
1155 u16 i = tx_ring->next_to_use; in igc_tx_ctxtdesc()
1160 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in igc_tx_ctxtdesc()
1166 if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) in igc_tx_ctxtdesc()
1167 mss_l4len_idx |= tx_ring->reg_idx << 4; in igc_tx_ctxtdesc()
1172 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); in igc_tx_ctxtdesc()
1173 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); in igc_tx_ctxtdesc()
1174 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); in igc_tx_ctxtdesc()
1175 context_desc->launch_time = launch_time; in igc_tx_ctxtdesc()
1181 struct sk_buff *skb = first->skb; in igc_tx_csum()
1185 if (skb->ip_summed != CHECKSUM_PARTIAL) { in igc_tx_csum()
1187 if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) && in igc_tx_csum()
1188 !tx_ring->launchtime_enable) in igc_tx_csum()
1193 switch (skb->csum_offset) { in igc_tx_csum()
1212 first->tx_flags |= IGC_TX_FLAGS_CSUM; in igc_tx_csum()
1213 vlan_macip_lens = skb_checksum_start_offset(skb) - in igc_tx_csum()
1217 vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; in igc_tx_csum()
1225 struct net_device *netdev = tx_ring->netdev; in __igc_maybe_stop_tx()
1227 netif_stop_subqueue(netdev, tx_ring->queue_index); in __igc_maybe_stop_tx()
1236 return -EBUSY; in __igc_maybe_stop_tx()
1239 netif_wake_subqueue(netdev, tx_ring->queue_index); in __igc_maybe_stop_tx()
1241 u64_stats_update_begin(&tx_ring->tx_syncp2); in __igc_maybe_stop_tx()
1242 tx_ring->tx_stats.restart_queue2++; in __igc_maybe_stop_tx()
1243 u64_stats_update_end(&tx_ring->tx_syncp2); in __igc_maybe_stop_tx()
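__igc_maybe_stop_tx() shows both halves of the usual stop/wake dance: stop the subqueue when descriptors run low, then re-check and wake (bumping restart_queue2) if the completion path freed space in the meantime. The elided middle of the function presumably re-checks ring space after a memory barrier; a generic sketch under that assumption, where igc_desc_unused() and the size parameter are assumed names:

        netif_stop_subqueue(netdev, tx_ring->queue_index);

        /* make the stopped state visible before re-reading ring state, so
         * the completion path cannot miss the need to wake this queue
         */
        smp_mb();

        /* still not enough room: stay stopped */
        if (igc_desc_unused(tx_ring) < size)
                return -EBUSY;

        /* space was freed while we were stopping: undo it */
        netif_wake_subqueue(netdev, tx_ring->queue_index);

        u64_stats_update_begin(&tx_ring->tx_syncp2);
        tx_ring->tx_stats.restart_queue2++;
        u64_stats_update_end(&tx_ring->tx_syncp2);
        return 0;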
1291 cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS); in igc_tx_cmd_type()
1314 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); in igc_tx_olinfo_status()
1321 struct sk_buff *skb = first->skb; in igc_tx_map()
1324 u32 tx_flags = first->tx_flags; in igc_tx_map()
1326 u16 i = tx_ring->next_to_use; in igc_tx_map()
1334 igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); in igc_tx_map()
1337 data_len = skb->data_len; in igc_tx_map()
1339 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in igc_tx_map()
1343 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { in igc_tx_map()
1344 if (dma_mapping_error(tx_ring->dev, dma)) in igc_tx_map()
1351 tx_desc->read.buffer_addr = cpu_to_le64(dma); in igc_tx_map()
1354 tx_desc->read.cmd_type_len = in igc_tx_map()
1359 if (i == tx_ring->count) { in igc_tx_map()
1363 tx_desc->read.olinfo_status = 0; in igc_tx_map()
1366 size -= IGC_MAX_DATA_PER_TXD; in igc_tx_map()
1368 tx_desc->read.buffer_addr = cpu_to_le64(dma); in igc_tx_map()
1374 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); in igc_tx_map()
1378 if (i == tx_ring->count) { in igc_tx_map()
1382 tx_desc->read.olinfo_status = 0; in igc_tx_map()
1385 data_len -= size; in igc_tx_map()
1387 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, in igc_tx_map()
1390 tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_tx_map()
1393 /* write last descriptor with RS and EOP bits */ in igc_tx_map()
1395 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); in igc_tx_map()
1397 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in igc_tx_map()
1400 first->time_stamp = jiffies; in igc_tx_map()
1405 * are new descriptors to fetch. (Only applicable for weak-ordered in igc_tx_map()
1406 * memory model archs, such as IA-64). in igc_tx_map()
1414 first->next_to_watch = tx_desc; in igc_tx_map()
1417 if (i == tx_ring->count) in igc_tx_map()
1420 tx_ring->next_to_use = i; in igc_tx_map()
1426 writel(i, tx_ring->tail); in igc_tx_map()
1431 netdev_err(tx_ring->netdev, "TX DMA map failed\n"); in igc_tx_map()
1432 tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_tx_map()
1437 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_tx_map()
1439 if (i-- == 0) in igc_tx_map()
1440 i += tx_ring->count; in igc_tx_map()
1441 tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_tx_map()
1445 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_tx_map()
1447 dev_kfree_skb_any(tx_buffer->skb); in igc_tx_map()
1448 tx_buffer->skb = NULL; in igc_tx_map()
1450 tx_ring->next_to_use = i; in igc_tx_map()
1452 return -1; in igc_tx_map()
1461 struct sk_buff *skb = first->skb; in igc_tso()
1475 if (skb->ip_summed != CHECKSUM_PARTIAL) in igc_tso()
1492 if (ip.v4->version == 4) { in igc_tso()
1494 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); in igc_tso()
1499 ip.v4->check = csum_fold(csum_partial(trans_start, in igc_tso()
1500 csum_start - trans_start, in igc_tso()
1504 ip.v4->tot_len = 0; in igc_tso()
1505 first->tx_flags |= IGC_TX_FLAGS_TSO | in igc_tso()
1509 ip.v6->payload_len = 0; in igc_tso()
1510 first->tx_flags |= IGC_TX_FLAGS_TSO | in igc_tso()
1515 l4_offset = l4.hdr - skb->data; in igc_tso()
1518 paylen = skb->len - l4_offset; in igc_tso()
1521 *hdr_len = (l4.tcp->doff * 4) + l4_offset; in igc_tso()
1522 csum_replace_by_diff(&l4.tcp->check, in igc_tso()
1527 csum_replace_by_diff(&l4.udp->check, in igc_tso()
1532 first->gso_segs = skb_shinfo(skb)->gso_segs; in igc_tso()
1533 first->bytecount += (first->gso_segs - 1) * *hdr_len; in igc_tso()
1536 mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT; in igc_tso()
1537 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT; in igc_tso()
1540 vlan_macip_lens = l4.hdr - ip.hdr; in igc_tso()
1541 vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT; in igc_tso()
1542 vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; in igc_tso()
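One point of the igc_tso() accounting worth spelling out: every TSO segment re-carries the L2/L3/L4 headers (hdr_len bytes), so the byte count used later for queue accounting and statistics has to include those replicated headers. A worked example with hypothetical sizes:

        /* e.g. skb->len = 65226, hdr_len = 66, gso_size = 1448
         *   payload   = 65226 - 66   = 65160 bytes
         *   gso_segs  = 65160 / 1448 = 45 segments
         *   bytecount = 65226 + (45 - 1) * 66 = 68130 bytes on the wire
         */
        first->gso_segs   = skb_shinfo(skb)->gso_segs;
        first->bytecount += (first->gso_segs - 1) * *hdr_len;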
1555 struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i]; in igc_request_tx_tstamp()
1557 if (tstamp->skb) in igc_request_tx_tstamp()
1560 tstamp->skb = skb_get(skb); in igc_request_tx_tstamp()
1561 tstamp->start = jiffies; in igc_request_tx_tstamp()
1562 *flags = tstamp->flags; in igc_request_tx_tstamp()
1573 struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); in igc_xmit_frame_ring()
1591 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) in igc_xmit_frame_ring()
1593 &skb_shinfo(skb)->frags[f])); in igc_xmit_frame_ring()
1600 if (!tx_ring->launchtime_enable) in igc_xmit_frame_ring()
1603 txtime = skb->tstamp; in igc_xmit_frame_ring()
1604 skb->tstamp = ktime_set(0, 0); in igc_xmit_frame_ring()
1612 empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in igc_xmit_frame_ring()
1630 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in igc_xmit_frame_ring()
1631 first->type = IGC_TX_BUFFER_TYPE_SKB; in igc_xmit_frame_ring()
1632 first->skb = skb; in igc_xmit_frame_ring()
1633 first->bytecount = skb->len; in igc_xmit_frame_ring()
1634 first->gso_segs = 1; in igc_xmit_frame_ring()
1636 if (adapter->qbv_transition || tx_ring->oper_gate_closed) in igc_xmit_frame_ring()
1639 if (tx_ring->max_sdu > 0 && first->bytecount > tx_ring->max_sdu) { in igc_xmit_frame_ring()
1640 adapter->stats.txdrop++; in igc_xmit_frame_ring()
1644 if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) && in igc_xmit_frame_ring()
1645 skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { in igc_xmit_frame_ring()
1649 spin_lock_irqsave(&adapter->ptp_tx_lock, flags); in igc_xmit_frame_ring()
1651 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in igc_xmit_frame_ring()
1653 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_USE_CYCLES) in igc_xmit_frame_ring()
1656 adapter->tx_hwtstamp_skipped++; in igc_xmit_frame_ring()
1659 spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); in igc_xmit_frame_ring()
1668 first->tx_flags = tx_flags; in igc_xmit_frame_ring()
1669 first->protocol = protocol; in igc_xmit_frame_ring()
1682 dev_kfree_skb_any(first->skb); in igc_xmit_frame_ring()
1683 first->skb = NULL; in igc_xmit_frame_ring()
1691 unsigned int r_idx = skb->queue_mapping; in igc_tx_queue_mapping()
1693 if (r_idx >= adapter->num_tx_queues) in igc_tx_queue_mapping()
1694 r_idx = r_idx % adapter->num_tx_queues; in igc_tx_queue_mapping()
1696 return adapter->tx_ring[r_idx]; in igc_tx_queue_mapping()
1707 if (skb->len < 17) { in igc_xmit_frame()
1710 skb->len = 17; in igc_xmit_frame()
1727 if (!(ring->netdev->features & NETIF_F_RXCSUM)) in igc_rx_checksum()
1738 if (!(skb->len == 60 && in igc_rx_checksum()
1739 test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { in igc_rx_checksum()
1740 u64_stats_update_begin(&ring->rx_syncp); in igc_rx_checksum()
1741 ring->rx_stats.csum_err++; in igc_rx_checksum()
1742 u64_stats_update_end(&ring->rx_syncp); in igc_rx_checksum()
1750 skb->ip_summed = CHECKSUM_UNNECESSARY; in igc_rx_checksum()
1752 netdev_dbg(ring->netdev, "cksum success: bits %08X\n", in igc_rx_checksum()
1753 le32_to_cpu(rx_desc->wb.upper.status_error)); in igc_rx_checksum()
1769 [11] = PKT_HASH_TYPE_NONE, /* keep array sized for SW bit-mask */
1780 if (ring->netdev->features & NETIF_F_RXHASH) { in igc_rx_hash()
1781 u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); in igc_rx_hash()
1792 struct net_device *dev = rx_ring->netdev; in igc_rx_vlan()
1795 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && in igc_rx_vlan()
1798 test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) in igc_rx_vlan()
1799 vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan); in igc_rx_vlan()
1801 vid = le16_to_cpu(rx_desc->wb.upper.vlan); in igc_rx_vlan()
1808 * igc_process_skb_fields - Populate skb header fields from Rx descriptor
1810 * @rx_desc: pointer to the EOP Rx descriptor
1827 skb_record_rx_queue(skb, rx_ring->queue_index); in igc_process_skb_fields()
1829 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in igc_process_skb_fields()
1836 struct igc_hw *hw = &adapter->hw; in igc_vlan_mode()
1845 /* disable VLAN tag insert/strip */ in igc_vlan_mode()
1853 igc_vlan_mode(adapter->netdev, adapter->netdev->features); in igc_restore_vlan()
1862 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in igc_get_rx_buffer()
1865 page_count(rx_buffer->page); in igc_get_rx_buffer()
1869 prefetchw(rx_buffer->page); in igc_get_rx_buffer()
1872 dma_sync_single_range_for_cpu(rx_ring->dev, in igc_get_rx_buffer()
1873 rx_buffer->dma, in igc_get_rx_buffer()
1874 rx_buffer->page_offset, in igc_get_rx_buffer()
1878 rx_buffer->pagecnt_bias--; in igc_get_rx_buffer()
1887 buffer->page_offset ^= truesize; in igc_rx_buffer_flip()
1889 buffer->page_offset += truesize; in igc_rx_buffer_flip()
1910 * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
1916 * This function will add the data contained in rx_buffer->page to the skb.
1932 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, in igc_add_rx_frag()
1933 rx_buffer->page_offset, size, truesize); in igc_add_rx_frag()
1942 unsigned int size = xdp->data_end - xdp->data; in igc_build_skb()
1944 unsigned int metasize = xdp->data - xdp->data_meta; in igc_build_skb()
1948 net_prefetch(xdp->data_meta); in igc_build_skb()
1951 skb = napi_build_skb(xdp->data_hard_start, truesize); in igc_build_skb()
1956 skb_reserve(skb, xdp->data - xdp->data_hard_start); in igc_build_skb()
1969 struct xdp_buff *xdp = &ctx->xdp; in igc_construct_skb()
1970 unsigned int metasize = xdp->data - xdp->data_meta; in igc_construct_skb()
1971 unsigned int size = xdp->data_end - xdp->data; in igc_construct_skb()
1973 void *va = xdp->data; in igc_construct_skb()
1978 net_prefetch(xdp->data_meta); in igc_construct_skb()
1981 skb = napi_alloc_skb(&rx_ring->q_vector->napi, in igc_construct_skb()
1986 if (ctx->rx_ts) { in igc_construct_skb()
1987 skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV; in igc_construct_skb()
1988 skb_hwtstamps(skb)->netdev_data = ctx->rx_ts; in igc_construct_skb()
1994 headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN); in igc_construct_skb()
1997 memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta, in igc_construct_skb()
2006 size -= headlen; in igc_construct_skb()
2008 skb_add_rx_frag(skb, 0, rx_buffer->page, in igc_construct_skb()
2009 (va + headlen) - page_address(rx_buffer->page), in igc_construct_skb()
2013 rx_buffer->pagecnt_bias++; in igc_construct_skb()
2020 * igc_reuse_rx_page - page flip buffer and store it back on the ring
2029 u16 nta = rx_ring->next_to_alloc; in igc_reuse_rx_page()
2032 new_buff = &rx_ring->rx_buffer_info[nta]; in igc_reuse_rx_page()
2036 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in igc_reuse_rx_page()
2042 new_buff->dma = old_buff->dma; in igc_reuse_rx_page()
2043 new_buff->page = old_buff->page; in igc_reuse_rx_page()
2044 new_buff->page_offset = old_buff->page_offset; in igc_reuse_rx_page()
2045 new_buff->pagecnt_bias = old_buff->pagecnt_bias; in igc_reuse_rx_page()
2051 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; in igc_can_reuse_rx_page()
2052 struct page *page = rx_buffer->page; in igc_can_reuse_rx_page()
2054 /* avoid re-using remote and pfmemalloc pages */ in igc_can_reuse_rx_page()
2060 if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) in igc_can_reuse_rx_page()
2064 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048) in igc_can_reuse_rx_page()
2066 if (rx_buffer->page_offset > IGC_LAST_OFFSET) in igc_can_reuse_rx_page()
2075 page_ref_add(page, USHRT_MAX - 1); in igc_can_reuse_rx_page()
2076 rx_buffer->pagecnt_bias = USHRT_MAX; in igc_can_reuse_rx_page()
2083 * igc_is_non_eop - process handling of non-EOP buffers
2087 * This function updates next to clean. If the buffer is an EOP buffer
2090 * that this is in fact a non-EOP buffer.
2095 u32 ntc = rx_ring->next_to_clean + 1; in igc_is_non_eop()
2098 ntc = (ntc < rx_ring->count) ? ntc : 0; in igc_is_non_eop()
2099 rx_ring->next_to_clean = ntc; in igc_is_non_eop()
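The igc_is_non_eop() kernel-doc and hits describe the multi-buffer receive case: next_to_clean always advances (with wrap-around), and the EOP bit in the write-back descriptor decides whether the frame is complete or more buffers belong to it. A minimal sketch of the tail of that check, using the status_error field seen elsewhere in this listing; IGC_RXD_STAT_EOP is assumed to be the end-of-packet status bit:

        u32 ntc = rx_ring->next_to_clean + 1;

        /* advance with wrap-around */
        ntc = (ntc < rx_ring->count) ? ntc : 0;
        rx_ring->next_to_clean = ntc;

        if (likely(rx_desc->wb.upper.status_error &
                   cpu_to_le32(IGC_RXD_STAT_EOP)))
                return false;   /* end of packet: frame is complete */

        return true;            /* non-EOP: more buffers follow */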
2110 * igc_cleanup_headers - Correct corrupted or empty headers
2112 * @rx_desc: pointer to the EOP Rx descriptor
2128 struct net_device *netdev = rx_ring->netdev; in igc_cleanup_headers()
2130 if (!(netdev->features & NETIF_F_RXALL)) { in igc_cleanup_headers()
2154 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in igc_put_rx_buffer()
2157 __page_frag_cache_drain(rx_buffer->page, in igc_put_rx_buffer()
2158 rx_buffer->pagecnt_bias); in igc_put_rx_buffer()
2162 rx_buffer->page = NULL; in igc_put_rx_buffer()
2167 struct igc_adapter *adapter = rx_ring->q_vector->adapter; in igc_rx_offset()
2180 struct page *page = bi->page; in igc_alloc_mapped_page()
2190 rx_ring->rx_stats.alloc_failed++; in igc_alloc_mapped_page()
2191 set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); in igc_alloc_mapped_page()
2196 dma = dma_map_page_attrs(rx_ring->dev, page, 0, in igc_alloc_mapped_page()
2204 if (dma_mapping_error(rx_ring->dev, dma)) { in igc_alloc_mapped_page()
2207 rx_ring->rx_stats.alloc_failed++; in igc_alloc_mapped_page()
2208 set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); in igc_alloc_mapped_page()
2212 bi->dma = dma; in igc_alloc_mapped_page()
2213 bi->page = page; in igc_alloc_mapped_page()
2214 bi->page_offset = igc_rx_offset(rx_ring); in igc_alloc_mapped_page()
2215 page_ref_add(page, USHRT_MAX - 1); in igc_alloc_mapped_page()
2216 bi->pagecnt_bias = USHRT_MAX; in igc_alloc_mapped_page()
2222 * igc_alloc_rx_buffers - Replace used receive buffers; packet split
2229 u16 i = rx_ring->next_to_use; in igc_alloc_rx_buffers()
2238 bi = &rx_ring->rx_buffer_info[i]; in igc_alloc_rx_buffers()
2239 i -= rx_ring->count; in igc_alloc_rx_buffers()
2248 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in igc_alloc_rx_buffers()
2249 bi->page_offset, bufsz, in igc_alloc_rx_buffers()
2253 * because each write-back erases this info. in igc_alloc_rx_buffers()
2255 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in igc_alloc_rx_buffers()
2262 bi = rx_ring->rx_buffer_info; in igc_alloc_rx_buffers()
2263 i -= rx_ring->count; in igc_alloc_rx_buffers()
2267 rx_desc->wb.upper.length = 0; in igc_alloc_rx_buffers()
2269 cleaned_count--; in igc_alloc_rx_buffers()
2272 i += rx_ring->count; in igc_alloc_rx_buffers()
2274 if (rx_ring->next_to_use != i) { in igc_alloc_rx_buffers()
2276 rx_ring->next_to_use = i; in igc_alloc_rx_buffers()
2279 rx_ring->next_to_alloc = i; in igc_alloc_rx_buffers()
2283 * applicable for weak-ordered memory model archs, in igc_alloc_rx_buffers()
2284 * such as IA-64). in igc_alloc_rx_buffers()
2287 writel(i, rx_ring->tail); in igc_alloc_rx_buffers()
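igc_alloc_rx_buffers() refills used descriptors first and only then moves the tail pointer; the weak-ordering comment above is the reason a write barrier sits between the two. A reduced sketch of that publish step (the refill loop itself is listed line by line above; the wmb() is the elided line the comment refers to):

        if (rx_ring->next_to_use != i) {
                /* record the new software positions */
                rx_ring->next_to_use   = i;
                rx_ring->next_to_alloc = i;

                /* force descriptor writes to complete before the device can
                 * see the new tail (weak-ordered archs such as IA-64)
                 */
                wmb();
                writel(i, rx_ring->tail);
        }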
2294 u16 i = ring->next_to_use; in igc_alloc_rx_buffers_zc()
2305 bi = &ring->rx_buffer_info[i]; in igc_alloc_rx_buffers_zc()
2306 i -= ring->count; in igc_alloc_rx_buffers_zc()
2309 bi->xdp = xsk_buff_alloc(ring->xsk_pool); in igc_alloc_rx_buffers_zc()
2310 if (!bi->xdp) { in igc_alloc_rx_buffers_zc()
2315 dma = xsk_buff_xdp_get_dma(bi->xdp); in igc_alloc_rx_buffers_zc()
2316 desc->read.pkt_addr = cpu_to_le64(dma); in igc_alloc_rx_buffers_zc()
2323 bi = ring->rx_buffer_info; in igc_alloc_rx_buffers_zc()
2324 i -= ring->count; in igc_alloc_rx_buffers_zc()
2328 desc->wb.upper.length = 0; in igc_alloc_rx_buffers_zc()
2330 count--; in igc_alloc_rx_buffers_zc()
2333 i += ring->count; in igc_alloc_rx_buffers_zc()
2335 if (ring->next_to_use != i) { in igc_alloc_rx_buffers_zc()
2336 ring->next_to_use = i; in igc_alloc_rx_buffers_zc()
2340 * applicable for weak-ordered memory model archs, in igc_alloc_rx_buffers_zc()
2341 * such as IA-64). in igc_alloc_rx_buffers_zc()
2344 writel(i, ring->tail); in igc_alloc_rx_buffers_zc()
2355 u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0; in igc_xdp_init_tx_descriptor()
2356 u16 count, index = ring->next_to_use; in igc_xdp_init_tx_descriptor()
2357 struct igc_tx_buffer *head = &ring->tx_buffer_info[index]; in igc_xdp_init_tx_descriptor()
2360 u32 olinfo_status, len = xdpf->len, cmd_type; in igc_xdp_init_tx_descriptor()
2361 void *data = xdpf->data; in igc_xdp_init_tx_descriptor()
2366 count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i])); in igc_xdp_init_tx_descriptor()
2370 return -EBUSY; in igc_xdp_init_tx_descriptor()
2374 head->bytecount = xdp_get_frame_len(xdpf); in igc_xdp_init_tx_descriptor()
2375 head->type = IGC_TX_BUFFER_TYPE_XDP; in igc_xdp_init_tx_descriptor()
2376 head->gso_segs = 1; in igc_xdp_init_tx_descriptor()
2377 head->xdpf = xdpf; in igc_xdp_init_tx_descriptor()
2379 olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; in igc_xdp_init_tx_descriptor()
2380 desc->read.olinfo_status = cpu_to_le32(olinfo_status); in igc_xdp_init_tx_descriptor()
2385 dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE); in igc_xdp_init_tx_descriptor()
2386 if (dma_mapping_error(ring->dev, dma)) { in igc_xdp_init_tx_descriptor()
2387 netdev_err_once(ring->netdev, in igc_xdp_init_tx_descriptor()
2398 desc->read.cmd_type_len = cpu_to_le32(cmd_type); in igc_xdp_init_tx_descriptor()
2399 desc->read.buffer_addr = cpu_to_le64(dma); in igc_xdp_init_tx_descriptor()
2401 buffer->protocol = 0; in igc_xdp_init_tx_descriptor()
2403 if (++index == ring->count) in igc_xdp_init_tx_descriptor()
2409 buffer = &ring->tx_buffer_info[index]; in igc_xdp_init_tx_descriptor()
2411 desc->read.olinfo_status = 0; in igc_xdp_init_tx_descriptor()
2413 data = skb_frag_address(&sinfo->frags[i]); in igc_xdp_init_tx_descriptor()
2414 len = skb_frag_size(&sinfo->frags[i]); in igc_xdp_init_tx_descriptor()
2417 desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD); in igc_xdp_init_tx_descriptor()
2419 netdev_tx_sent_queue(txring_txq(ring), head->bytecount); in igc_xdp_init_tx_descriptor()
2421 head->time_stamp = jiffies; in igc_xdp_init_tx_descriptor()
2423 head->next_to_watch = desc; in igc_xdp_init_tx_descriptor()
2424 ring->next_to_use = index; in igc_xdp_init_tx_descriptor()
2430 buffer = &ring->tx_buffer_info[index]; in igc_xdp_init_tx_descriptor()
2432 dma_unmap_page(ring->dev, in igc_xdp_init_tx_descriptor()
2441 index += ring->count; in igc_xdp_init_tx_descriptor()
2442 index--; in igc_xdp_init_tx_descriptor()
2445 return -ENOMEM; in igc_xdp_init_tx_descriptor()
2456 while (index >= adapter->num_tx_queues) in igc_xdp_get_tx_ring()
2457 index -= adapter->num_tx_queues; in igc_xdp_get_tx_ring()
2459 return adapter->tx_ring[index]; in igc_xdp_get_tx_ring()
2471 return -EFAULT; in igc_xdp_xmit_back()
2499 if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0) in __igc_xdp_run_prog()
2504 bpf_warn_invalid_xdp_action(adapter->netdev, prog, act); in __igc_xdp_run_prog()
2508 trace_xdp_exception(adapter->netdev, prog, act); in __igc_xdp_run_prog()
2520 prog = READ_ONCE(adapter->xdp_prog); in igc_xdp_run_prog()
2540 writel(ring->next_to_use, ring->tail); in igc_flush_tx_descriptors()
2565 struct igc_ring *ring = q_vector->rx.ring; in igc_update_rx_stats()
2567 u64_stats_update_begin(&ring->rx_syncp); in igc_update_rx_stats()
2568 ring->rx_stats.packets += packets; in igc_update_rx_stats()
2569 ring->rx_stats.bytes += bytes; in igc_update_rx_stats()
2570 u64_stats_update_end(&ring->rx_syncp); in igc_update_rx_stats()
2572 q_vector->rx.total_packets += packets; in igc_update_rx_stats()
2573 q_vector->rx.total_bytes += bytes; in igc_update_rx_stats()
2579 struct igc_adapter *adapter = q_vector->adapter; in igc_clean_rx_irq()
2580 struct igc_ring *rx_ring = q_vector->rx.ring; in igc_clean_rx_irq()
2581 struct sk_buff *skb = rx_ring->skb; in igc_clean_rx_irq()
2600 rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean); in igc_clean_rx_irq()
2601 size = le16_to_cpu(rx_desc->wb.upper.length); in igc_clean_rx_irq()
2614 pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; in igc_clean_rx_irq()
2619 size -= IGC_TS_HDR_LEN; in igc_clean_rx_irq()
2623 xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq); in igc_clean_rx_irq()
2624 xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring), in igc_clean_rx_irq()
2636 rx_buffer->pagecnt_bias++; in igc_clean_rx_irq()
2656 rx_ring->rx_stats.alloc_failed++; in igc_clean_rx_irq()
2657 rx_buffer->pagecnt_bias++; in igc_clean_rx_irq()
2658 set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); in igc_clean_rx_irq()
2665 /* fetch next buffer in frame if non-eop */ in igc_clean_rx_irq()
2676 total_bytes += skb->len; in igc_clean_rx_irq()
2681 napi_gro_receive(&q_vector->napi, skb); in igc_clean_rx_irq()
2694 rx_ring->skb = skb; in igc_clean_rx_irq()
2707 struct xdp_buff *xdp = &ctx->xdp; in igc_construct_skb_zc()
2708 unsigned int totalsize = xdp->data_end - xdp->data_meta; in igc_construct_skb_zc()
2709 unsigned int metasize = xdp->data - xdp->data_meta; in igc_construct_skb_zc()
2712 net_prefetch(xdp->data_meta); in igc_construct_skb_zc()
2714 skb = napi_alloc_skb(&ring->q_vector->napi, totalsize); in igc_construct_skb_zc()
2718 memcpy(__skb_put(skb, totalsize), xdp->data_meta, in igc_construct_skb_zc()
2726 if (ctx->rx_ts) { in igc_construct_skb_zc()
2727 skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV; in igc_construct_skb_zc()
2728 skb_hwtstamps(skb)->netdev_data = ctx->rx_ts; in igc_construct_skb_zc()
2738 struct igc_ring *ring = q_vector->rx.ring; in igc_dispatch_skb_zc()
2743 ring->rx_stats.alloc_failed++; in igc_dispatch_skb_zc()
2744 set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &ring->flags); in igc_dispatch_skb_zc()
2752 napi_gro_receive(&q_vector->napi, skb); in igc_dispatch_skb_zc()
2759 * igc_xdp_buff fields fall into xdp_buff_xsk->cb in xsk_buff_to_igc_ctx()
2766 struct igc_adapter *adapter = q_vector->adapter; in igc_clean_rx_irq_zc()
2767 struct igc_ring *ring = q_vector->rx.ring; in igc_clean_rx_irq_zc()
2770 u16 ntc = ring->next_to_clean; in igc_clean_rx_irq_zc()
2777 prog = READ_ONCE(adapter->xdp_prog); in igc_clean_rx_irq_zc()
2787 size = le16_to_cpu(desc->wb.upper.length); in igc_clean_rx_irq_zc()
2797 bi = &ring->rx_buffer_info[ntc]; in igc_clean_rx_irq_zc()
2799 ctx = xsk_buff_to_igc_ctx(bi->xdp); in igc_clean_rx_irq_zc()
2800 ctx->rx_desc = desc; in igc_clean_rx_irq_zc()
2803 ctx->rx_ts = bi->xdp->data; in igc_clean_rx_irq_zc()
2805 bi->xdp->data += IGC_TS_HDR_LEN; in igc_clean_rx_irq_zc()
2810 bi->xdp->data_meta += IGC_TS_HDR_LEN; in igc_clean_rx_irq_zc()
2811 size -= IGC_TS_HDR_LEN; in igc_clean_rx_irq_zc()
2813 ctx->rx_ts = NULL; in igc_clean_rx_irq_zc()
2816 bi->xdp->data_end = bi->xdp->data + size; in igc_clean_rx_irq_zc()
2817 xsk_buff_dma_sync_for_cpu(bi->xdp); in igc_clean_rx_irq_zc()
2819 res = __igc_xdp_run_prog(adapter, prog, bi->xdp); in igc_clean_rx_irq_zc()
2825 xsk_buff_free(bi->xdp); in igc_clean_rx_irq_zc()
2833 bi->xdp = NULL; in igc_clean_rx_irq_zc()
2838 if (ntc == ring->count) in igc_clean_rx_irq_zc()
2842 ring->next_to_clean = ntc; in igc_clean_rx_irq_zc()
2853 if (xsk_uses_need_wakeup(ring->xsk_pool)) { in igc_clean_rx_irq_zc()
2854 if (failure || ring->next_to_clean == ring->next_to_use) in igc_clean_rx_irq_zc()
2855 xsk_set_rx_need_wakeup(ring->xsk_pool); in igc_clean_rx_irq_zc()
2857 xsk_clear_rx_need_wakeup(ring->xsk_pool); in igc_clean_rx_irq_zc()
2867 struct igc_ring *ring = q_vector->tx.ring; in igc_update_tx_stats()
2869 u64_stats_update_begin(&ring->tx_syncp); in igc_update_tx_stats()
2870 ring->tx_stats.bytes += bytes; in igc_update_tx_stats()
2871 ring->tx_stats.packets += packets; in igc_update_tx_stats()
2872 u64_stats_update_end(&ring->tx_syncp); in igc_update_tx_stats()
2874 q_vector->tx.total_bytes += bytes; in igc_update_tx_stats()
2875 q_vector->tx.total_packets += packets; in igc_update_tx_stats()
2881 struct igc_ring *tx_ring = meta_req->tx_ring; in igc_xsk_request_timestamp()
2889 if (test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags)) { in igc_xsk_request_timestamp()
2890 adapter = netdev_priv(tx_ring->netdev); in igc_xsk_request_timestamp()
2892 spin_lock_irqsave(&adapter->ptp_tx_lock, lock_flags); in igc_xsk_request_timestamp()
2896 tstamp = &adapter->tx_tstamp[i]; in igc_xsk_request_timestamp()
2898 /* tstamp->skb and tstamp->xsk_tx_buffer are in union. in igc_xsk_request_timestamp()
2899 * When tstamp->skb is equal to NULL, in igc_xsk_request_timestamp()
2900 * tstamp->xsk_tx_buffer is equal to NULL as well. in igc_xsk_request_timestamp()
2904 if (!tstamp->skb) { in igc_xsk_request_timestamp()
2912 adapter->tx_hwtstamp_skipped++; in igc_xsk_request_timestamp()
2913 spin_unlock_irqrestore(&adapter->ptp_tx_lock, in igc_xsk_request_timestamp()
2918 tstamp->start = jiffies; in igc_xsk_request_timestamp()
2919 tstamp->xsk_queue_index = tx_ring->queue_index; in igc_xsk_request_timestamp()
2920 tstamp->xsk_tx_buffer = meta_req->tx_buffer; in igc_xsk_request_timestamp()
2921 tstamp->buffer_type = IGC_TX_BUFFER_TYPE_XSK; in igc_xsk_request_timestamp()
2924 meta_req->tx_buffer->xsk_pending_ts = true; in igc_xsk_request_timestamp()
2930 xsk_tx_metadata_to_compl(meta_req->meta, &tstamp->xsk_meta); in igc_xsk_request_timestamp()
2933 tx_flags |= tstamp->flags; in igc_xsk_request_timestamp()
2934 meta_req->cmd_type |= IGC_SET_FLAG(tx_flags, in igc_xsk_request_timestamp()
2937 meta_req->cmd_type |= IGC_SET_FLAG(tx_flags, in igc_xsk_request_timestamp()
2940 meta_req->cmd_type |= IGC_SET_FLAG(tx_flags, in igc_xsk_request_timestamp()
2943 meta_req->cmd_type |= IGC_SET_FLAG(tx_flags, in igc_xsk_request_timestamp()
2947 spin_unlock_irqrestore(&adapter->ptp_tx_lock, lock_flags); in igc_xsk_request_timestamp()
2963 struct xsk_buff_pool *pool = ring->xsk_pool; in igc_xdp_xmit_zc()
2970 if (!netif_carrier_ok(ring->netdev)) in igc_xdp_xmit_zc()
2978 ntu = ring->next_to_use; in igc_xdp_xmit_zc()
2981 while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) { in igc_xdp_xmit_zc()
2997 bi = &ring->tx_buffer_info[ntu]; in igc_xdp_xmit_zc()
3006 tx_desc->read.cmd_type_len = cpu_to_le32(meta_req.cmd_type); in igc_xdp_xmit_zc()
3007 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); in igc_xdp_xmit_zc()
3008 tx_desc->read.buffer_addr = cpu_to_le64(dma); in igc_xdp_xmit_zc()
3010 bi->type = IGC_TX_BUFFER_TYPE_XSK; in igc_xdp_xmit_zc()
3011 bi->protocol = 0; in igc_xdp_xmit_zc()
3012 bi->bytecount = xdp_desc.len; in igc_xdp_xmit_zc()
3013 bi->gso_segs = 1; in igc_xdp_xmit_zc()
3014 bi->time_stamp = jiffies; in igc_xdp_xmit_zc()
3015 bi->next_to_watch = tx_desc; in igc_xdp_xmit_zc()
3020 if (ntu == ring->count) in igc_xdp_xmit_zc()
3024 ring->next_to_use = ntu; in igc_xdp_xmit_zc()
3034 * igc_clean_tx_irq - Reclaim resources after transmit completes
3042 struct igc_adapter *adapter = q_vector->adapter; in igc_clean_tx_irq()
3044 unsigned int budget = q_vector->tx.work_limit; in igc_clean_tx_irq()
3045 struct igc_ring *tx_ring = q_vector->tx.ring; in igc_clean_tx_irq()
3046 unsigned int i = tx_ring->next_to_clean; in igc_clean_tx_irq()
3051 if (test_bit(__IGC_DOWN, &adapter->state)) in igc_clean_tx_irq()
3054 tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_clean_tx_irq()
3056 i -= tx_ring->count; in igc_clean_tx_irq()
3059 union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; in igc_clean_tx_irq()
3069 if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD))) in igc_clean_tx_irq()
3075 if (tx_buffer->type == IGC_TX_BUFFER_TYPE_XSK && in igc_clean_tx_irq()
3076 tx_buffer->xsk_pending_ts) in igc_clean_tx_irq()
3080 tx_buffer->next_to_watch = NULL; in igc_clean_tx_irq()
3083 total_bytes += tx_buffer->bytecount; in igc_clean_tx_irq()
3084 total_packets += tx_buffer->gso_segs; in igc_clean_tx_irq()
3086 switch (tx_buffer->type) { in igc_clean_tx_irq()
3091 xdp_return_frame(tx_buffer->xdpf); in igc_clean_tx_irq()
3092 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_irq()
3095 napi_consume_skb(tx_buffer->skb, napi_budget); in igc_clean_tx_irq()
3096 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_irq()
3099 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); in igc_clean_tx_irq()
3109 i -= tx_ring->count; in igc_clean_tx_irq()
3110 tx_buffer = tx_ring->tx_buffer_info; in igc_clean_tx_irq()
3116 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_irq()
3124 i -= tx_ring->count; in igc_clean_tx_irq()
3125 tx_buffer = tx_ring->tx_buffer_info; in igc_clean_tx_irq()
3133 budget--; in igc_clean_tx_irq()
3139 i += tx_ring->count; in igc_clean_tx_irq()
3140 tx_ring->next_to_clean = i; in igc_clean_tx_irq()
3144 if (tx_ring->xsk_pool) { in igc_clean_tx_irq()
3146 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); in igc_clean_tx_irq()
3147 if (xsk_uses_need_wakeup(tx_ring->xsk_pool)) in igc_clean_tx_irq()
3148 xsk_set_tx_need_wakeup(tx_ring->xsk_pool); in igc_clean_tx_irq()
3152 if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { in igc_clean_tx_irq()
3153 struct igc_hw *hw = &adapter->hw; in igc_clean_tx_irq()
3158 clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); in igc_clean_tx_irq()
3159 if (tx_buffer->next_to_watch && in igc_clean_tx_irq()
3160 time_after(jiffies, tx_buffer->time_stamp + in igc_clean_tx_irq()
3161 (adapter->tx_timeout_factor * HZ)) && in igc_clean_tx_irq()
3163 (rd32(IGC_TDH(tx_ring->reg_idx)) != readl(tx_ring->tail)) && in igc_clean_tx_irq()
3164 !tx_ring->oper_gate_closed) { in igc_clean_tx_irq()
3166 netdev_err(tx_ring->netdev, in igc_clean_tx_irq()
3178 tx_ring->queue_index, in igc_clean_tx_irq()
3179 rd32(IGC_TDH(tx_ring->reg_idx)), in igc_clean_tx_irq()
3180 readl(tx_ring->tail), in igc_clean_tx_irq()
3181 tx_ring->next_to_use, in igc_clean_tx_irq()
3182 tx_ring->next_to_clean, in igc_clean_tx_irq()
3183 tx_buffer->time_stamp, in igc_clean_tx_irq()
3184 tx_buffer->next_to_watch, in igc_clean_tx_irq()
3186 tx_buffer->next_to_watch->wb.status); in igc_clean_tx_irq()
3187 netif_stop_subqueue(tx_ring->netdev, in igc_clean_tx_irq()
3188 tx_ring->queue_index); in igc_clean_tx_irq()
3197 netif_carrier_ok(tx_ring->netdev) && in igc_clean_tx_irq()
3203 if (__netif_subqueue_stopped(tx_ring->netdev, in igc_clean_tx_irq()
3204 tx_ring->queue_index) && in igc_clean_tx_irq()
3205 !(test_bit(__IGC_DOWN, &adapter->state))) { in igc_clean_tx_irq()
3206 netif_wake_subqueue(tx_ring->netdev, in igc_clean_tx_irq()
3207 tx_ring->queue_index); in igc_clean_tx_irq()
3209 u64_stats_update_begin(&tx_ring->tx_syncp); in igc_clean_tx_irq()
3210 tx_ring->tx_stats.restart_queue++; in igc_clean_tx_irq()
3211 u64_stats_update_end(&tx_ring->tx_syncp); in igc_clean_tx_irq()
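The completion test near the top of the igc_clean_tx_irq() hits is the heart of the function: a slot is reclaimed only once the hardware has set the DD (descriptor done) bit in the EOP descriptor recorded in next_to_watch, and AF_XDP slots with a timestamp still pending are skipped. A condensed sketch of that check, assembled from the lines above (the read barrier is assumed to sit in the elided gap):

        union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

        if (!eop_desc)
                break;          /* nothing submitted beyond this slot */

        /* read next_to_watch before the descriptor status */
        smp_rmb();

        /* hardware has not written this frame back yet */
        if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
                break;

        /* AF_XDP Tx timestamp still outstanding: leave the slot alone */
        if (tx_buffer->type == IGC_TX_BUFFER_TYPE_XSK &&
            tx_buffer->xsk_pending_ts)
                break;

        /* frame is done: clear the watch pointer and account it */
        tx_buffer->next_to_watch = NULL;
        total_bytes   += tx_buffer->bytecount;
        total_packets += tx_buffer->gso_segs;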
3221 struct igc_hw *hw = &adapter->hw; in igc_find_mac_filter()
3222 int max_entries = hw->mac.rar_entry_count; in igc_find_mac_filter()
3243 return -1; in igc_find_mac_filter()
3248 struct igc_hw *hw = &adapter->hw; in igc_get_avail_mac_filter_slot()
3249 int max_entries = hw->mac.rar_entry_count; in igc_get_avail_mac_filter_slot()
3260 return -1; in igc_get_avail_mac_filter_slot()
3264 * igc_add_mac_filter() - Add MAC address filter
3268 * @queue: If non-negative, queue assignment feature is enabled and frames
3278 struct net_device *dev = adapter->netdev; in igc_add_mac_filter()
3287 return -ENOSPC; in igc_add_mac_filter()
3299 * igc_del_mac_filter() - Delete MAC address filter
3307 struct net_device *dev = adapter->netdev; in igc_del_mac_filter()
3316 * We just reset to its default value i.e. disable queue in igc_del_mac_filter()
3319 netdev_dbg(dev, "Disable default MAC filter queue assignment"); in igc_del_mac_filter()
3321 igc_set_mac_filter_hw(adapter, 0, type, addr, -1); in igc_del_mac_filter()
3333 * igc_add_vlan_prio_filter() - Add VLAN priority filter
3343 struct net_device *dev = adapter->netdev; in igc_add_vlan_prio_filter()
3344 struct igc_hw *hw = &adapter->hw; in igc_add_vlan_prio_filter()
3351 return -EEXIST; in igc_add_vlan_prio_filter()
3365 * igc_del_vlan_prio_filter() - Delete VLAN priority filter
3371 struct igc_hw *hw = &adapter->hw; in igc_del_vlan_prio_filter()
3381 netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n", in igc_del_vlan_prio_filter()
3387 struct igc_hw *hw = &adapter->hw; in igc_get_avail_etype_filter_slot()
3397 return -1; in igc_get_avail_etype_filter_slot()
3401 * igc_add_etype_filter() - Add ethertype filter
3404 * @queue: If non-negative, queue assignment feature is enabled and frames
3413 struct igc_hw *hw = &adapter->hw; in igc_add_etype_filter()
3419 return -ENOSPC; in igc_add_etype_filter()
3436 netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n", in igc_add_etype_filter()
3443 struct igc_hw *hw = &adapter->hw; in igc_find_etype_filter()
3453 return -1; in igc_find_etype_filter()
3457 * igc_del_etype_filter() - Delete ethertype filter
3463 struct igc_hw *hw = &adapter->hw; in igc_del_etype_filter()
3472 netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n", in igc_del_etype_filter()
3480 struct igc_hw *hw = &adapter->hw; in igc_flex_filter_select()
3484 if (input->index >= MAX_FLEX_FILTER) { in igc_flex_filter_select()
3485 netdev_err(adapter->netdev, "Wrong Flex Filter index selected!\n"); in igc_flex_filter_select()
3486 return -EINVAL; in igc_flex_filter_select()
3492 switch (input->index) { in igc_flex_filter_select()
3509 fhft_index = input->index % 8; in igc_flex_filter_select()
3512 IGC_FHFT_EXT(fhft_index - 4); in igc_flex_filter_select()
3520 struct igc_hw *hw = &adapter->hw; in igc_write_flex_filter_ll()
3521 u8 *data = input->data; in igc_write_flex_filter_ll()
3522 u8 *mask = input->mask; in igc_write_flex_filter_ll()
3532 if (input->length % 8 != 0) { in igc_write_flex_filter_ll()
3533 netdev_err(adapter->netdev, "The length of a flex filter has to be 8 byte aligned!\n"); in igc_write_flex_filter_ll()
3534 return -EINVAL; in igc_write_flex_filter_ll()
3542 /* When adding a filter globally disable flex filter feature. That is in igc_write_flex_filter_ll()
3550 queuing = input->length & IGC_FHFT_LENGTH_MASK; in igc_write_flex_filter_ll()
3551 queuing |= FIELD_PREP(IGC_FHFT_QUEUE_MASK, input->rx_queue); in igc_write_flex_filter_ll()
3552 queuing |= FIELD_PREP(IGC_FHFT_PRIO_MASK, input->prio); in igc_write_flex_filter_ll()
3554 if (input->immediate_irq) in igc_write_flex_filter_ll()
3557 if (input->drop) in igc_write_flex_filter_ll()
3591 if (input->index > 8) { in igc_write_flex_filter_ll()
3592 /* Filters 0-7 are enabled via WUFC. The other 24 filters are not. */ in igc_write_flex_filter_ll()
3595 wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8)); in igc_write_flex_filter_ll()
3599 wufc |= (IGC_WUFC_FLX0 << input->index); in igc_write_flex_filter_ll()
3603 netdev_dbg(adapter->netdev, "Added flex filter %u to HW.\n", in igc_write_flex_filter_ll()
3604 input->index); in igc_write_flex_filter_ll()
3616 memcpy(&flex->data[offset], src, len); in igc_flex_filter_add_field()
3625 flex->mask[idx / 8] |= BIT(idx % 8); in igc_flex_filter_add_field()
3630 flex->mask[idx / 8] |= BIT(idx % 8); in igc_flex_filter_add_field()
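igc_flex_filter_add_field() pairs a byte copy into the filter's data area with per-byte "must match" bits in its mask, one mask bit per data byte. When the caller passes no explicit mask (the NULL last argument in most of the calls further down), every copied byte is flagged; a condensed sketch of that default path:

        int i;

        /* copy the match value into the raw filter data ... */
        memcpy(&flex->data[offset], src, len);

        /* ... and flag each copied byte as relevant: bit (i % 8) of mask
         * byte (i / 8) corresponds to data byte i
         */
        for (i = offset; i < offset + len; i++)
                flex->mask[i / 8] |= BIT(i % 8);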
3636 struct igc_hw *hw = &adapter->hw; in igc_find_avail_flex_filter_slot()
3648 if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8)))) in igc_find_avail_flex_filter_slot()
3653 return -ENOSPC; in igc_find_avail_flex_filter_slot()
3658 struct igc_hw *hw = &adapter->hw; in igc_flex_filter_in_use()
3676 struct igc_nfc_filter *filter = &rule->filter; in igc_add_flex_filter()
3684 return -ENOSPC; in igc_add_flex_filter()
3687 * -> dest_mac [6] in igc_add_flex_filter()
3688 * -> src_mac [6] in igc_add_flex_filter()
3689 * -> tpid [2] in igc_add_flex_filter()
3690 * -> vlan tci [2] in igc_add_flex_filter()
3691 * -> ether type [2] in igc_add_flex_filter()
3692 * -> user data [8] in igc_add_flex_filter()
3693 * -> = 26 bytes => 32 length in igc_add_flex_filter()
3697 flex.rx_queue = rule->action; in igc_add_flex_filter()
3699 vlan = rule->filter.vlan_tci || rule->filter.vlan_etype; in igc_add_flex_filter()
3704 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) in igc_add_flex_filter()
3705 igc_flex_filter_add_field(&flex, &filter->dst_addr, 0, in igc_add_flex_filter()
3709 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) in igc_add_flex_filter()
3710 igc_flex_filter_add_field(&flex, &filter->src_addr, 6, in igc_add_flex_filter()
3714 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) { in igc_add_flex_filter()
3715 __be16 vlan_etype = cpu_to_be16(filter->vlan_etype); in igc_add_flex_filter()
3722 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) in igc_add_flex_filter()
3723 igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14, in igc_add_flex_filter()
3724 sizeof(filter->vlan_tci), NULL); in igc_add_flex_filter()
3727 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { in igc_add_flex_filter()
3728 __be16 etype = cpu_to_be16(filter->etype); in igc_add_flex_filter()
3735 if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) in igc_add_flex_filter()
3736 igc_flex_filter_add_field(&flex, &filter->user_data, in igc_add_flex_filter()
3738 sizeof(filter->user_data), in igc_add_flex_filter()
3739 filter->user_mask); in igc_add_flex_filter()
3746 filter->flex_index = index; in igc_add_flex_filter()
3754 struct igc_hw *hw = &adapter->hw; in igc_del_flex_filter()
3757 /* Just disable the filter. The filter table itself is kept in igc_del_flex_filter()
3764 wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8)); in igc_del_flex_filter()
3776 /* No filters are in use, so we may disable flex filters */ in igc_del_flex_filter()
3787 if (rule->flex) { in igc_enable_nfc_rule()
3791 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { in igc_enable_nfc_rule()
3792 err = igc_add_etype_filter(adapter, rule->filter.etype, in igc_enable_nfc_rule()
3793 rule->action); in igc_enable_nfc_rule()
3798 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { in igc_enable_nfc_rule()
3800 rule->filter.src_addr, rule->action); in igc_enable_nfc_rule()
3805 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { in igc_enable_nfc_rule()
3807 rule->filter.dst_addr, rule->action); in igc_enable_nfc_rule()
3812 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { in igc_enable_nfc_rule()
3813 int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci); in igc_enable_nfc_rule()
3815 err = igc_add_vlan_prio_filter(adapter, prio, rule->action); in igc_enable_nfc_rule()
3826 if (rule->flex) { in igc_disable_nfc_rule()
3827 igc_del_flex_filter(adapter, rule->filter.flex_index); in igc_disable_nfc_rule()
3831 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) in igc_disable_nfc_rule()
3832 igc_del_etype_filter(adapter, rule->filter.etype); in igc_disable_nfc_rule()
3834 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { in igc_disable_nfc_rule()
3835 int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci); in igc_disable_nfc_rule()
3840 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) in igc_disable_nfc_rule()
3842 rule->filter.src_addr); in igc_disable_nfc_rule()
3844 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) in igc_disable_nfc_rule()
3846 rule->filter.dst_addr); in igc_disable_nfc_rule()
3850 * igc_get_nfc_rule() - Get NFC rule
3854 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3863 list_for_each_entry(rule, &adapter->nfc_rule_list, list) { in igc_get_nfc_rule()
3864 if (rule->location == location) in igc_get_nfc_rule()
3866 if (rule->location > location) in igc_get_nfc_rule()
3874 * igc_del_nfc_rule() - Delete NFC rule
3878 * Disable NFC rule in hardware and delete it from adapter.
3880 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3886 list_del(&rule->list); in igc_del_nfc_rule()
3887 adapter->nfc_rule_count--; in igc_del_nfc_rule()
3896 mutex_lock(&adapter->nfc_rule_lock); in igc_flush_nfc_rules()
3898 list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list) in igc_flush_nfc_rules()
3901 mutex_unlock(&adapter->nfc_rule_lock); in igc_flush_nfc_rules()
3905 * igc_add_nfc_rule() - Add NFC rule
3911 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3925 list_for_each_entry(cur, &adapter->nfc_rule_list, list) { in igc_add_nfc_rule()
3926 if (cur->location >= rule->location) in igc_add_nfc_rule()
3931 list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list); in igc_add_nfc_rule()
3932 adapter->nfc_rule_count++; in igc_add_nfc_rule()
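A worked example of the sorted insertion above: with existing rules at locations 1, 5 and 9, adding a rule at location 7 stops the scan at the location-9 entry, so pred is the location-5 rule and list_add() places the new rule between 5 and 9, keeping nfc_rule_list ordered by location (a new lowest location falls back to the list head).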
3940 mutex_lock(&adapter->nfc_rule_lock); in igc_restore_nfc_rules()
3942 list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list) in igc_restore_nfc_rules()
3945 mutex_unlock(&adapter->nfc_rule_lock); in igc_restore_nfc_rules()
3952 return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1); in igc_uc_sync()
3964 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
3970 * promiscuous mode, and all-multi behavior.
3975 struct igc_hw *hw = &adapter->hw; in igc_set_rx_mode()
3980 if (netdev->flags & IFF_PROMISC) { in igc_set_rx_mode()
3983 if (netdev->flags & IFF_ALLMULTI) { in igc_set_rx_mode()
4008 if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB) in igc_set_rx_mode()
4015 * igc_configure - configure the hardware for RX and TX
4020 struct net_device *netdev = adapter->netdev; in igc_configure()
4038 igc_rx_fifo_flush_base(&adapter->hw); in igc_configure()
4044 for (i = 0; i < adapter->num_rx_queues; i++) { in igc_configure()
4045 struct igc_ring *ring = adapter->rx_ring[i]; in igc_configure()
4047 if (ring->xsk_pool) in igc_configure()
4055 * igc_write_ivar - configure ivar for given MSI-X vector
4081 struct igc_adapter *adapter = q_vector->adapter; in igc_assign_vector()
4082 struct igc_hw *hw = &adapter->hw; in igc_assign_vector()
4086 if (q_vector->rx.ring) in igc_assign_vector()
4087 rx_queue = q_vector->rx.ring->reg_idx; in igc_assign_vector()
4088 if (q_vector->tx.ring) in igc_assign_vector()
4089 tx_queue = q_vector->tx.ring->reg_idx; in igc_assign_vector()
4091 switch (hw->mac.type) { in igc_assign_vector()
4101 q_vector->eims_value = BIT(msix_vector); in igc_assign_vector()
4104 WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n"); in igc_assign_vector()
4109 adapter->eims_enable_mask |= q_vector->eims_value; in igc_assign_vector()
4112 q_vector->set_itr = 1; in igc_assign_vector()
4116 * igc_configure_msix - Configure MSI-X hardware
4120 * generate MSI-X interrupts.
4124 struct igc_hw *hw = &adapter->hw; in igc_configure_msix()
4128 adapter->eims_enable_mask = 0; in igc_configure_msix()
4131 switch (hw->mac.type) { in igc_configure_msix()
4133 /* Turn on MSI-X capability first, or our settings in igc_configure_msix()
4141 adapter->eims_other = BIT(vector); in igc_configure_msix()
4147 /* do nothing, since nothing else supports MSI-X */ in igc_configure_msix()
4149 } /* switch (hw->mac.type) */ in igc_configure_msix()
4151 adapter->eims_enable_mask |= adapter->eims_other; in igc_configure_msix()
4153 for (i = 0; i < adapter->num_q_vectors; i++) in igc_configure_msix()
4154 igc_assign_vector(adapter->q_vector[i], vector++); in igc_configure_msix()
4160 * igc_irq_enable - Enable default interrupt generation settings
4165 struct igc_hw *hw = &adapter->hw; in igc_irq_enable()
4167 if (adapter->msix_entries) { in igc_irq_enable()
4171 wr32(IGC_EIAC, regval | adapter->eims_enable_mask); in igc_irq_enable()
4173 wr32(IGC_EIAM, regval | adapter->eims_enable_mask); in igc_irq_enable()
4174 wr32(IGC_EIMS, adapter->eims_enable_mask); in igc_irq_enable()
4183 * igc_irq_disable - Mask off interrupt generation on the NIC
4188 struct igc_hw *hw = &adapter->hw; in igc_irq_disable()
4190 if (adapter->msix_entries) { in igc_irq_disable()
4193 wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask); in igc_irq_disable()
4194 wr32(IGC_EIMC, adapter->eims_enable_mask); in igc_irq_disable()
4196 wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask); in igc_irq_disable()
4203 if (adapter->msix_entries) { in igc_irq_disable()
4206 synchronize_irq(adapter->msix_entries[vector++].vector); in igc_irq_disable()
4208 for (i = 0; i < adapter->num_q_vectors; i++) in igc_irq_disable()
4209 synchronize_irq(adapter->msix_entries[vector++].vector); in igc_irq_disable()
4211 synchronize_irq(adapter->pdev->irq); in igc_irq_disable()
4222 if (adapter->rss_queues > (max_rss_queues / 2)) in igc_set_flag_queue_pairs()
4223 adapter->flags |= IGC_FLAG_QUEUE_PAIRS; in igc_set_flag_queue_pairs()
4225 adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS; in igc_set_flag_queue_pairs()
4238 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); in igc_init_queue_configuration()
4244 * igc_reset_q_vector - Reset config for interrupt vector
4253 struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; in igc_reset_q_vector()
4261 if (q_vector->tx.ring) in igc_reset_q_vector()
4262 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; in igc_reset_q_vector()
4264 if (q_vector->rx.ring) in igc_reset_q_vector()
4265 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; in igc_reset_q_vector()
4267 netif_napi_del(&q_vector->napi); in igc_reset_q_vector()
4271 * igc_free_q_vector - Free memory allocated for specific interrupt vector
4279 struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; in igc_free_q_vector()
4281 adapter->q_vector[v_idx] = NULL; in igc_free_q_vector()
4291 * igc_free_q_vectors - Free memory allocated for interrupt vectors
4300 int v_idx = adapter->num_q_vectors; in igc_free_q_vectors()
4302 adapter->num_tx_queues = 0; in igc_free_q_vectors()
4303 adapter->num_rx_queues = 0; in igc_free_q_vectors()
4304 adapter->num_q_vectors = 0; in igc_free_q_vectors()
4306 while (v_idx--) { in igc_free_q_vectors()
4313 * igc_update_itr - update the dynamic ITR value based on statistics
4324 * NOTE: These calculations are only valid when operating in a single-
4330 unsigned int packets = ring_container->total_packets; in igc_update_itr()
4331 unsigned int bytes = ring_container->total_bytes; in igc_update_itr()
4332 u8 itrval = ring_container->itr; in igc_update_itr()
4372 ring_container->total_bytes = 0; in igc_update_itr()
4373 ring_container->total_packets = 0; in igc_update_itr()
4376 ring_container->itr = itrval; in igc_update_itr()
4381 struct igc_adapter *adapter = q_vector->adapter; in igc_set_itr()
4382 u32 new_itr = q_vector->itr_val; in igc_set_itr()
4385 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ in igc_set_itr()
4386 switch (adapter->link_speed) { in igc_set_itr()
4396 igc_update_itr(q_vector, &q_vector->tx); in igc_set_itr()
4397 igc_update_itr(q_vector, &q_vector->rx); in igc_set_itr()
4399 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); in igc_set_itr()
4403 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || in igc_set_itr()
4404 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) in igc_set_itr()
4423 if (new_itr != q_vector->itr_val) { in igc_set_itr()
4428 new_itr = new_itr > q_vector->itr_val ? in igc_set_itr()
4429 max((new_itr * q_vector->itr_val) / in igc_set_itr()
4430 (new_itr + (q_vector->itr_val >> 2)), in igc_set_itr()
4438 q_vector->itr_val = new_itr; in igc_set_itr()
4439 q_vector->set_itr = 1; in igc_set_itr()
4445 int v_idx = adapter->num_q_vectors; in igc_reset_interrupt_capability()
4447 if (adapter->msix_entries) { in igc_reset_interrupt_capability()
4448 pci_disable_msix(adapter->pdev); in igc_reset_interrupt_capability()
4449 kfree(adapter->msix_entries); in igc_reset_interrupt_capability()
4450 adapter->msix_entries = NULL; in igc_reset_interrupt_capability()
4451 } else if (adapter->flags & IGC_FLAG_HAS_MSI) { in igc_reset_interrupt_capability()
4452 pci_disable_msi(adapter->pdev); in igc_reset_interrupt_capability()
4455 while (v_idx--) in igc_reset_interrupt_capability()
4460 * igc_set_interrupt_capability - set MSI or MSI-X if supported
4462 * @msix: boolean value for MSI-X capability
4475 adapter->flags |= IGC_FLAG_HAS_MSIX; in igc_set_interrupt_capability()
4478 adapter->num_rx_queues = adapter->rss_queues; in igc_set_interrupt_capability()
4480 adapter->num_tx_queues = adapter->rss_queues; in igc_set_interrupt_capability()
4483 numvecs = adapter->num_rx_queues; in igc_set_interrupt_capability()
4486 if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) in igc_set_interrupt_capability()
4487 numvecs += adapter->num_tx_queues; in igc_set_interrupt_capability()
4490 adapter->num_q_vectors = numvecs; in igc_set_interrupt_capability()
4495 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), in igc_set_interrupt_capability()
4498 if (!adapter->msix_entries) in igc_set_interrupt_capability()
4503 adapter->msix_entries[i].entry = i; in igc_set_interrupt_capability()
4505 err = pci_enable_msix_range(adapter->pdev, in igc_set_interrupt_capability()
4506 adapter->msix_entries, in igc_set_interrupt_capability()
4512 kfree(adapter->msix_entries); in igc_set_interrupt_capability()
4513 adapter->msix_entries = NULL; in igc_set_interrupt_capability()
4518 adapter->flags &= ~IGC_FLAG_HAS_MSIX; in igc_set_interrupt_capability()
4520 adapter->rss_queues = 1; in igc_set_interrupt_capability()
4521 adapter->flags |= IGC_FLAG_QUEUE_PAIRS; in igc_set_interrupt_capability()
4522 adapter->num_rx_queues = 1; in igc_set_interrupt_capability()
4523 adapter->num_tx_queues = 1; in igc_set_interrupt_capability()
4524 adapter->num_q_vectors = 1; in igc_set_interrupt_capability()
4525 if (!pci_enable_msi(adapter->pdev)) in igc_set_interrupt_capability()
4526 adapter->flags |= IGC_FLAG_HAS_MSI; in igc_set_interrupt_capability()
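A rough worked example of the vector budget above, assuming an i225 with four RSS queues: with IGC_FLAG_QUEUE_PAIRS set each Rx/Tx pair shares a vector, so numvecs is 4 queue vectors plus the extra vector serviced by igc_msix_other(), i.e. 5 MSI-X vectors; with pairing disabled it grows to roughly 4 + 4 + 1 = 9.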
4530 * igc_update_ring_itr - update the dynamic ITR value based on packet size
4545 struct igc_adapter *adapter = q_vector->adapter; in igc_update_ring_itr()
4546 int new_val = q_vector->itr_val; in igc_update_ring_itr()
4550 /* For non-gigabit speeds, just fix the interrupt rate at 4000 in igc_update_ring_itr()
4551 * ints/sec - ITR timer value of 120 ticks. in igc_update_ring_itr()
4553 switch (adapter->link_speed) { in igc_update_ring_itr()
4562 packets = q_vector->rx.total_packets; in igc_update_ring_itr()
4564 avg_wire_size = q_vector->rx.total_bytes / packets; in igc_update_ring_itr()
4566 packets = q_vector->tx.total_packets; in igc_update_ring_itr()
4569 q_vector->tx.total_bytes / packets); in igc_update_ring_itr()
4581 /* Give a little boost to mid-size frames */ in igc_update_ring_itr()
4589 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || in igc_update_ring_itr()
4590 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) in igc_update_ring_itr()
4594 if (new_val != q_vector->itr_val) { in igc_update_ring_itr()
4595 q_vector->itr_val = new_val; in igc_update_ring_itr()
4596 q_vector->set_itr = 1; in igc_update_ring_itr()
4599 q_vector->rx.total_bytes = 0; in igc_update_ring_itr()
4600 q_vector->rx.total_packets = 0; in igc_update_ring_itr()
4601 q_vector->tx.total_bytes = 0; in igc_update_ring_itr()
4602 q_vector->tx.total_packets = 0; in igc_update_ring_itr()
4607 struct igc_adapter *adapter = q_vector->adapter; in igc_ring_irq_enable()
4608 struct igc_hw *hw = &adapter->hw; in igc_ring_irq_enable()
4610 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || in igc_ring_irq_enable()
4611 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { in igc_ring_irq_enable()
4612 if (adapter->num_q_vectors == 1) in igc_ring_irq_enable()
4618 if (!test_bit(__IGC_DOWN, &adapter->state)) { in igc_ring_irq_enable()
4619 if (adapter->msix_entries) in igc_ring_irq_enable()
4620 wr32(IGC_EIMS, q_vector->eims_value); in igc_ring_irq_enable()
4629 head->ring = ring; in igc_add_ring()
4630 head->count++; in igc_add_ring()
4634 * igc_cache_ring_register - Descriptor ring to register mapping
4637 * Once we know the feature-set enabled for the device, we'll cache
4644 switch (adapter->hw.mac.type) { in igc_cache_ring_register()
4647 for (; i < adapter->num_rx_queues; i++) in igc_cache_ring_register()
4648 adapter->rx_ring[i]->reg_idx = i; in igc_cache_ring_register()
4649 for (; j < adapter->num_tx_queues; j++) in igc_cache_ring_register()
4650 adapter->tx_ring[j]->reg_idx = j; in igc_cache_ring_register()
4656 * igc_poll - NAPI Rx polling callback
4665 struct igc_ring *rx_ring = q_vector->rx.ring; in igc_poll()
4669 if (q_vector->tx.ring) in igc_poll()
4673 int cleaned = rx_ring->xsk_pool ? in igc_poll()
4686 /* Exit the polling mode, but don't re-enable interrupts if stack might in igc_poll()
4687 * poll us due to busy-polling in igc_poll()
4692 return min(work_done, budget - 1); in igc_poll()
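The budget handling above follows the usual NAPI contract: returning the full budget tells the core to keep polling, so a handler that has already called napi_complete_done() must report at most budget - 1. A minimal sketch of that pattern, with do_rx_work() and enable_device_irqs() as hypothetical placeholders rather than igc functions:

static int example_poll(struct napi_struct *napi, int budget)
{
	bool clean_complete = true;
	int work_done;

	/* hypothetical helper: clean the rings, report whether all work is done */
	work_done = do_rx_work(napi, budget, &clean_complete);

	if (!clean_complete)
		return budget;			/* ask the core to poll again */

	if (napi_complete_done(napi, work_done))
		enable_device_irqs(napi);	/* hypothetical helper */

	/* never report the full budget once polling has been completed */
	return min(work_done, budget - 1);
}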
4696 * igc_alloc_q_vector - Allocate memory for a single interrupt vector
4705 * We allocate one q_vector. If allocation fails we return -ENOMEM.
4718 return -ENOMEM; in igc_alloc_q_vector()
4723 q_vector = adapter->q_vector[v_idx]; in igc_alloc_q_vector()
4730 return -ENOMEM; in igc_alloc_q_vector()
4733 netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll); in igc_alloc_q_vector()
4736 adapter->q_vector[v_idx] = q_vector; in igc_alloc_q_vector()
4737 q_vector->adapter = adapter; in igc_alloc_q_vector()
4740 q_vector->tx.work_limit = adapter->tx_work_limit; in igc_alloc_q_vector()
4743 q_vector->itr_register = adapter->io_addr + IGC_EITR(0); in igc_alloc_q_vector()
4744 q_vector->itr_val = IGC_START_ITR; in igc_alloc_q_vector()
4747 ring = q_vector->ring; in igc_alloc_q_vector()
4752 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) in igc_alloc_q_vector()
4753 q_vector->itr_val = adapter->rx_itr_setting; in igc_alloc_q_vector()
4756 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) in igc_alloc_q_vector()
4757 q_vector->itr_val = adapter->tx_itr_setting; in igc_alloc_q_vector()
4762 ring->dev = &adapter->pdev->dev; in igc_alloc_q_vector()
4763 ring->netdev = adapter->netdev; in igc_alloc_q_vector()
4766 ring->q_vector = q_vector; in igc_alloc_q_vector()
4769 igc_add_ring(ring, &q_vector->tx); in igc_alloc_q_vector()
4772 ring->count = adapter->tx_ring_count; in igc_alloc_q_vector()
4773 ring->queue_index = txr_idx; in igc_alloc_q_vector()
4776 adapter->tx_ring[txr_idx] = ring; in igc_alloc_q_vector()
4784 ring->dev = &adapter->pdev->dev; in igc_alloc_q_vector()
4785 ring->netdev = adapter->netdev; in igc_alloc_q_vector()
4788 ring->q_vector = q_vector; in igc_alloc_q_vector()
4791 igc_add_ring(ring, &q_vector->rx); in igc_alloc_q_vector()
4794 ring->count = adapter->rx_ring_count; in igc_alloc_q_vector()
4795 ring->queue_index = rxr_idx; in igc_alloc_q_vector()
4798 adapter->rx_ring[rxr_idx] = ring; in igc_alloc_q_vector()
4805 * igc_alloc_q_vectors - Allocate memory for interrupt vectors
4809 * return -ENOMEM.
4813 int rxr_remaining = adapter->num_rx_queues; in igc_alloc_q_vectors()
4814 int txr_remaining = adapter->num_tx_queues; in igc_alloc_q_vectors()
4816 int q_vectors = adapter->num_q_vectors; in igc_alloc_q_vectors()
4828 rxr_remaining--; in igc_alloc_q_vectors()
4834 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); in igc_alloc_q_vectors()
4835 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); in igc_alloc_q_vectors()
4844 rxr_remaining -= rqpv; in igc_alloc_q_vectors()
4845 txr_remaining -= tqpv; in igc_alloc_q_vectors()
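For illustration, spreading 4 Rx and 4 Tx queues over 3 remaining vectors works out as: the first vector takes DIV_ROUND_UP(4, 3) = 2 Rx and 2 Tx queues, leaving 2 + 2 queues for the last two vectors, which then take one Rx and one Tx queue each, so nothing is left over.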
4853 adapter->num_tx_queues = 0; in igc_alloc_q_vectors()
4854 adapter->num_rx_queues = 0; in igc_alloc_q_vectors()
4855 adapter->num_q_vectors = 0; in igc_alloc_q_vectors()
4857 while (v_idx--) in igc_alloc_q_vectors()
4860 return -ENOMEM; in igc_alloc_q_vectors()
4864 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
4866 * @msix: boolean for MSI-X capability
4872 struct net_device *dev = adapter->netdev; in igc_init_interrupt_scheme()
4893 * igc_sw_init - Initialize general software structures (struct igc_adapter)
4902 struct net_device *netdev = adapter->netdev; in igc_sw_init()
4903 struct pci_dev *pdev = adapter->pdev; in igc_sw_init()
4904 struct igc_hw *hw = &adapter->hw; in igc_sw_init()
4906 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); in igc_sw_init()
4909 adapter->tx_ring_count = IGC_DEFAULT_TXD; in igc_sw_init()
4910 adapter->rx_ring_count = IGC_DEFAULT_RXD; in igc_sw_init()
4913 adapter->rx_itr_setting = IGC_DEFAULT_ITR; in igc_sw_init()
4914 adapter->tx_itr_setting = IGC_DEFAULT_ITR; in igc_sw_init()
4917 adapter->tx_work_limit = IGC_DEFAULT_TX_WORK; in igc_sw_init()
4920 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + in igc_sw_init()
4922 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; in igc_sw_init()
4924 mutex_init(&adapter->nfc_rule_lock); in igc_sw_init()
4925 INIT_LIST_HEAD(&adapter->nfc_rule_list); in igc_sw_init()
4926 adapter->nfc_rule_count = 0; in igc_sw_init()
4928 spin_lock_init(&adapter->stats64_lock); in igc_sw_init()
4929 spin_lock_init(&adapter->qbv_tx_lock); in igc_sw_init()
4930 /* Assume MSI-X interrupts, will be checked during IRQ allocation */ in igc_sw_init()
4931 adapter->flags |= IGC_FLAG_HAS_MSIX; in igc_sw_init()
4938 return -ENOMEM; in igc_sw_init()
4941 /* Explicitly disable IRQ since the NIC can be in any state. */ in igc_sw_init()
4944 set_bit(__IGC_DOWN, &adapter->state); in igc_sw_init()
4952 struct igc_q_vector *q_vector = adapter->q_vector[vector]; in igc_set_queue_napi()
4954 if (q_vector->rx.ring) in igc_set_queue_napi()
4955 netif_queue_set_napi(adapter->netdev, in igc_set_queue_napi()
4956 q_vector->rx.ring->queue_index, in igc_set_queue_napi()
4959 if (q_vector->tx.ring) in igc_set_queue_napi()
4960 netif_queue_set_napi(adapter->netdev, in igc_set_queue_napi()
4961 q_vector->tx.ring->queue_index, in igc_set_queue_napi()
4966 * igc_up - Open the interface and prepare it to handle traffic
4971 struct igc_hw *hw = &adapter->hw; in igc_up()
4978 clear_bit(__IGC_DOWN, &adapter->state); in igc_up()
4980 for (i = 0; i < adapter->num_q_vectors; i++) { in igc_up()
4981 napi = &adapter->q_vector[i]->napi; in igc_up()
4986 if (adapter->msix_entries) in igc_up()
4989 igc_assign_vector(adapter->q_vector[0], 0); in igc_up()
4995 netif_tx_start_all_queues(adapter->netdev); in igc_up()
4998 hw->mac.get_link_status = true; in igc_up()
4999 schedule_work(&adapter->watchdog_task); in igc_up()
5003 * igc_update_stats - Update the board statistics counters
5008 struct rtnl_link_stats64 *net_stats = &adapter->stats64; in igc_update_stats()
5009 struct pci_dev *pdev = adapter->pdev; in igc_update_stats()
5010 struct igc_hw *hw = &adapter->hw; in igc_update_stats()
5020 if (adapter->link_speed == 0) in igc_update_stats()
5029 for (i = 0; i < adapter->num_rx_queues; i++) { in igc_update_stats()
5030 struct igc_ring *ring = adapter->rx_ring[i]; in igc_update_stats()
5033 if (hw->mac.type >= igc_i225) in igc_update_stats()
5037 ring->rx_stats.drops += rqdpc; in igc_update_stats()
5038 net_stats->rx_fifo_errors += rqdpc; in igc_update_stats()
5042 start = u64_stats_fetch_begin(&ring->rx_syncp); in igc_update_stats()
5043 _bytes = ring->rx_stats.bytes; in igc_update_stats()
5044 _packets = ring->rx_stats.packets; in igc_update_stats()
5045 } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); in igc_update_stats()
5050 net_stats->rx_bytes = bytes; in igc_update_stats()
5051 net_stats->rx_packets = packets; in igc_update_stats()
5055 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_update_stats()
5056 struct igc_ring *ring = adapter->tx_ring[i]; in igc_update_stats()
5059 start = u64_stats_fetch_begin(&ring->tx_syncp); in igc_update_stats()
5060 _bytes = ring->tx_stats.bytes; in igc_update_stats()
5061 _packets = ring->tx_stats.packets; in igc_update_stats()
5062 } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); in igc_update_stats()
5066 net_stats->tx_bytes = bytes; in igc_update_stats()
5067 net_stats->tx_packets = packets; in igc_update_stats()
5071 adapter->stats.crcerrs += rd32(IGC_CRCERRS); in igc_update_stats()
5072 adapter->stats.gprc += rd32(IGC_GPRC); in igc_update_stats()
5073 adapter->stats.gorc += rd32(IGC_GORCL); in igc_update_stats()
5075 adapter->stats.bprc += rd32(IGC_BPRC); in igc_update_stats()
5076 adapter->stats.mprc += rd32(IGC_MPRC); in igc_update_stats()
5077 adapter->stats.roc += rd32(IGC_ROC); in igc_update_stats()
5079 adapter->stats.prc64 += rd32(IGC_PRC64); in igc_update_stats()
5080 adapter->stats.prc127 += rd32(IGC_PRC127); in igc_update_stats()
5081 adapter->stats.prc255 += rd32(IGC_PRC255); in igc_update_stats()
5082 adapter->stats.prc511 += rd32(IGC_PRC511); in igc_update_stats()
5083 adapter->stats.prc1023 += rd32(IGC_PRC1023); in igc_update_stats()
5084 adapter->stats.prc1522 += rd32(IGC_PRC1522); in igc_update_stats()
5085 adapter->stats.tlpic += rd32(IGC_TLPIC); in igc_update_stats()
5086 adapter->stats.rlpic += rd32(IGC_RLPIC); in igc_update_stats()
5087 adapter->stats.hgptc += rd32(IGC_HGPTC); in igc_update_stats()
5090 adapter->stats.mpc += mpc; in igc_update_stats()
5091 net_stats->rx_fifo_errors += mpc; in igc_update_stats()
5092 adapter->stats.scc += rd32(IGC_SCC); in igc_update_stats()
5093 adapter->stats.ecol += rd32(IGC_ECOL); in igc_update_stats()
5094 adapter->stats.mcc += rd32(IGC_MCC); in igc_update_stats()
5095 adapter->stats.latecol += rd32(IGC_LATECOL); in igc_update_stats()
5096 adapter->stats.dc += rd32(IGC_DC); in igc_update_stats()
5097 adapter->stats.rlec += rd32(IGC_RLEC); in igc_update_stats()
5098 adapter->stats.xonrxc += rd32(IGC_XONRXC); in igc_update_stats()
5099 adapter->stats.xontxc += rd32(IGC_XONTXC); in igc_update_stats()
5100 adapter->stats.xoffrxc += rd32(IGC_XOFFRXC); in igc_update_stats()
5101 adapter->stats.xofftxc += rd32(IGC_XOFFTXC); in igc_update_stats()
5102 adapter->stats.fcruc += rd32(IGC_FCRUC); in igc_update_stats()
5103 adapter->stats.gptc += rd32(IGC_GPTC); in igc_update_stats()
5104 adapter->stats.gotc += rd32(IGC_GOTCL); in igc_update_stats()
5106 adapter->stats.rnbc += rd32(IGC_RNBC); in igc_update_stats()
5107 adapter->stats.ruc += rd32(IGC_RUC); in igc_update_stats()
5108 adapter->stats.rfc += rd32(IGC_RFC); in igc_update_stats()
5109 adapter->stats.rjc += rd32(IGC_RJC); in igc_update_stats()
5110 adapter->stats.tor += rd32(IGC_TORH); in igc_update_stats()
5111 adapter->stats.tot += rd32(IGC_TOTH); in igc_update_stats()
5112 adapter->stats.tpr += rd32(IGC_TPR); in igc_update_stats()
5114 adapter->stats.ptc64 += rd32(IGC_PTC64); in igc_update_stats()
5115 adapter->stats.ptc127 += rd32(IGC_PTC127); in igc_update_stats()
5116 adapter->stats.ptc255 += rd32(IGC_PTC255); in igc_update_stats()
5117 adapter->stats.ptc511 += rd32(IGC_PTC511); in igc_update_stats()
5118 adapter->stats.ptc1023 += rd32(IGC_PTC1023); in igc_update_stats()
5119 adapter->stats.ptc1522 += rd32(IGC_PTC1522); in igc_update_stats()
5121 adapter->stats.mptc += rd32(IGC_MPTC); in igc_update_stats()
5122 adapter->stats.bptc += rd32(IGC_BPTC); in igc_update_stats()
5124 adapter->stats.tpt += rd32(IGC_TPT); in igc_update_stats()
5125 adapter->stats.colc += rd32(IGC_COLC); in igc_update_stats()
5126 adapter->stats.colc += rd32(IGC_RERC); in igc_update_stats()
5128 adapter->stats.algnerrc += rd32(IGC_ALGNERRC); in igc_update_stats()
5130 adapter->stats.tsctc += rd32(IGC_TSCTC); in igc_update_stats()
5132 adapter->stats.iac += rd32(IGC_IAC); in igc_update_stats()
5135 net_stats->multicast = adapter->stats.mprc; in igc_update_stats()
5136 net_stats->collisions = adapter->stats.colc; in igc_update_stats()
5143 net_stats->rx_errors = adapter->stats.rxerrc + in igc_update_stats()
5144 adapter->stats.crcerrs + adapter->stats.algnerrc + in igc_update_stats()
5145 adapter->stats.ruc + adapter->stats.roc + in igc_update_stats()
5146 adapter->stats.cexterr; in igc_update_stats()
5147 net_stats->rx_length_errors = adapter->stats.ruc + in igc_update_stats()
5148 adapter->stats.roc; in igc_update_stats()
5149 net_stats->rx_crc_errors = adapter->stats.crcerrs; in igc_update_stats()
5150 net_stats->rx_frame_errors = adapter->stats.algnerrc; in igc_update_stats()
5151 net_stats->rx_missed_errors = adapter->stats.mpc; in igc_update_stats()
5154 net_stats->tx_errors = adapter->stats.ecol + in igc_update_stats()
5155 adapter->stats.latecol; in igc_update_stats()
5156 net_stats->tx_aborted_errors = adapter->stats.ecol; in igc_update_stats()
5157 net_stats->tx_window_errors = adapter->stats.latecol; in igc_update_stats()
5158 net_stats->tx_carrier_errors = adapter->stats.tncrs; in igc_update_stats()
5161 net_stats->tx_dropped = adapter->stats.txdrop; in igc_update_stats()
5164 adapter->stats.mgptc += rd32(IGC_MGTPTC); in igc_update_stats()
5165 adapter->stats.mgprc += rd32(IGC_MGTPRC); in igc_update_stats()
5166 adapter->stats.mgpdc += rd32(IGC_MGTPDC); in igc_update_stats()
5170 * igc_down - Close the interface
5175 struct net_device *netdev = adapter->netdev; in igc_down()
5176 struct igc_hw *hw = &adapter->hw; in igc_down()
5180 set_bit(__IGC_DOWN, &adapter->state); in igc_down()
5184 if (pci_device_is_present(adapter->pdev)) { in igc_down()
5185 /* disable receives in the hardware */ in igc_down()
5196 if (pci_device_is_present(adapter->pdev)) { in igc_down()
5197 /* disable transmits in the hardware */ in igc_down()
5208 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; in igc_down()
5210 for (i = 0; i < adapter->num_q_vectors; i++) { in igc_down()
5211 if (adapter->q_vector[i]) { in igc_down()
5212 napi_synchronize(&adapter->q_vector[i]->napi); in igc_down()
5214 napi_disable(&adapter->q_vector[i]->napi); in igc_down()
5218 del_timer_sync(&adapter->watchdog_timer); in igc_down()
5219 del_timer_sync(&adapter->phy_info_timer); in igc_down()
5222 spin_lock(&adapter->stats64_lock); in igc_down()
5224 spin_unlock(&adapter->stats64_lock); in igc_down()
5226 adapter->link_speed = 0; in igc_down()
5227 adapter->link_duplex = 0; in igc_down()
5229 if (!pci_channel_offline(adapter->pdev)) in igc_down()
5233 adapter->flags &= ~IGC_FLAG_VLAN_PROMISC; in igc_down()
5242 while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) in igc_reinit_locked()
5246 clear_bit(__IGC_RESETTING, &adapter->state); in igc_reinit_locked()
5257 if (test_bit(__IGC_DOWN, &adapter->state) || in igc_reset_task()
5258 test_bit(__IGC_RESETTING, &adapter->state)) { in igc_reset_task()
5265 netdev_err(adapter->netdev, "Reset adapter\n"); in igc_reset_task()
5271 * igc_change_mtu - Change the Maximum Transfer Unit
5284 return -EINVAL; in igc_change_mtu()
5291 while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) in igc_change_mtu()
5295 adapter->max_frame_size = max_frame; in igc_change_mtu()
5300 netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); in igc_change_mtu()
5301 WRITE_ONCE(netdev->mtu, new_mtu); in igc_change_mtu()
5308 clear_bit(__IGC_RESETTING, &adapter->state); in igc_change_mtu()
5314 * igc_tx_timeout - Respond to a Tx Hang
5322 struct igc_hw *hw = &adapter->hw; in igc_tx_timeout()
5325 adapter->tx_timeout_count++; in igc_tx_timeout()
5326 schedule_work(&adapter->reset_task); in igc_tx_timeout()
5328 (adapter->eims_enable_mask & ~adapter->eims_other)); in igc_tx_timeout()
5332 * igc_get_stats64 - Get System Network Statistics
5344 spin_lock(&adapter->stats64_lock); in igc_get_stats64()
5345 if (!test_bit(__IGC_RESETTING, &adapter->state)) in igc_get_stats64()
5347 memcpy(stats, &adapter->stats64, sizeof(*stats)); in igc_get_stats64()
5348 spin_unlock(&adapter->stats64_lock); in igc_get_stats64()
5355 * enable/disable make sure Tx flag is always in same state as Rx. in igc_fix_features()
5368 netdev_features_t changed = netdev->features ^ features; in igc_set_features()
5381 netdev->features = features; in igc_set_features()
5406 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); in igc_features_check()
5416 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) in igc_features_check()
5424 struct igc_hw *hw = &adapter->hw; in igc_tsync_interrupt()
5433 if (adapter->ptp_caps.pps) in igc_tsync_interrupt()
5434 ptp_clock_event(adapter->ptp_clock, &event); in igc_tsync_interrupt()
5443 spin_lock(&adapter->tmreg_lock); in igc_tsync_interrupt()
5444 ts = timespec64_add(adapter->perout[0].start, in igc_tsync_interrupt()
5445 adapter->perout[0].period); in igc_tsync_interrupt()
5451 adapter->perout[0].start = ts; in igc_tsync_interrupt()
5452 spin_unlock(&adapter->tmreg_lock); in igc_tsync_interrupt()
5456 spin_lock(&adapter->tmreg_lock); in igc_tsync_interrupt()
5457 ts = timespec64_add(adapter->perout[1].start, in igc_tsync_interrupt()
5458 adapter->perout[1].period); in igc_tsync_interrupt()
5464 adapter->perout[1].start = ts; in igc_tsync_interrupt()
5465 spin_unlock(&adapter->tmreg_lock); in igc_tsync_interrupt()
5474 ptp_clock_event(adapter->ptp_clock, &event); in igc_tsync_interrupt()
5483 ptp_clock_event(adapter->ptp_clock, &event); in igc_tsync_interrupt()
5488 * igc_msix_other - msix other interrupt handler
5495 struct igc_hw *hw = &adapter->hw; in igc_msix_other()
5500 schedule_work(&adapter->reset_task); in igc_msix_other()
5504 adapter->stats.doosync++; in igc_msix_other()
5508 hw->mac.get_link_status = true; in igc_msix_other()
5510 if (!test_bit(__IGC_DOWN, &adapter->state)) in igc_msix_other()
5511 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igc_msix_other()
5517 wr32(IGC_EIMS, adapter->eims_other); in igc_msix_other()
5524 u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK; in igc_write_itr()
5526 if (!q_vector->set_itr) in igc_write_itr()
5534 writel(itr_val, q_vector->itr_register); in igc_write_itr()
5535 q_vector->set_itr = 0; in igc_write_itr()
5545 napi_schedule(&q_vector->napi); in igc_msix_ring()
5551 * igc_request_msix - Initialize MSI-X interrupts
5554 * igc_request_msix allocates MSI-X vectors and requests interrupts from the
5559 unsigned int num_q_vectors = adapter->num_q_vectors; in igc_request_msix()
5561 struct net_device *netdev = adapter->netdev; in igc_request_msix()
5563 err = request_irq(adapter->msix_entries[vector].vector, in igc_request_msix()
5564 &igc_msix_other, 0, netdev->name, adapter); in igc_request_msix()
5570 dev_warn(&adapter->pdev->dev, in igc_request_msix()
5572 adapter->num_q_vectors, MAX_Q_VECTORS); in igc_request_msix()
5575 struct igc_q_vector *q_vector = adapter->q_vector[i]; in igc_request_msix()
5579 q_vector->itr_register = adapter->io_addr + IGC_EITR(vector); in igc_request_msix()
5581 if (q_vector->rx.ring && q_vector->tx.ring) in igc_request_msix()
5582 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, in igc_request_msix()
5583 q_vector->rx.ring->queue_index); in igc_request_msix()
5584 else if (q_vector->tx.ring) in igc_request_msix()
5585 sprintf(q_vector->name, "%s-tx-%u", netdev->name, in igc_request_msix()
5586 q_vector->tx.ring->queue_index); in igc_request_msix()
5587 else if (q_vector->rx.ring) in igc_request_msix()
5588 sprintf(q_vector->name, "%s-rx-%u", netdev->name, in igc_request_msix()
5589 q_vector->rx.ring->queue_index); in igc_request_msix()
5591 sprintf(q_vector->name, "%s-unused", netdev->name); in igc_request_msix()
5593 err = request_irq(adapter->msix_entries[vector].vector, in igc_request_msix()
5594 igc_msix_ring, 0, q_vector->name, in igc_request_msix()
5599 netif_napi_set_irq(&q_vector->napi, in igc_request_msix()
5600 adapter->msix_entries[vector].vector); in igc_request_msix()
5608 free_irq(adapter->msix_entries[free_vector++].vector, adapter); in igc_request_msix()
5610 vector--; in igc_request_msix()
5612 free_irq(adapter->msix_entries[free_vector++].vector, in igc_request_msix()
5613 adapter->q_vector[i]); in igc_request_msix()
5620 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts
5624 * MSI-X interrupts allocated.
5639 igc_get_phy_info(&adapter->hw); in igc_update_phy_info()
5643 * igc_has_link - check shared code for link and determine up/down
5648 struct igc_hw *hw = &adapter->hw; in igc_has_link()
5656 if (!hw->mac.get_link_status) in igc_has_link()
5658 hw->mac.ops.check_for_link(hw); in igc_has_link()
5659 link_active = !hw->mac.get_link_status; in igc_has_link()
5661 if (hw->mac.type == igc_i225) { in igc_has_link()
5662 if (!netif_carrier_ok(adapter->netdev)) { in igc_has_link()
5663 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; in igc_has_link()
5664 } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { in igc_has_link()
5665 adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE; in igc_has_link()
5666 adapter->link_check_timeout = jiffies; in igc_has_link()
5674 * igc_watchdog - Timer Call-back
5681 schedule_work(&adapter->watchdog_task); in igc_watchdog()
5689 struct net_device *netdev = adapter->netdev; in igc_watchdog_task()
5690 struct igc_hw *hw = &adapter->hw; in igc_watchdog_task()
5691 struct igc_phy_info *phy = &hw->phy; in igc_watchdog_task()
5698 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) { in igc_watchdog_task()
5699 if (time_after(jiffies, (adapter->link_check_timeout + HZ))) in igc_watchdog_task()
5700 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; in igc_watchdog_task()
5707 pm_runtime_resume(netdev->dev.parent); in igc_watchdog_task()
5712 hw->mac.ops.get_speed_and_duplex(hw, in igc_watchdog_task()
5713 &adapter->link_speed, in igc_watchdog_task()
5714 &adapter->link_duplex); in igc_watchdog_task()
5720 adapter->link_speed, in igc_watchdog_task()
5721 adapter->link_duplex == FULL_DUPLEX ? in igc_watchdog_task()
5728 /* disable EEE if enabled */ in igc_watchdog_task()
5729 if ((adapter->flags & IGC_FLAG_EEE) && in igc_watchdog_task()
5730 adapter->link_duplex == HALF_DUPLEX) { in igc_watchdog_task()
5732 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n"); in igc_watchdog_task()
5733 adapter->hw.dev_spec._base.eee_enable = false; in igc_watchdog_task()
5734 adapter->flags &= ~IGC_FLAG_EEE; in igc_watchdog_task()
5739 if (phy->speed_downgraded) in igc_watchdog_task()
5743 adapter->tx_timeout_factor = 1; in igc_watchdog_task()
5744 switch (adapter->link_speed) { in igc_watchdog_task()
5746 adapter->tx_timeout_factor = 14; in igc_watchdog_task()
5751 adapter->tx_timeout_factor = 1; in igc_watchdog_task()
5757 * based on link-up activity. Write into the register in igc_watchdog_task()
5762 if (adapter->link_speed != SPEED_1000) in igc_watchdog_task()
5772 retry_count--; in igc_watchdog_task()
5778 netdev_err(netdev, "read 1000Base-T Status Reg\n"); in igc_watchdog_task()
5784 if (!test_bit(__IGC_DOWN, &adapter->state)) in igc_watchdog_task()
5785 mod_timer(&adapter->phy_info_timer, in igc_watchdog_task()
5790 adapter->link_speed = 0; in igc_watchdog_task()
5791 adapter->link_duplex = 0; in igc_watchdog_task()
5798 if (!test_bit(__IGC_DOWN, &adapter->state)) in igc_watchdog_task()
5799 mod_timer(&adapter->phy_info_timer, in igc_watchdog_task()
5802 pm_schedule_suspend(netdev->dev.parent, in igc_watchdog_task()
5807 spin_lock(&adapter->stats64_lock); in igc_watchdog_task()
5809 spin_unlock(&adapter->stats64_lock); in igc_watchdog_task()
5811 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_watchdog_task()
5812 struct igc_ring *tx_ring = adapter->tx_ring[i]; in igc_watchdog_task()
5820 if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) { in igc_watchdog_task()
5821 adapter->tx_timeout_count++; in igc_watchdog_task()
5822 schedule_work(&adapter->reset_task); in igc_watchdog_task()
5829 set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); in igc_watchdog_task()
5833 if (adapter->flags & IGC_FLAG_HAS_MSIX) { in igc_watchdog_task()
5836 for (i = 0; i < adapter->num_q_vectors; i++) { in igc_watchdog_task()
5837 struct igc_q_vector *q_vector = adapter->q_vector[i]; in igc_watchdog_task()
5840 if (!q_vector->rx.ring) in igc_watchdog_task()
5843 rx_ring = adapter->rx_ring[q_vector->rx.ring->queue_index]; in igc_watchdog_task()
5845 if (test_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) { in igc_watchdog_task()
5846 eics |= q_vector->eims_value; in igc_watchdog_task()
5847 clear_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); in igc_watchdog_task()
5853 struct igc_ring *rx_ring = adapter->rx_ring[0]; in igc_watchdog_task()
5855 if (test_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) { in igc_watchdog_task()
5856 clear_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); in igc_watchdog_task()
5864 if (!test_bit(__IGC_DOWN, &adapter->state)) { in igc_watchdog_task()
5865 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) in igc_watchdog_task()
5866 mod_timer(&adapter->watchdog_timer, in igc_watchdog_task()
5869 mod_timer(&adapter->watchdog_timer, in igc_watchdog_task()
5875 * igc_intr_msi - Interrupt Handler
5882 struct igc_q_vector *q_vector = adapter->q_vector[0]; in igc_intr_msi()
5883 struct igc_hw *hw = &adapter->hw; in igc_intr_msi()
5890 schedule_work(&adapter->reset_task); in igc_intr_msi()
5894 adapter->stats.doosync++; in igc_intr_msi()
5898 hw->mac.get_link_status = true; in igc_intr_msi()
5899 if (!test_bit(__IGC_DOWN, &adapter->state)) in igc_intr_msi()
5900 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igc_intr_msi()
5906 napi_schedule(&q_vector->napi); in igc_intr_msi()
5912 * igc_intr - Legacy Interrupt Handler
5919 struct igc_q_vector *q_vector = adapter->q_vector[0]; in igc_intr()
5920 struct igc_hw *hw = &adapter->hw; in igc_intr()
5921 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No in igc_intr()
5926 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is in igc_intr()
5935 schedule_work(&adapter->reset_task); in igc_intr()
5939 adapter->stats.doosync++; in igc_intr()
5943 hw->mac.get_link_status = true; in igc_intr()
5945 if (!test_bit(__IGC_DOWN, &adapter->state)) in igc_intr()
5946 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igc_intr()
5952 napi_schedule(&q_vector->napi); in igc_intr()
5959 if (adapter->msix_entries) { in igc_free_irq()
5962 free_irq(adapter->msix_entries[vector++].vector, adapter); in igc_free_irq()
5964 for (i = 0; i < adapter->num_q_vectors; i++) in igc_free_irq()
5965 free_irq(adapter->msix_entries[vector++].vector, in igc_free_irq()
5966 adapter->q_vector[i]); in igc_free_irq()
5968 free_irq(adapter->pdev->irq, adapter); in igc_free_irq()
5973 * igc_request_irq - initialize interrupts
5981 struct net_device *netdev = adapter->netdev; in igc_request_irq()
5982 struct pci_dev *pdev = adapter->pdev; in igc_request_irq()
5985 if (adapter->flags & IGC_FLAG_HAS_MSIX) { in igc_request_irq()
6002 igc_assign_vector(adapter->q_vector[0], 0); in igc_request_irq()
6004 if (adapter->flags & IGC_FLAG_HAS_MSI) { in igc_request_irq()
6005 err = request_irq(pdev->irq, &igc_intr_msi, 0, in igc_request_irq()
6006 netdev->name, adapter); in igc_request_irq()
6012 adapter->flags &= ~IGC_FLAG_HAS_MSI; in igc_request_irq()
6015 err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, in igc_request_irq()
6016 netdev->name, adapter); in igc_request_irq()
6026 * __igc_open - Called when a network interface is made active
6041 struct pci_dev *pdev = adapter->pdev; in __igc_open()
6042 struct igc_hw *hw = &adapter->hw; in __igc_open()
6049 if (test_bit(__IGC_TESTING, &adapter->state)) { in __igc_open()
6051 return -EBUSY; in __igc_open()
6055 pm_runtime_get_sync(&pdev->dev); in __igc_open()
6077 clear_bit(__IGC_DOWN, &adapter->state); in __igc_open()
6079 for (i = 0; i < adapter->num_q_vectors; i++) { in __igc_open()
6080 napi = &adapter->q_vector[i]->napi; in __igc_open()
6090 pm_runtime_put(&pdev->dev); in __igc_open()
6095 hw->mac.get_link_status = true; in __igc_open()
6096 schedule_work(&adapter->watchdog_task); in __igc_open()
6102 igc_power_down_phy_copper_base(&adapter->hw); in __igc_open()
6109 pm_runtime_put(&pdev->dev); in __igc_open()
6120 err = netif_set_real_num_queues(netdev, adapter->num_tx_queues, in igc_open()
6121 adapter->num_rx_queues); in igc_open()
6131 * __igc_close - Disables a network interface
6137 * The close entry point is called when an interface is de-activated
6145 struct pci_dev *pdev = adapter->pdev; in __igc_close()
6147 WARN_ON(test_bit(__IGC_RESETTING, &adapter->state)); in __igc_close()
6150 pm_runtime_get_sync(&pdev->dev); in __igc_close()
6162 pm_runtime_put_sync(&pdev->dev); in __igc_close()
6169 if (netif_device_present(netdev) || netdev->dismantle) in igc_close()
6175 * igc_ioctl - Access the hwtstamp interface
6188 return -EOPNOTSUPP; in igc_ioctl()
6197 if (queue < 0 || queue >= adapter->num_tx_queues) in igc_save_launchtime_params()
6198 return -EINVAL; in igc_save_launchtime_params()
6200 ring = adapter->tx_ring[queue]; in igc_save_launchtime_params()
6201 ring->launchtime_enable = enable; in igc_save_launchtime_params()
6219 struct igc_hw *hw = &adapter->hw; in validate_schedule()
6223 if (qopt->cycle_time_extension) in validate_schedule()
6234 if (!is_base_time_past(qopt->base_time, &now) && in validate_schedule()
6238 for (n = 0; n < qopt->num_entries; n++) { in validate_schedule()
6242 prev = n ? &qopt->entries[n - 1] : NULL; in validate_schedule()
6243 e = &qopt->entries[n]; in validate_schedule()
6248 if (e->command != TC_TAPRIO_CMD_SET_GATES) in validate_schedule()
6251 for (i = 0; i < adapter->num_tx_queues; i++) in validate_schedule()
6252 if (e->gate_mask & BIT(i)) { in validate_schedule()
6260 !(prev->gate_mask & BIT(i))) in validate_schedule()
6271 struct igc_hw *hw = &adapter->hw; in igc_tsn_enable_launchtime()
6274 if (hw->mac.type != igc_i225) in igc_tsn_enable_launchtime()
6275 return -EOPNOTSUPP; in igc_tsn_enable_launchtime()
6277 err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable); in igc_tsn_enable_launchtime()
6289 adapter->base_time = 0; in igc_qbv_clear_schedule()
6290 adapter->cycle_time = NSEC_PER_SEC; in igc_qbv_clear_schedule()
6291 adapter->taprio_offload_enable = false; in igc_qbv_clear_schedule()
6292 adapter->qbv_config_change_errors = 0; in igc_qbv_clear_schedule()
6293 adapter->qbv_count = 0; in igc_qbv_clear_schedule()
6295 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_qbv_clear_schedule()
6296 struct igc_ring *ring = adapter->tx_ring[i]; in igc_qbv_clear_schedule()
6298 ring->start_time = 0; in igc_qbv_clear_schedule()
6299 ring->end_time = NSEC_PER_SEC; in igc_qbv_clear_schedule()
6300 ring->max_sdu = 0; in igc_qbv_clear_schedule()
6303 spin_lock_irqsave(&adapter->qbv_tx_lock, flags); in igc_qbv_clear_schedule()
6305 adapter->qbv_transition = false; in igc_qbv_clear_schedule()
6307 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_qbv_clear_schedule()
6308 struct igc_ring *ring = adapter->tx_ring[i]; in igc_qbv_clear_schedule()
6310 ring->oper_gate_closed = false; in igc_qbv_clear_schedule()
6311 ring->admin_gate_closed = false; in igc_qbv_clear_schedule()
6314 spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); in igc_qbv_clear_schedule()
6332 stats->tx_overruns = 0; in igc_taprio_stats()
6338 struct tc_taprio_qopt_stats *stats = &queue_stats->stats; in igc_taprio_queue_stats()
6343 stats->tx_overruns = 0; in igc_taprio_queue_stats()
6350 struct igc_hw *hw = &adapter->hw; in igc_save_qbv_schedule()
6357 if (qopt->base_time < 0) in igc_save_qbv_schedule()
6358 return -ERANGE; in igc_save_qbv_schedule()
6360 if (igc_is_device_id_i225(hw) && adapter->taprio_offload_enable) in igc_save_qbv_schedule()
6361 return -EALREADY; in igc_save_qbv_schedule()
6364 return -EINVAL; in igc_save_qbv_schedule()
6369 is_base_time_past(qopt->base_time, &now)) in igc_save_qbv_schedule()
6370 adapter->qbv_config_change_errors++; in igc_save_qbv_schedule()
6372 adapter->cycle_time = qopt->cycle_time; in igc_save_qbv_schedule()
6373 adapter->base_time = qopt->base_time; in igc_save_qbv_schedule()
6374 adapter->taprio_offload_enable = true; in igc_save_qbv_schedule()
6376 for (n = 0; n < qopt->num_entries; n++) { in igc_save_qbv_schedule()
6377 struct tc_taprio_sched_entry *e = &qopt->entries[n]; in igc_save_qbv_schedule()
6379 end_time += e->interval; in igc_save_qbv_schedule()
6387 * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2, in igc_save_qbv_schedule()
6392 if (end_time > adapter->cycle_time || in igc_save_qbv_schedule()
6393 n + 1 == qopt->num_entries) in igc_save_qbv_schedule()
6394 end_time = adapter->cycle_time; in igc_save_qbv_schedule()
6396 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_save_qbv_schedule()
6397 struct igc_ring *ring = adapter->tx_ring[i]; in igc_save_qbv_schedule()
6399 if (!(e->gate_mask & BIT(i))) in igc_save_qbv_schedule()
6407 ring->start_time = start_time; in igc_save_qbv_schedule()
6408 ring->end_time = end_time; in igc_save_qbv_schedule()
6410 if (ring->start_time >= adapter->cycle_time) in igc_save_qbv_schedule()
6416 start_time += e->interval; in igc_save_qbv_schedule()
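A worked example of the gate bookkeeping above, ignoring the corner cases handled elsewhere in this function: with a 1,000,000 ns cycle and two entries of 300,000 ns (gate_mask 0x1) and 700,000 ns (gate_mask 0xE), queue 0 gets start_time 0 and end_time 300,000, while queues 1-3 get start_time 300,000 and end_time 1,000,000, the latter also being where end_time is clamped to the cycle time on the last entry.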
6419 spin_lock_irqsave(&adapter->qbv_tx_lock, flags); in igc_save_qbv_schedule()
6424 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_save_qbv_schedule()
6425 struct igc_ring *ring = adapter->tx_ring[i]; in igc_save_qbv_schedule()
6427 if (!is_base_time_past(qopt->base_time, &now)) { in igc_save_qbv_schedule()
6428 ring->admin_gate_closed = false; in igc_save_qbv_schedule()
6430 ring->oper_gate_closed = false; in igc_save_qbv_schedule()
6431 ring->admin_gate_closed = false; in igc_save_qbv_schedule()
6435 if (!is_base_time_past(qopt->base_time, &now)) in igc_save_qbv_schedule()
6436 ring->admin_gate_closed = true; in igc_save_qbv_schedule()
6438 ring->oper_gate_closed = true; in igc_save_qbv_schedule()
6440 ring->start_time = end_time; in igc_save_qbv_schedule()
6441 ring->end_time = end_time; in igc_save_qbv_schedule()
6445 spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); in igc_save_qbv_schedule()
6447 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_save_qbv_schedule()
6448 struct igc_ring *ring = adapter->tx_ring[i]; in igc_save_qbv_schedule()
6449 struct net_device *dev = adapter->netdev; in igc_save_qbv_schedule()
6451 if (qopt->max_sdu[i]) in igc_save_qbv_schedule()
6452 ring->max_sdu = qopt->max_sdu[i] + dev->hard_header_len - ETH_TLEN; in igc_save_qbv_schedule()
6454 ring->max_sdu = 0; in igc_save_qbv_schedule()
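As an arithmetic example for the max_sdu adjustment above, assuming a plain Ethernet netdev (hard_header_len of 14, ETH_TLEN of 2): a user-supplied max-sdu of 1500 is stored as 1500 + 14 - 2 = 1512 on the ring, while an unset max-sdu leaves ring->max_sdu at 0, i.e. no queue-specific limit.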
6463 struct igc_hw *hw = &adapter->hw; in igc_tsn_enable_qbv_scheduling()
6466 if (hw->mac.type != igc_i225) in igc_tsn_enable_qbv_scheduling()
6467 return -EOPNOTSUPP; in igc_tsn_enable_qbv_scheduling()
6469 switch (qopt->cmd) { in igc_tsn_enable_qbv_scheduling()
6477 igc_taprio_stats(adapter->netdev, &qopt->stats); in igc_tsn_enable_qbv_scheduling()
6480 igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats); in igc_tsn_enable_qbv_scheduling()
6483 return -EOPNOTSUPP; in igc_tsn_enable_qbv_scheduling()
6497 struct net_device *netdev = adapter->netdev; in igc_save_cbs_params()
6501 /* i225 has two sets of credit-based shaper logic. in igc_save_cbs_params()
6505 return -EINVAL; in igc_save_cbs_params()
6507 ring = adapter->tx_ring[queue]; in igc_save_cbs_params()
6510 if (adapter->tx_ring[i]) in igc_save_cbs_params()
6511 cbs_status[i] = adapter->tx_ring[i]->cbs_enable; in igc_save_cbs_params()
6520 return -EINVAL; in igc_save_cbs_params()
6526 return -EINVAL; in igc_save_cbs_params()
6530 ring->cbs_enable = enable; in igc_save_cbs_params()
6531 ring->idleslope = idleslope; in igc_save_cbs_params()
6532 ring->sendslope = sendslope; in igc_save_cbs_params()
6533 ring->hicredit = hicredit; in igc_save_cbs_params()
6534 ring->locredit = locredit; in igc_save_cbs_params()
6542 struct igc_hw *hw = &adapter->hw; in igc_tsn_enable_cbs()
6545 if (hw->mac.type != igc_i225) in igc_tsn_enable_cbs()
6546 return -EOPNOTSUPP; in igc_tsn_enable_cbs()
6548 if (qopt->queue < 0 || qopt->queue > 1) in igc_tsn_enable_cbs()
6549 return -EINVAL; in igc_tsn_enable_cbs()
6551 err = igc_save_cbs_params(adapter, qopt->queue, qopt->enable, in igc_tsn_enable_cbs()
6552 qopt->idleslope, qopt->sendslope, in igc_tsn_enable_cbs()
6553 qopt->hicredit, qopt->locredit); in igc_tsn_enable_cbs()
6563 struct igc_hw *hw = &adapter->hw; in igc_tc_query_caps()
6565 switch (base->type) { in igc_tc_query_caps()
6567 struct tc_mqprio_caps *caps = base->caps; in igc_tc_query_caps()
6569 caps->validate_queue_counts = true; in igc_tc_query_caps()
6574 struct tc_taprio_caps *caps = base->caps; in igc_tc_query_caps()
6576 caps->broken_mqprio = true; in igc_tc_query_caps()
6578 if (hw->mac.type == igc_i225) { in igc_tc_query_caps()
6579 caps->supports_queue_max_sdu = true; in igc_tc_query_caps()
6580 caps->gate_mask_per_txq = true; in igc_tc_query_caps()
6586 return -EOPNOTSUPP; in igc_tc_query_caps()
6595 adapter->strict_priority_enable = true; in igc_save_mqprio_params()
6596 adapter->num_tc = num_tc; in igc_save_mqprio_params()
6599 adapter->queue_per_tc[i] = offset[i]; in igc_save_mqprio_params()
6605 struct igc_hw *hw = &adapter->hw; in igc_tsn_enable_mqprio()
6608 if (hw->mac.type != igc_i225) in igc_tsn_enable_mqprio()
6609 return -EOPNOTSUPP; in igc_tsn_enable_mqprio()
6611 if (!mqprio->qopt.num_tc) { in igc_tsn_enable_mqprio()
6612 adapter->strict_priority_enable = false; in igc_tsn_enable_mqprio()
6617 if (mqprio->qopt.num_tc != adapter->num_tx_queues) { in igc_tsn_enable_mqprio()
6618 NL_SET_ERR_MSG_FMT_MOD(mqprio->extack, in igc_tsn_enable_mqprio()
6620 adapter->num_tx_queues); in igc_tsn_enable_mqprio()
6621 return -EOPNOTSUPP; in igc_tsn_enable_mqprio()
6625 for (i = 0; i < mqprio->qopt.num_tc; i++) { in igc_tsn_enable_mqprio()
6626 if (mqprio->qopt.count[i] != 1) { in igc_tsn_enable_mqprio()
6627 NL_SET_ERR_MSG_MOD(mqprio->extack, in igc_tsn_enable_mqprio()
6629 return -EOPNOTSUPP; in igc_tsn_enable_mqprio()
6634 if (mqprio->preemptible_tcs) { in igc_tsn_enable_mqprio()
6635 NL_SET_ERR_MSG_MOD(mqprio->extack, in igc_tsn_enable_mqprio()
6637 return -EOPNOTSUPP; in igc_tsn_enable_mqprio()
6640 igc_save_mqprio_params(adapter, mqprio->qopt.num_tc, in igc_tsn_enable_mqprio()
6641 mqprio->qopt.offset); in igc_tsn_enable_mqprio()
6643 mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS; in igc_tsn_enable_mqprio()
6654 adapter->tc_setup_type = type; in igc_setup_tc()
6672 return -EOPNOTSUPP; in igc_setup_tc()
6680 switch (bpf->command) { in igc_bpf()
6682 return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack); in igc_bpf()
6684 return igc_xdp_setup_pool(adapter, bpf->xsk.pool, in igc_bpf()
6685 bpf->xsk.queue_id); in igc_bpf()
6687 return -EOPNOTSUPP; in igc_bpf()
6701 return -ENETDOWN; in igc_xdp_xmit()
6704 return -EINVAL; in igc_xdp_xmit()
6736 struct igc_hw *hw = &adapter->hw; in igc_trigger_rxtxq_interrupt()
6739 eics |= q_vector->eims_value; in igc_trigger_rxtxq_interrupt()
6749 if (test_bit(__IGC_DOWN, &adapter->state)) in igc_xsk_wakeup()
6750 return -ENETDOWN; in igc_xsk_wakeup()
6753 return -ENXIO; in igc_xsk_wakeup()
6755 if (queue_id >= adapter->num_rx_queues) in igc_xsk_wakeup()
6756 return -EINVAL; in igc_xsk_wakeup()
6758 ring = adapter->rx_ring[queue_id]; in igc_xsk_wakeup()
6760 if (!ring->xsk_pool) in igc_xsk_wakeup()
6761 return -ENXIO; in igc_xsk_wakeup()
6763 q_vector = adapter->q_vector[queue_id]; in igc_xsk_wakeup()
6764 if (!napi_if_scheduled_mark_missed(&q_vector->napi)) in igc_xsk_wakeup()
6778 tstamp = hwtstamps->netdev_data; in igc_get_tstamp()
6781 timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer1); in igc_get_tstamp()
6783 timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0); in igc_get_tstamp()
6811 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); in igc_rd32()
6821 struct net_device *netdev = igc->netdev; in igc_rd32()
6823 hw->hw_addr = NULL; in igc_rd32()
6826 WARN(pci_device_is_present(igc->pdev), in igc_rd32()
6846 [11] = XDP_RSS_TYPE_NONE, /* keep array sized for SW bit-mask */
6858 if (!(ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)) in igc_xdp_rx_hash()
6859 return -ENODATA; in igc_xdp_rx_hash()
6861 *hash = le32_to_cpu(ctx->rx_desc->wb.lower.hi_dword.rss); in igc_xdp_rx_hash()
6862 *rss_type = igc_xdp_rss_type[igc_rss_type(ctx->rx_desc)]; in igc_xdp_rx_hash()
6870 struct igc_adapter *adapter = netdev_priv(ctx->xdp.rxq->dev); in igc_xdp_rx_timestamp()
6871 struct igc_inline_rx_tstamps *tstamp = ctx->rx_ts; in igc_xdp_rx_timestamp()
6873 if (igc_test_staterr(ctx->rx_desc, IGC_RXDADV_STAT_TSIP)) { in igc_xdp_rx_timestamp()
6874 *timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0); in igc_xdp_rx_timestamp()
6879 return -ENODATA; in igc_xdp_rx_timestamp()
6894 spin_lock_irqsave(&adapter->qbv_tx_lock, flags); in igc_qbv_scheduling_timer()
6896 adapter->qbv_transition = true; in igc_qbv_scheduling_timer()
6897 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_qbv_scheduling_timer()
6898 struct igc_ring *tx_ring = adapter->tx_ring[i]; in igc_qbv_scheduling_timer()
6900 if (tx_ring->admin_gate_closed) { in igc_qbv_scheduling_timer()
6901 tx_ring->admin_gate_closed = false; in igc_qbv_scheduling_timer()
6902 tx_ring->oper_gate_closed = true; in igc_qbv_scheduling_timer()
6904 tx_ring->oper_gate_closed = false; in igc_qbv_scheduling_timer()
6907 adapter->qbv_transition = false; in igc_qbv_scheduling_timer()
6909 spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); in igc_qbv_scheduling_timer()
6915 * igc_probe - Device Initialization Routine
6931 const struct igc_info *ei = igc_info_tbl[ent->driver_data]; in igc_probe()
6938 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in igc_probe()
6940 dev_err(&pdev->dev, in igc_probe()
6951 dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n"); in igc_probe()
6955 err = -ENOMEM; in igc_probe()
6962 SET_NETDEV_DEV(netdev, &pdev->dev); in igc_probe()
6966 adapter->netdev = netdev; in igc_probe()
6967 adapter->pdev = pdev; in igc_probe()
6968 hw = &adapter->hw; in igc_probe()
6969 hw->back = adapter; in igc_probe()
6970 adapter->port_num = hw->bus.func; in igc_probe()
6971 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); in igc_probe()
6977 err = -EIO; in igc_probe()
6978 adapter->io_addr = ioremap(pci_resource_start(pdev, 0), in igc_probe()
6980 if (!adapter->io_addr) in igc_probe()
6983 /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */ in igc_probe()
6984 hw->hw_addr = adapter->io_addr; in igc_probe()
6986 netdev->netdev_ops = &igc_netdev_ops; in igc_probe()
6987 netdev->xdp_metadata_ops = &igc_xdp_metadata_ops; in igc_probe()
6988 netdev->xsk_tx_metadata_ops = &igc_xsk_tx_metadata_ops; in igc_probe()
6990 netdev->watchdog_timeo = 5 * HZ; in igc_probe()
6992 netdev->mem_start = pci_resource_start(pdev, 0); in igc_probe()
6993 netdev->mem_end = pci_resource_end(pdev, 0); in igc_probe()
6996 hw->vendor_id = pdev->vendor; in igc_probe()
6997 hw->device_id = pdev->device; in igc_probe()
6998 hw->revision_id = pdev->revision; in igc_probe()
6999 hw->subsystem_vendor_id = pdev->subsystem_vendor; in igc_probe()
7000 hw->subsystem_device_id = pdev->subsystem_device; in igc_probe()
7003 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); in igc_probe()
7004 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); in igc_probe()
7006 /* Initialize device-specific constants */ in igc_probe()
7007 err = ei->get_invariants(hw); in igc_probe()
7012 netdev->features |= NETIF_F_SG; in igc_probe()
7013 netdev->features |= NETIF_F_TSO; in igc_probe()
7014 netdev->features |= NETIF_F_TSO6; in igc_probe()
7015 netdev->features |= NETIF_F_TSO_ECN; in igc_probe()
7016 netdev->features |= NETIF_F_RXHASH; in igc_probe()
7017 netdev->features |= NETIF_F_RXCSUM; in igc_probe()
7018 netdev->features |= NETIF_F_HW_CSUM; in igc_probe()
7019 netdev->features |= NETIF_F_SCTP_CRC; in igc_probe()
7020 netdev->features |= NETIF_F_HW_TC; in igc_probe()
7029 netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES; in igc_probe()
7030 netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES; in igc_probe()
7038 netdev->hw_features |= NETIF_F_NTUPLE; in igc_probe()
7039 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; in igc_probe()
7040 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; in igc_probe()
7041 netdev->hw_features |= netdev->features; in igc_probe()
7043 netdev->features |= NETIF_F_HIGHDMA; in igc_probe()
7045 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; in igc_probe()
7046 netdev->mpls_features |= NETIF_F_HW_CSUM; in igc_probe()
7047 netdev->hw_enc_features |= netdev->vlan_features; in igc_probe()
7049 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | in igc_probe()
7052 /* MTU range: 68 - 9216 */ in igc_probe()
7053 netdev->min_mtu = ETH_MIN_MTU; in igc_probe()
7054 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; in igc_probe()
7059 hw->mac.ops.reset_hw(hw); in igc_probe()
7062 if (hw->nvm.ops.validate(hw) < 0) { in igc_probe()
7063 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); in igc_probe()
7064 err = -EIO; in igc_probe()
7069 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { in igc_probe()
7071 if (hw->mac.ops.read_mac_addr(hw)) in igc_probe()
7072 dev_err(&pdev->dev, "NVM Read Error\n"); in igc_probe()
7075 eth_hw_addr_set(netdev, hw->mac.addr); in igc_probe()
7077 if (!is_valid_ether_addr(netdev->dev_addr)) { in igc_probe()
7078 dev_err(&pdev->dev, "Invalid MAC Address\n"); in igc_probe()
7079 err = -EIO; in igc_probe()
7087 timer_setup(&adapter->watchdog_timer, igc_watchdog, 0); in igc_probe()
7088 timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0); in igc_probe()
7090 INIT_WORK(&adapter->reset_task, igc_reset_task); in igc_probe()
7091 INIT_WORK(&adapter->watchdog_task, igc_watchdog_task); in igc_probe()
7093 hrtimer_init(&adapter->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in igc_probe()
7094 adapter->hrtimer.function = &igc_qbv_scheduling_timer; in igc_probe()
7096 /* Initialize link properties that are user-changeable */ in igc_probe()
7097 adapter->fc_autoneg = true; in igc_probe()
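/* 0xaf below presumably advertises every supported speed/duplex
 * combination (10/100 half and full, 1000 full, 2500 full); see the
 * ADVERTISE_* masks in igc_defines.h for the exact bit layout.
 */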
7098 hw->phy.autoneg_advertised = 0xaf; in igc_probe()
7100 hw->fc.requested_mode = igc_fc_default; in igc_probe()
7101 hw->fc.current_mode = igc_fc_default; in igc_probe()
7104 adapter->flags |= IGC_FLAG_WOL_SUPPORTED; in igc_probe()
7107 if (adapter->flags & IGC_FLAG_WOL_SUPPORTED) in igc_probe()
7108 adapter->wol |= IGC_WUFC_MAG; in igc_probe()
7110 device_set_wakeup_enable(&adapter->pdev->dev, in igc_probe()
7111 adapter->flags & IGC_FLAG_WOL_SUPPORTED); in igc_probe()
7125 strscpy(netdev->name, "eth%d", sizeof(netdev->name)); in igc_probe()
7134 adapter->ei = *ei; in igc_probe()
7138 netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); in igc_probe()
7140 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); in igc_probe()
7141 /* Disable EEE for internal PHY devices */ in igc_probe()
7142 hw->dev_spec._base.eee_enable = false; in igc_probe()
7143 adapter->flags &= ~IGC_FLAG_EEE; in igc_probe()
7146 pm_runtime_put_noidle(&pdev->dev); in igc_probe()
7163 iounmap(adapter->io_addr); in igc_probe()
7175 * igc_remove - Device Removal Routine
7180  * Hot-Plug event, or because the driver is going to be removed from memory.
7188 pm_runtime_get_noresume(&pdev->dev); in igc_remove()
7197 set_bit(__IGC_DOWN, &adapter->state); in igc_remove()
7199 del_timer_sync(&adapter->watchdog_timer); in igc_remove()
7200 del_timer_sync(&adapter->phy_info_timer); in igc_remove()
7202 cancel_work_sync(&adapter->reset_task); in igc_remove()
7203 cancel_work_sync(&adapter->watchdog_task); in igc_remove()
7204 hrtimer_cancel(&adapter->hrtimer); in igc_remove()
7216 pci_iounmap(pdev, adapter->io_addr); in igc_remove()
7229 u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol; in __igc_shutdown()
7230 struct igc_hw *hw = &adapter->hw; in __igc_shutdown()
7253 /* turn on all-multi mode if wake on multicast is enabled */ in __igc_shutdown()
7274 wake = wufc || adapter->en_mng_pt; in __igc_shutdown()
7276 igc_power_down_phy_copper_base(&adapter->hw); in __igc_shutdown()
7301 struct igc_hw *hw = &adapter->hw; in igc_deliver_wake_packet()
7319 /* Ensure reads are 32-bit aligned */ in igc_deliver_wake_packet()
7322 memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl); in igc_deliver_wake_packet()
7324 skb->protocol = eth_type_trans(skb, netdev); in igc_deliver_wake_packet()
7333 struct igc_hw *hw = &adapter->hw; in __igc_resume()
7341 return -ENODEV; in __igc_resume()
7354 return -ENOMEM; in __igc_resume()
7406 return -EBUSY; in igc_runtime_idle()
7422 * igc_io_error_detected - called when PCI error is detected
7453 * igc_io_slot_reset - called after the PCI bus has been reset.
7456  * Restart the card from scratch, as if from a cold boot. Implementation
7457  * resembles the first half of the __igc_resume routine.
7463 struct igc_hw *hw = &adapter->hw; in igc_io_slot_reset()
7467 netdev_err(netdev, "Could not re-enable PCI device after reset\n"); in igc_io_slot_reset()
7478  * In case of PCI error, the adapter loses its HW address, so we should re-assign it here. in igc_io_slot_reset()
7480 hw->hw_addr = adapter->io_addr; in igc_io_slot_reset()
7491 * igc_io_resume - called when traffic can start to flow again.
7496  * Implementation resembles the second half of the __igc_resume routine.
7542  * igc_reinit_queues - reinitialize the adapter's queues
7547 struct net_device *netdev = adapter->netdev; in igc_reinit_queues()
7557 return -ENOMEM; in igc_reinit_queues()
7567  * igc_get_hw_dev - return the net_device associated with the hardware
7574 struct igc_adapter *adapter = hw->back; in igc_get_hw_dev()
7576 return adapter->netdev; in igc_get_hw_dev()
7581 struct igc_hw *hw = &ring->q_vector->adapter->hw; in igc_disable_rx_ring_hw()
7582 u8 idx = ring->reg_idx; in igc_disable_rx_ring_hw()
7599 struct igc_adapter *adapter = ring->q_vector->adapter; in igc_enable_rx_ring()
7603 if (ring->xsk_pool) in igc_enable_rx_ring()
7617 struct igc_adapter *adapter = ring->q_vector->adapter; in igc_enable_tx_ring()
7623 * igc_init_module - Driver Registration Routine
7642 * igc_exit_module - Driver Exit Cleanup Routine
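/* igc_init_module() and igc_exit_module() follow the standard PCI driver
 * registration pattern. A minimal sketch is shown below; the real driver
 * additionally wires up pm ops, shutdown and AER error handlers on the
 * pci_driver instance, all elided here, so treat this as an illustration
 * rather than the file's exact contents.
 */
#include <linux/module.h>
#include <linux/pci.h>

static struct pci_driver igc_driver = {
	.name     = "igc",
	.id_table = igc_pci_tbl,
	.probe    = igc_probe,
	.remove   = igc_remove,
};

static int __init igc_init_module(void)
{
	return pci_register_driver(&igc_driver);
}
module_init(igc_init_module);

static void __exit igc_exit_module(void)
{
	pci_unregister_driver(&igc_driver);
}
module_exit(igc_exit_module);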