
1 // SPDX-License-Identifier: GPL-2.0
33 static int debug = -1;
81 struct net_device *dev = adapter->netdev; in igc_reset()
82 struct igc_hw *hw = &adapter->hw; in igc_reset()
83 struct igc_fc_info *fc = &hw->fc; in igc_reset()
95 * - the full Rx FIFO size minus one full Tx plus one full Rx frame in igc_reset()
97 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); in igc_reset()
99 fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ in igc_reset()
100 fc->low_water = fc->high_water - 16; in igc_reset()
101 fc->pause_time = 0xFFFF; in igc_reset()
102 fc->send_xon = 1; in igc_reset()
103 fc->current_mode = fc->requested_mode; in igc_reset()
105 hw->mac.ops.reset_hw(hw); in igc_reset()
107 if (hw->mac.ops.init_hw(hw)) in igc_reset()
110 /* Re-establish EEE setting */ in igc_reset()
113 if (!netif_running(adapter->netdev)) in igc_reset()
114 igc_power_down_phy_copper_base(&adapter->hw); in igc_reset()
119 /* Re-enable PTP, where applicable. */ in igc_reset()
122 /* Re-enable TSN offloading, where applicable. */ in igc_reset()
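The watermark arithmetic at lines 95-100 above can be checked on its own. Below is a minimal userspace sketch, assuming a 34 KB packet buffer and illustrative frame sizes (the real values come from the driver's PBA constant and adapter->max_frame_size), just to show the 16-byte rounding and the fixed gap between the high and low marks.

#include <stdio.h>

int main(void)
{
        unsigned int pba = 34;                    /* assumed packet buffer size, in KB */
        unsigned int max_frame_size = 1522;       /* assumed current max frame */
        unsigned int max_jumbo_frame_size = 9728; /* assumed absolute maximum frame */
        unsigned int hwm, high_water, low_water;

        /* full Rx FIFO size minus one full Tx plus one full Rx frame */
        hwm = (pba << 10) - (max_frame_size + max_jumbo_frame_size);

        high_water = hwm & 0xFFFFFFF0;  /* round down to 16-byte granularity */
        low_water = high_water - 16;    /* low water sits one granule below */

        printf("hwm=%u high_water=%u low_water=%u\n", hwm, high_water, low_water);
        return 0;
}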
129 * igc_power_up_link - Power up the phy link
134 igc_reset_phy(&adapter->hw); in igc_power_up_link()
136 igc_power_up_phy_copper(&adapter->hw); in igc_power_up_link()
138 igc_setup_link(&adapter->hw); in igc_power_up_link()
142 * igc_release_hw_control - release control of the h/w to f/w
151 struct igc_hw *hw = &adapter->hw; in igc_release_hw_control()
154 if (!pci_device_is_present(adapter->pdev)) in igc_release_hw_control()
164 * igc_get_hw_control - get control of the h/w from f/w
173 struct igc_hw *hw = &adapter->hw; in igc_get_hw_control()
191 * igc_clean_tx_ring - Free Tx Buffers
196 u16 i = tx_ring->next_to_clean; in igc_clean_tx_ring()
197 struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_clean_tx_ring()
200 while (i != tx_ring->next_to_use) { in igc_clean_tx_ring()
203 switch (tx_buffer->type) { in igc_clean_tx_ring()
208 xdp_return_frame(tx_buffer->xdpf); in igc_clean_tx_ring()
209 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_ring()
212 dev_kfree_skb_any(tx_buffer->skb); in igc_clean_tx_ring()
213 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_ring()
216 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); in igc_clean_tx_ring()
221 eop_desc = tx_buffer->next_to_watch; in igc_clean_tx_ring()
229 if (unlikely(i == tx_ring->count)) { in igc_clean_tx_ring()
231 tx_buffer = tx_ring->tx_buffer_info; in igc_clean_tx_ring()
237 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_ring()
240 tx_buffer->next_to_watch = NULL; in igc_clean_tx_ring()
245 if (unlikely(i == tx_ring->count)) { in igc_clean_tx_ring()
247 tx_buffer = tx_ring->tx_buffer_info; in igc_clean_tx_ring()
251 if (tx_ring->xsk_pool && xsk_frames) in igc_clean_tx_ring()
252 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); in igc_clean_tx_ring()
258 memset(tx_ring->tx_buffer_info, 0, in igc_clean_tx_ring()
259 sizeof(*tx_ring->tx_buffer_info) * tx_ring->count); in igc_clean_tx_ring()
262 memset(tx_ring->desc, 0, tx_ring->size); in igc_clean_tx_ring()
265 tx_ring->next_to_use = 0; in igc_clean_tx_ring()
266 tx_ring->next_to_clean = 0; in igc_clean_tx_ring()
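The loop above walks the Tx ring from next_to_clean toward next_to_use, wrapping the index back to zero once it reaches the ring count (lines 229 and 245). A standalone sketch of that wrap-around walk, with a hypothetical release() helper standing in for the unmap/free calls:

#include <stdio.h>

#define RING_COUNT 8

struct tx_slot { int in_use; };

static void release(struct tx_slot *slot)
{
        slot->in_use = 0;       /* stand-in for unmap + free of the buffer */
}

static void clean_ring(struct tx_slot *slots, unsigned int ntc, unsigned int ntu)
{
        unsigned int i = ntc;

        while (i != ntu) {
                release(&slots[i]);

                /* advance and wrap exactly like the driver loop */
                i++;
                if (i == RING_COUNT)
                        i = 0;
        }
}

int main(void)
{
        struct tx_slot slots[RING_COUNT] = { { 1 } };

        clean_ring(slots, 6, 2);        /* walks slots 6, 7, 0, 1 */
        return 0;
}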
270 * igc_free_tx_resources - Free Tx Resources per Queue
279 vfree(tx_ring->tx_buffer_info); in igc_free_tx_resources()
280 tx_ring->tx_buffer_info = NULL; in igc_free_tx_resources()
283 if (!tx_ring->desc) in igc_free_tx_resources()
286 dma_free_coherent(tx_ring->dev, tx_ring->size, in igc_free_tx_resources()
287 tx_ring->desc, tx_ring->dma); in igc_free_tx_resources()
289 tx_ring->desc = NULL; in igc_free_tx_resources()
293 * igc_free_all_tx_resources - Free Tx Resources for All Queues
302 for (i = 0; i < adapter->num_tx_queues; i++) in igc_free_all_tx_resources()
303 igc_free_tx_resources(adapter->tx_ring[i]); in igc_free_all_tx_resources()
307 * igc_clean_all_tx_rings - Free Tx Buffers for all queues
314 for (i = 0; i < adapter->num_tx_queues; i++) in igc_clean_all_tx_rings()
315 if (adapter->tx_ring[i]) in igc_clean_all_tx_rings()
316 igc_clean_tx_ring(adapter->tx_ring[i]); in igc_clean_all_tx_rings()
321 struct igc_hw *hw = &ring->q_vector->adapter->hw; in igc_disable_tx_ring_hw()
322 u8 idx = ring->reg_idx; in igc_disable_tx_ring_hw()
332 * igc_disable_all_tx_rings_hw - Disable all transmit queue operation
339 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_disable_all_tx_rings_hw()
340 struct igc_ring *tx_ring = adapter->tx_ring[i]; in igc_disable_all_tx_rings_hw()
347 * igc_setup_tx_resources - allocate Tx resources (Descriptors)
354 struct net_device *ndev = tx_ring->netdev; in igc_setup_tx_resources()
355 struct device *dev = tx_ring->dev; in igc_setup_tx_resources()
358 size = sizeof(struct igc_tx_buffer) * tx_ring->count; in igc_setup_tx_resources()
359 tx_ring->tx_buffer_info = vzalloc(size); in igc_setup_tx_resources()
360 if (!tx_ring->tx_buffer_info) in igc_setup_tx_resources()
364 tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc); in igc_setup_tx_resources()
365 tx_ring->size = ALIGN(tx_ring->size, 4096); in igc_setup_tx_resources()
367 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in igc_setup_tx_resources()
368 &tx_ring->dma, GFP_KERNEL); in igc_setup_tx_resources()
370 if (!tx_ring->desc) in igc_setup_tx_resources()
373 tx_ring->next_to_use = 0; in igc_setup_tx_resources()
374 tx_ring->next_to_clean = 0; in igc_setup_tx_resources()
379 vfree(tx_ring->tx_buffer_info); in igc_setup_tx_resources()
381 return -ENOMEM; in igc_setup_tx_resources()
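Lines 358-365 size the buffer-info array by count and round the descriptor array up to a 4096-byte boundary before the coherent DMA allocation. A small sketch of the same rounding, using a local ALIGN() re-implementation and an assumed 16-byte descriptor size:

#include <stdio.h>
#include <stddef.h>

#define ALIGN(x, a)     (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
        size_t desc_size = 16;          /* assumed sizeof(union igc_adv_tx_desc) */
        size_t count = 256;             /* descriptors in the ring */
        size_t size = count * desc_size;

        size = ALIGN(size, 4096);       /* round up to a whole page */

        printf("descriptor ring needs %zu bytes\n", size);     /* 4096 here */
        return 0;
}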
385 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
392 struct net_device *dev = adapter->netdev; in igc_setup_all_tx_resources()
395 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_setup_all_tx_resources()
396 err = igc_setup_tx_resources(adapter->tx_ring[i]); in igc_setup_all_tx_resources()
399 for (i--; i >= 0; i--) in igc_setup_all_tx_resources()
400 igc_free_tx_resources(adapter->tx_ring[i]); in igc_setup_all_tx_resources()
410 u16 i = rx_ring->next_to_clean; in igc_clean_rx_ring_page_shared()
412 dev_kfree_skb(rx_ring->skb); in igc_clean_rx_ring_page_shared()
413 rx_ring->skb = NULL; in igc_clean_rx_ring_page_shared()
416 while (i != rx_ring->next_to_alloc) { in igc_clean_rx_ring_page_shared()
417 struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; in igc_clean_rx_ring_page_shared()
422 dma_sync_single_range_for_cpu(rx_ring->dev, in igc_clean_rx_ring_page_shared()
423 buffer_info->dma, in igc_clean_rx_ring_page_shared()
424 buffer_info->page_offset, in igc_clean_rx_ring_page_shared()
429 dma_unmap_page_attrs(rx_ring->dev, in igc_clean_rx_ring_page_shared()
430 buffer_info->dma, in igc_clean_rx_ring_page_shared()
434 __page_frag_cache_drain(buffer_info->page, in igc_clean_rx_ring_page_shared()
435 buffer_info->pagecnt_bias); in igc_clean_rx_ring_page_shared()
438 if (i == rx_ring->count) in igc_clean_rx_ring_page_shared()
448 for (i = 0; i < ring->count; i++) { in igc_clean_rx_ring_xsk_pool()
449 bi = &ring->rx_buffer_info[i]; in igc_clean_rx_ring_xsk_pool()
450 if (!bi->xdp) in igc_clean_rx_ring_xsk_pool()
453 xsk_buff_free(bi->xdp); in igc_clean_rx_ring_xsk_pool()
454 bi->xdp = NULL; in igc_clean_rx_ring_xsk_pool()
459 * igc_clean_rx_ring - Free Rx Buffers per Queue
464 if (ring->xsk_pool) in igc_clean_rx_ring()
471 ring->next_to_alloc = 0; in igc_clean_rx_ring()
472 ring->next_to_clean = 0; in igc_clean_rx_ring()
473 ring->next_to_use = 0; in igc_clean_rx_ring()
477 * igc_clean_all_rx_rings - Free Rx Buffers for all queues
484 for (i = 0; i < adapter->num_rx_queues; i++) in igc_clean_all_rx_rings()
485 if (adapter->rx_ring[i]) in igc_clean_all_rx_rings()
486 igc_clean_rx_ring(adapter->rx_ring[i]); in igc_clean_all_rx_rings()
490 * igc_free_rx_resources - Free Rx Resources
499 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in igc_free_rx_resources()
501 vfree(rx_ring->rx_buffer_info); in igc_free_rx_resources()
502 rx_ring->rx_buffer_info = NULL; in igc_free_rx_resources()
505 if (!rx_ring->desc) in igc_free_rx_resources()
508 dma_free_coherent(rx_ring->dev, rx_ring->size, in igc_free_rx_resources()
509 rx_ring->desc, rx_ring->dma); in igc_free_rx_resources()
511 rx_ring->desc = NULL; in igc_free_rx_resources()
515 * igc_free_all_rx_resources - Free Rx Resources for All Queues
524 for (i = 0; i < adapter->num_rx_queues; i++) in igc_free_all_rx_resources()
525 igc_free_rx_resources(adapter->rx_ring[i]); in igc_free_all_rx_resources()
529 * igc_setup_rx_resources - allocate Rx resources (Descriptors)
536 struct net_device *ndev = rx_ring->netdev; in igc_setup_rx_resources()
537 struct device *dev = rx_ring->dev; in igc_setup_rx_resources()
538 u8 index = rx_ring->queue_index; in igc_setup_rx_resources()
541 /* XDP RX-queue info */ in igc_setup_rx_resources()
542 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) in igc_setup_rx_resources()
543 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in igc_setup_rx_resources()
544 res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index, in igc_setup_rx_resources()
545 rx_ring->q_vector->napi.napi_id); in igc_setup_rx_resources()
552 size = sizeof(struct igc_rx_buffer) * rx_ring->count; in igc_setup_rx_resources()
553 rx_ring->rx_buffer_info = vzalloc(size); in igc_setup_rx_resources()
554 if (!rx_ring->rx_buffer_info) in igc_setup_rx_resources()
560 rx_ring->size = rx_ring->count * desc_len; in igc_setup_rx_resources()
561 rx_ring->size = ALIGN(rx_ring->size, 4096); in igc_setup_rx_resources()
563 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in igc_setup_rx_resources()
564 &rx_ring->dma, GFP_KERNEL); in igc_setup_rx_resources()
566 if (!rx_ring->desc) in igc_setup_rx_resources()
569 rx_ring->next_to_alloc = 0; in igc_setup_rx_resources()
570 rx_ring->next_to_clean = 0; in igc_setup_rx_resources()
571 rx_ring->next_to_use = 0; in igc_setup_rx_resources()
576 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in igc_setup_rx_resources()
577 vfree(rx_ring->rx_buffer_info); in igc_setup_rx_resources()
578 rx_ring->rx_buffer_info = NULL; in igc_setup_rx_resources()
580 return -ENOMEM; in igc_setup_rx_resources()
584 * igc_setup_all_rx_resources - wrapper to allocate Rx resources
592 struct net_device *dev = adapter->netdev; in igc_setup_all_rx_resources()
595 for (i = 0; i < adapter->num_rx_queues; i++) { in igc_setup_all_rx_resources()
596 err = igc_setup_rx_resources(adapter->rx_ring[i]); in igc_setup_all_rx_resources()
599 for (i--; i >= 0; i--) in igc_setup_all_rx_resources()
600 igc_free_rx_resources(adapter->rx_ring[i]); in igc_setup_all_rx_resources()
612 !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags)) in igc_get_xsk_pool()
615 return xsk_get_pool_from_qid(ring->netdev, ring->queue_index); in igc_get_xsk_pool()
619 * igc_configure_rx_ring - Configure a receive ring after Reset
628 struct igc_hw *hw = &adapter->hw; in igc_configure_rx_ring()
630 int reg_idx = ring->reg_idx; in igc_configure_rx_ring()
632 u64 rdba = ring->dma; in igc_configure_rx_ring()
635 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); in igc_configure_rx_ring()
636 ring->xsk_pool = igc_get_xsk_pool(adapter, ring); in igc_configure_rx_ring()
637 if (ring->xsk_pool) { in igc_configure_rx_ring()
638 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, in igc_configure_rx_ring()
641 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); in igc_configure_rx_ring()
643 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, in igc_configure_rx_ring()
659 ring->count * sizeof(union igc_adv_rx_desc)); in igc_configure_rx_ring()
662 ring->tail = adapter->io_addr + IGC_RDT(reg_idx); in igc_configure_rx_ring()
664 writel(0, ring->tail); in igc_configure_rx_ring()
666 /* reset next-to-use/clean to place SW in sync with hardware */ in igc_configure_rx_ring()
667 ring->next_to_clean = 0; in igc_configure_rx_ring()
668 ring->next_to_use = 0; in igc_configure_rx_ring()
670 if (ring->xsk_pool) in igc_configure_rx_ring()
671 buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool); in igc_configure_rx_ring()
691 memset(ring->rx_buffer_info, 0, in igc_configure_rx_ring()
692 sizeof(struct igc_rx_buffer) * ring->count); in igc_configure_rx_ring()
696 rx_desc->wb.upper.length = 0; in igc_configure_rx_ring()
705 * igc_configure_rx - Configure receive Unit after Reset
717 for (i = 0; i < adapter->num_rx_queues; i++) in igc_configure_rx()
718 igc_configure_rx_ring(adapter, adapter->rx_ring[i]); in igc_configure_rx()
722 * igc_configure_tx_ring - Configure transmit ring after Reset
731 struct igc_hw *hw = &adapter->hw; in igc_configure_tx_ring()
732 int reg_idx = ring->reg_idx; in igc_configure_tx_ring()
733 u64 tdba = ring->dma; in igc_configure_tx_ring()
736 ring->xsk_pool = igc_get_xsk_pool(adapter, ring); in igc_configure_tx_ring()
743 ring->count * sizeof(union igc_adv_tx_desc)); in igc_configure_tx_ring()
748 ring->tail = adapter->io_addr + IGC_TDT(reg_idx); in igc_configure_tx_ring()
750 writel(0, ring->tail); in igc_configure_tx_ring()
759 * igc_configure_tx - Configure transmit Unit after Reset
768 for (i = 0; i < adapter->num_tx_queues; i++) in igc_configure_tx()
769 igc_configure_tx_ring(adapter, adapter->tx_ring[i]); in igc_configure_tx()
773 * igc_setup_mrqc - configure the multiple receive queue control registers
778 struct igc_hw *hw = &adapter->hw; in igc_setup_mrqc()
787 num_rx_queues = adapter->rss_queues; in igc_setup_mrqc()
789 if (adapter->rss_indir_tbl_init != num_rx_queues) { in igc_setup_mrqc()
791 adapter->rss_indir_tbl[j] = in igc_setup_mrqc()
793 adapter->rss_indir_tbl_init = num_rx_queues; in igc_setup_mrqc()
819 if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) in igc_setup_mrqc()
821 if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) in igc_setup_mrqc()
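The refill of the RSS indirection table at lines 789-793 only shows its bounds in this match; the right-hand side of the assignment at line 791 is elided. As a hedged illustration of what such a refill typically does, the sketch below spreads queue numbers evenly over a 128-entry table:

#include <stdio.h>

#define RETA_SIZE 128

int main(void)
{
        unsigned char reta[RETA_SIZE];
        unsigned int num_rx_queues = 4; /* illustrative queue count */
        unsigned int j;

        /* even spread: each queue gets a contiguous block of RETA entries */
        for (j = 0; j < RETA_SIZE; j++)
                reta[j] = (j * num_rx_queues) / RETA_SIZE;

        /* entries 0..31 -> queue 0, 32..63 -> queue 1, and so on */
        printf("reta[0]=%u reta[64]=%u reta[127]=%u\n", reta[0], reta[64], reta[127]);
        return 0;
}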
830 * igc_setup_rctl - configure the receive control registers
835 struct igc_hw *hw = &adapter->hw; in igc_setup_rctl()
844 (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT); in igc_setup_rctl()
857 /* disable queue 0 to prevent tail write w/o re-config */ in igc_setup_rctl()
861 if (adapter->netdev->features & NETIF_F_RXALL) { in igc_setup_rctl()
877 * igc_setup_tctl - configure the transmit control registers
882 struct igc_hw *hw = &adapter->hw; in igc_setup_tctl()
901 * igc_set_mac_filter_hw() - Set MAC address filter in hardware
906 * @queue: If non-negative, queue assignment feature is enabled and frames
914 struct net_device *dev = adapter->netdev; in igc_set_mac_filter_hw()
915 struct igc_hw *hw = &adapter->hw; in igc_set_mac_filter_hw()
918 if (WARN_ON(index >= hw->mac.rar_entry_count)) in igc_set_mac_filter_hw()
944 * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware
950 struct net_device *dev = adapter->netdev; in igc_clear_mac_filter_hw()
951 struct igc_hw *hw = &adapter->hw; in igc_clear_mac_filter_hw()
953 if (WARN_ON(index >= hw->mac.rar_entry_count)) in igc_clear_mac_filter_hw()
965 struct net_device *dev = adapter->netdev; in igc_set_default_mac_filter()
966 u8 *addr = adapter->hw.mac.addr; in igc_set_default_mac_filter()
970 igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1); in igc_set_default_mac_filter()
974 * igc_set_mac - Change the Ethernet Address of the NIC
983 struct igc_hw *hw = &adapter->hw; in igc_set_mac()
986 if (!is_valid_ether_addr(addr->sa_data)) in igc_set_mac()
987 return -EADDRNOTAVAIL; in igc_set_mac()
989 eth_hw_addr_set(netdev, addr->sa_data); in igc_set_mac()
990 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); in igc_set_mac()
999 * igc_write_mc_addr_list - write multicast addresses to MTA
1003 * Returns: -ENOMEM on failure
1010 struct igc_hw *hw = &adapter->hw; in igc_write_mc_addr_list()
1023 return -ENOMEM; in igc_write_mc_addr_list()
1028 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); in igc_write_mc_addr_list()
1039 struct igc_adapter *adapter = netdev_priv(ring->netdev); in igc_tx_launchtime()
1040 ktime_t cycle_time = adapter->cycle_time; in igc_tx_launchtime()
1041 ktime_t base_time = adapter->base_time; in igc_tx_launchtime()
1053 if (baset_est != ring->last_ff_cycle) { in igc_tx_launchtime()
1055 ring->last_ff_cycle = baset_est; in igc_tx_launchtime()
1057 if (ktime_compare(end_of_cycle, ring->last_tx_cycle) > 0) in igc_tx_launchtime()
1068 netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n", in igc_tx_launchtime()
1071 ring->last_tx_cycle = end_of_cycle; in igc_tx_launchtime()
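igc_tx_launchtime() above compares the requested transmit time against the start and end of the current Qbv cycle (baset_est, end_of_cycle). The sketch below derives those two bounds from a base time and a cycle length in the usual way; the arithmetic is reconstructed from the variables visible in the match, not copied from the elided lines.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int64_t base_time = 1000;       /* ns, illustrative */
        int64_t cycle_time = 500;       /* ns, illustrative */
        int64_t now = 2340;             /* ns, illustrative */

        int64_t n = (now - base_time) / cycle_time;     /* completed cycles */
        int64_t baset_est = base_time + n * cycle_time; /* current cycle start */
        int64_t end_of_cycle = baset_est + cycle_time;  /* current cycle end */

        printf("cycle %lld: [%lld, %lld)\n",
               (long long)n, (long long)baset_est, (long long)end_of_cycle);
        return 0;
}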
1091 dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE); in igc_init_empty_frame()
1092 if (dma_mapping_error(ring->dev, dma)) { in igc_init_empty_frame()
1094 netdev_name(ring->netdev)); in igc_init_empty_frame()
1095 return -ENOMEM; in igc_init_empty_frame()
1098 buffer->type = IGC_TX_BUFFER_TYPE_SKB; in igc_init_empty_frame()
1099 buffer->skb = skb; in igc_init_empty_frame()
1100 buffer->protocol = 0; in igc_init_empty_frame()
1101 buffer->bytecount = skb->len; in igc_init_empty_frame()
1102 buffer->gso_segs = 1; in igc_init_empty_frame()
1103 buffer->time_stamp = jiffies; in igc_init_empty_frame()
1104 dma_unmap_len_set(buffer, len, skb->len); in igc_init_empty_frame()
1119 first->bytecount; in igc_init_tx_empty_descriptor()
1120 olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; in igc_init_tx_empty_descriptor()
1122 desc = IGC_TX_DESC(ring, ring->next_to_use); in igc_init_tx_empty_descriptor()
1123 desc->read.cmd_type_len = cpu_to_le32(cmd_type); in igc_init_tx_empty_descriptor()
1124 desc->read.olinfo_status = cpu_to_le32(olinfo_status); in igc_init_tx_empty_descriptor()
1125 desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma)); in igc_init_tx_empty_descriptor()
1127 netdev_tx_sent_queue(txring_txq(ring), skb->len); in igc_init_tx_empty_descriptor()
1129 first->next_to_watch = desc; in igc_init_tx_empty_descriptor()
1131 ring->next_to_use++; in igc_init_tx_empty_descriptor()
1132 if (ring->next_to_use == ring->count) in igc_init_tx_empty_descriptor()
1133 ring->next_to_use = 0; in igc_init_tx_empty_descriptor()
1144 u16 i = tx_ring->next_to_use; in igc_tx_ctxtdesc()
1149 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in igc_tx_ctxtdesc()
1155 if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) in igc_tx_ctxtdesc()
1156 mss_l4len_idx |= tx_ring->reg_idx << 4; in igc_tx_ctxtdesc()
1161 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); in igc_tx_ctxtdesc()
1162 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); in igc_tx_ctxtdesc()
1163 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); in igc_tx_ctxtdesc()
1164 context_desc->launch_time = launch_time; in igc_tx_ctxtdesc()
1170 struct sk_buff *skb = first->skb; in igc_tx_csum()
1174 if (skb->ip_summed != CHECKSUM_PARTIAL) { in igc_tx_csum()
1176 if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) && in igc_tx_csum()
1177 !tx_ring->launchtime_enable) in igc_tx_csum()
1182 switch (skb->csum_offset) { in igc_tx_csum()
1201 first->tx_flags |= IGC_TX_FLAGS_CSUM; in igc_tx_csum()
1202 vlan_macip_lens = skb_checksum_start_offset(skb) - in igc_tx_csum()
1206 vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; in igc_tx_csum()
1214 struct net_device *netdev = tx_ring->netdev; in __igc_maybe_stop_tx()
1216 netif_stop_subqueue(netdev, tx_ring->queue_index); in __igc_maybe_stop_tx()
1225 return -EBUSY; in __igc_maybe_stop_tx()
1228 netif_wake_subqueue(netdev, tx_ring->queue_index); in __igc_maybe_stop_tx()
1230 u64_stats_update_begin(&tx_ring->tx_syncp2); in __igc_maybe_stop_tx()
1231 tx_ring->tx_stats.restart_queue2++; in __igc_maybe_stop_tx()
1232 u64_stats_update_end(&tx_ring->tx_syncp2); in __igc_maybe_stop_tx()
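__igc_maybe_stop_tx() stops the subqueue, then re-checks for free descriptors and wakes the queue again if the completion path freed enough in the meantime (lines 1216-1232); the driver places a memory barrier between the stop and the re-check. A sketch of that stop/re-check/wake pattern follows; the unused-slot formula is the common ring computation and is written here as an assumption, not copied from the driver's helper.

#include <stdio.h>

struct ring {
        unsigned short next_to_use;
        unsigned short next_to_clean;
        unsigned short count;
        int stopped;
};

static unsigned short desc_unused(const struct ring *r)
{
        unsigned short ntc = r->next_to_clean;
        unsigned short ntu = r->next_to_use;

        return ((ntc > ntu) ? 0 : r->count) + ntc - ntu - 1;
}

static int maybe_stop_tx(struct ring *r, unsigned short needed)
{
        if (desc_unused(r) >= needed)
                return 0;

        r->stopped = 1;         /* netif_stop_subqueue() in the driver */

        /* after the barrier the completion path may already have freed
         * descriptors, so check again before giving up
         */
        if (desc_unused(r) < needed)
                return -1;      /* -EBUSY: stay stopped */

        r->stopped = 0;         /* netif_wake_subqueue() + restart_queue2++ */
        return 0;
}

int main(void)
{
        struct ring r = { .next_to_use = 10, .next_to_clean = 4, .count = 16 };

        printf("unused=%u stop=%d\n", desc_unused(&r), maybe_stop_tx(&r, 12));
        return 0;
}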
1280 cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS); in igc_tx_cmd_type()
1303 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); in igc_tx_olinfo_status()
1310 struct sk_buff *skb = first->skb; in igc_tx_map()
1313 u32 tx_flags = first->tx_flags; in igc_tx_map()
1315 u16 i = tx_ring->next_to_use; in igc_tx_map()
1323 igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); in igc_tx_map()
1326 data_len = skb->data_len; in igc_tx_map()
1328 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in igc_tx_map()
1332 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { in igc_tx_map()
1333 if (dma_mapping_error(tx_ring->dev, dma)) in igc_tx_map()
1340 tx_desc->read.buffer_addr = cpu_to_le64(dma); in igc_tx_map()
1343 tx_desc->read.cmd_type_len = in igc_tx_map()
1348 if (i == tx_ring->count) { in igc_tx_map()
1352 tx_desc->read.olinfo_status = 0; in igc_tx_map()
1355 size -= IGC_MAX_DATA_PER_TXD; in igc_tx_map()
1357 tx_desc->read.buffer_addr = cpu_to_le64(dma); in igc_tx_map()
1363 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); in igc_tx_map()
1367 if (i == tx_ring->count) { in igc_tx_map()
1371 tx_desc->read.olinfo_status = 0; in igc_tx_map()
1374 data_len -= size; in igc_tx_map()
1376 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, in igc_tx_map()
1379 tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_tx_map()
1384 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); in igc_tx_map()
1386 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in igc_tx_map()
1389 first->time_stamp = jiffies; in igc_tx_map()
1394 * are new descriptors to fetch. (Only applicable for weak-ordered in igc_tx_map()
1395 * memory model archs, such as IA-64). in igc_tx_map()
1403 first->next_to_watch = tx_desc; in igc_tx_map()
1406 if (i == tx_ring->count) in igc_tx_map()
1409 tx_ring->next_to_use = i; in igc_tx_map()
1415 writel(i, tx_ring->tail); in igc_tx_map()
1420 netdev_err(tx_ring->netdev, "TX DMA map failed\n"); in igc_tx_map()
1421 tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_tx_map()
1426 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_tx_map()
1428 if (i-- == 0) in igc_tx_map()
1429 i += tx_ring->count; in igc_tx_map()
1430 tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_tx_map()
1434 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_tx_map()
1436 dev_kfree_skb_any(tx_buffer->skb); in igc_tx_map()
1437 tx_buffer->skb = NULL; in igc_tx_map()
1439 tx_ring->next_to_use = i; in igc_tx_map()
1441 return -1; in igc_tx_map()
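In igc_tx_map(), a mapped region larger than the per-descriptor limit is split across several descriptors (the size -= IGC_MAX_DATA_PER_TXD step at line 1355). A sketch of just that chunking, with the cap taken as an assumed 32 KiB:

#include <stdio.h>

#define MAX_DATA_PER_TXD (32 * 1024)    /* assumed stand-in for IGC_MAX_DATA_PER_TXD */

static unsigned int emit_descriptors(unsigned long long dma, unsigned int size)
{
        unsigned int used = 0;

        while (size > MAX_DATA_PER_TXD) {
                /* descriptor covering [dma, dma + MAX_DATA_PER_TXD) */
                used++;
                dma += MAX_DATA_PER_TXD;
                size -= MAX_DATA_PER_TXD;
        }

        /* final descriptor for the remainder of this region */
        used++;
        return used;
}

int main(void)
{
        printf("%u descriptors for 80000 bytes\n",
               emit_descriptors(0x1000, 80000));        /* prints 3 */
        return 0;
}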
1450 struct sk_buff *skb = first->skb; in igc_tso()
1464 if (skb->ip_summed != CHECKSUM_PARTIAL) in igc_tso()
1481 if (ip.v4->version == 4) { in igc_tso()
1483 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); in igc_tso()
1488 ip.v4->check = csum_fold(csum_partial(trans_start, in igc_tso()
1489 csum_start - trans_start, in igc_tso()
1493 ip.v4->tot_len = 0; in igc_tso()
1494 first->tx_flags |= IGC_TX_FLAGS_TSO | in igc_tso()
1498 ip.v6->payload_len = 0; in igc_tso()
1499 first->tx_flags |= IGC_TX_FLAGS_TSO | in igc_tso()
1504 l4_offset = l4.hdr - skb->data; in igc_tso()
1507 paylen = skb->len - l4_offset; in igc_tso()
1510 *hdr_len = (l4.tcp->doff * 4) + l4_offset; in igc_tso()
1511 csum_replace_by_diff(&l4.tcp->check, in igc_tso()
1516 csum_replace_by_diff(&l4.udp->check, in igc_tso()
1521 first->gso_segs = skb_shinfo(skb)->gso_segs; in igc_tso()
1522 first->bytecount += (first->gso_segs - 1) * *hdr_len; in igc_tso()
1525 mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT; in igc_tso()
1526 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT; in igc_tso()
1529 vlan_macip_lens = l4.hdr - ip.hdr; in igc_tso()
1530 vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT; in igc_tso()
1531 vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; in igc_tso()
1544 struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i]; in igc_request_tx_tstamp()
1546 if (tstamp->skb) in igc_request_tx_tstamp()
1549 tstamp->skb = skb_get(skb); in igc_request_tx_tstamp()
1550 tstamp->start = jiffies; in igc_request_tx_tstamp()
1551 *flags = tstamp->flags; in igc_request_tx_tstamp()
1566 empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in igc_insert_empty_frame()
1570 netdev_name(tx_ring->netdev)); in igc_insert_empty_frame()
1571 return -ENOMEM; in igc_insert_empty_frame()
1596 struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); in igc_xmit_frame_ring()
1615 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) in igc_xmit_frame_ring()
1617 &skb_shinfo(skb)->frags[f])); in igc_xmit_frame_ring()
1624 if (!tx_ring->launchtime_enable) in igc_xmit_frame_ring()
1627 txtime = skb->tstamp; in igc_xmit_frame_ring()
1628 skb->tstamp = ktime_set(0, 0); in igc_xmit_frame_ring()
1646 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in igc_xmit_frame_ring()
1647 first->type = IGC_TX_BUFFER_TYPE_SKB; in igc_xmit_frame_ring()
1648 first->skb = skb; in igc_xmit_frame_ring()
1649 first->bytecount = skb->len; in igc_xmit_frame_ring()
1650 first->gso_segs = 1; in igc_xmit_frame_ring()
1652 if (adapter->qbv_transition || tx_ring->oper_gate_closed) in igc_xmit_frame_ring()
1655 if (tx_ring->max_sdu > 0 && first->bytecount > tx_ring->max_sdu) { in igc_xmit_frame_ring()
1656 adapter->stats.txdrop++; in igc_xmit_frame_ring()
1660 if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) && in igc_xmit_frame_ring()
1661 skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { in igc_xmit_frame_ring()
1665 spin_lock_irqsave(&adapter->ptp_tx_lock, flags); in igc_xmit_frame_ring()
1667 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in igc_xmit_frame_ring()
1669 if (skb->sk && in igc_xmit_frame_ring()
1670 READ_ONCE(skb->sk->sk_tsflags) & SOF_TIMESTAMPING_BIND_PHC) in igc_xmit_frame_ring()
1673 adapter->tx_hwtstamp_skipped++; in igc_xmit_frame_ring()
1676 spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); in igc_xmit_frame_ring()
1685 first->tx_flags = tx_flags; in igc_xmit_frame_ring()
1686 first->protocol = protocol; in igc_xmit_frame_ring()
1691 if (tx_ring->preemptible && skb->len < ETH_ZLEN) { in igc_xmit_frame_ring()
1694 skb_put(skb, ETH_ZLEN - skb->len); in igc_xmit_frame_ring()
1708 dev_kfree_skb_any(first->skb); in igc_xmit_frame_ring()
1709 first->skb = NULL; in igc_xmit_frame_ring()
1717 unsigned int r_idx = skb->queue_mapping; in igc_tx_queue_mapping()
1719 if (r_idx >= adapter->num_tx_queues) in igc_tx_queue_mapping()
1720 r_idx = r_idx % adapter->num_tx_queues; in igc_tx_queue_mapping()
1722 return adapter->tx_ring[r_idx]; in igc_tx_queue_mapping()
1733 if (skb->len < 17) { in igc_xmit_frame()
1736 skb->len = 17; in igc_xmit_frame()
1753 if (!(ring->netdev->features & NETIF_F_RXCSUM)) in igc_rx_checksum()
1764 if (!(skb->len == 60 && in igc_rx_checksum()
1765 test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { in igc_rx_checksum()
1766 u64_stats_update_begin(&ring->rx_syncp); in igc_rx_checksum()
1767 ring->rx_stats.csum_err++; in igc_rx_checksum()
1768 u64_stats_update_end(&ring->rx_syncp); in igc_rx_checksum()
1776 skb->ip_summed = CHECKSUM_UNNECESSARY; in igc_rx_checksum()
1778 netdev_dbg(ring->netdev, "cksum success: bits %08X\n", in igc_rx_checksum()
1779 le32_to_cpu(rx_desc->wb.upper.status_error)); in igc_rx_checksum()
1795 [11] = PKT_HASH_TYPE_NONE, /* keep array sized for SW bit-mask */
1806 if (ring->netdev->features & NETIF_F_RXHASH) { in igc_rx_hash()
1807 u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); in igc_rx_hash()
1818 struct net_device *dev = rx_ring->netdev; in igc_rx_vlan()
1821 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && in igc_rx_vlan()
1824 test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) in igc_rx_vlan()
1825 vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan); in igc_rx_vlan()
1827 vid = le16_to_cpu(rx_desc->wb.upper.vlan); in igc_rx_vlan()
1834 * igc_process_skb_fields - Populate skb header fields from Rx descriptor
1853 skb_record_rx_queue(skb, rx_ring->queue_index); in igc_process_skb_fields()
1855 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in igc_process_skb_fields()
1862 struct igc_hw *hw = &adapter->hw; in igc_vlan_mode()
1879 igc_vlan_mode(adapter->netdev, adapter->netdev->features); in igc_restore_vlan()
1888 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in igc_get_rx_buffer()
1891 page_count(rx_buffer->page); in igc_get_rx_buffer()
1895 prefetchw(rx_buffer->page); in igc_get_rx_buffer()
1898 dma_sync_single_range_for_cpu(rx_ring->dev, in igc_get_rx_buffer()
1899 rx_buffer->dma, in igc_get_rx_buffer()
1900 rx_buffer->page_offset, in igc_get_rx_buffer()
1904 rx_buffer->pagecnt_bias--; in igc_get_rx_buffer()
1913 buffer->page_offset ^= truesize; in igc_rx_buffer_flip()
1915 buffer->page_offset += truesize; in igc_rx_buffer_flip()
1936 * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
1942 * This function will add the data contained in rx_buffer->page to the skb.
1958 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, in igc_add_rx_frag()
1959 rx_buffer->page_offset, size, truesize); in igc_add_rx_frag()
1968 unsigned int size = xdp->data_end - xdp->data; in igc_build_skb()
1970 unsigned int metasize = xdp->data - xdp->data_meta; in igc_build_skb()
1974 net_prefetch(xdp->data_meta); in igc_build_skb()
1977 skb = napi_build_skb(xdp->data_hard_start, truesize); in igc_build_skb()
1982 skb_reserve(skb, xdp->data - xdp->data_hard_start); in igc_build_skb()
1995 struct xdp_buff *xdp = &ctx->xdp; in igc_construct_skb()
1996 unsigned int metasize = xdp->data - xdp->data_meta; in igc_construct_skb()
1997 unsigned int size = xdp->data_end - xdp->data; in igc_construct_skb()
1999 void *va = xdp->data; in igc_construct_skb()
2004 net_prefetch(xdp->data_meta); in igc_construct_skb()
2007 skb = napi_alloc_skb(&rx_ring->q_vector->napi, in igc_construct_skb()
2012 if (ctx->rx_ts) { in igc_construct_skb()
2013 skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV; in igc_construct_skb()
2014 skb_hwtstamps(skb)->netdev_data = ctx->rx_ts; in igc_construct_skb()
2020 headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN); in igc_construct_skb()
2023 memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta, in igc_construct_skb()
2032 size -= headlen; in igc_construct_skb()
2034 skb_add_rx_frag(skb, 0, rx_buffer->page, in igc_construct_skb()
2035 (va + headlen) - page_address(rx_buffer->page), in igc_construct_skb()
2039 rx_buffer->pagecnt_bias++; in igc_construct_skb()
2046 * igc_reuse_rx_page - page flip buffer and store it back on the ring
2055 u16 nta = rx_ring->next_to_alloc; in igc_reuse_rx_page()
2058 new_buff = &rx_ring->rx_buffer_info[nta]; in igc_reuse_rx_page()
2062 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in igc_reuse_rx_page()
2068 new_buff->dma = old_buff->dma; in igc_reuse_rx_page()
2069 new_buff->page = old_buff->page; in igc_reuse_rx_page()
2070 new_buff->page_offset = old_buff->page_offset; in igc_reuse_rx_page()
2071 new_buff->pagecnt_bias = old_buff->pagecnt_bias; in igc_reuse_rx_page()
2077 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; in igc_can_reuse_rx_page()
2078 struct page *page = rx_buffer->page; in igc_can_reuse_rx_page()
2080 /* avoid re-using remote and pfmemalloc pages */ in igc_can_reuse_rx_page()
2086 if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) in igc_can_reuse_rx_page()
2090 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048) in igc_can_reuse_rx_page()
2092 if (rx_buffer->page_offset > IGC_LAST_OFFSET) in igc_can_reuse_rx_page()
2101 page_ref_add(page, USHRT_MAX - 1); in igc_can_reuse_rx_page()
2102 rx_buffer->pagecnt_bias = USHRT_MAX; in igc_can_reuse_rx_page()
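Reuse in igc_can_reuse_rx_page() hinges on comparing the page's reference count against the driver's pagecnt_bias: if the stack still holds references beyond the fragment just handed out, the page cannot be flipped back onto the ring, and when the bias runs low it is topped back up in bulk (lines 2101-2102). A compressed sketch of that test, with a plain counter standing in for the real page refcount:

#include <stdio.h>
#include <limits.h>

struct rx_buffer {
        unsigned int page_refcount;     /* stand-in for page_count(page) */
        unsigned short pagecnt_bias;    /* references the driver still "owns" */
};

static int can_reuse(struct rx_buffer *b)
{
        /* the stack still holds an earlier fragment of this page */
        if ((b->page_refcount - b->pagecnt_bias) > 1)
                return 0;

        /* about to hand out the last biased reference: top the bias up so
         * the expensive atomic refcount update stays rare
         */
        if (b->pagecnt_bias == 1) {
                b->page_refcount += USHRT_MAX - 1;      /* page_ref_add() */
                b->pagecnt_bias = USHRT_MAX;
        }
        return 1;
}

int main(void)
{
        struct rx_buffer b = { .page_refcount = 2, .pagecnt_bias = 1 };

        printf("reuse=%d bias=%u\n", can_reuse(&b), b.pagecnt_bias);
        return 0;
}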
2109 * igc_is_non_eop - process handling of non-EOP buffers
2116 * that this is in fact a non-EOP buffer.
2121 u32 ntc = rx_ring->next_to_clean + 1; in igc_is_non_eop()
2124 ntc = (ntc < rx_ring->count) ? ntc : 0; in igc_is_non_eop()
2125 rx_ring->next_to_clean = ntc; in igc_is_non_eop()
2136 * igc_cleanup_headers - Correct corrupted or empty headers
2154 struct net_device *netdev = rx_ring->netdev; in igc_cleanup_headers()
2156 if (!(netdev->features & NETIF_F_RXALL)) { in igc_cleanup_headers()
2180 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in igc_put_rx_buffer()
2183 __page_frag_cache_drain(rx_buffer->page, in igc_put_rx_buffer()
2184 rx_buffer->pagecnt_bias); in igc_put_rx_buffer()
2188 rx_buffer->page = NULL; in igc_put_rx_buffer()
2193 struct igc_adapter *adapter = rx_ring->q_vector->adapter; in igc_rx_offset()
2206 struct page *page = bi->page; in igc_alloc_mapped_page()
2216 rx_ring->rx_stats.alloc_failed++; in igc_alloc_mapped_page()
2217 set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); in igc_alloc_mapped_page()
2222 dma = dma_map_page_attrs(rx_ring->dev, page, 0, in igc_alloc_mapped_page()
2230 if (dma_mapping_error(rx_ring->dev, dma)) { in igc_alloc_mapped_page()
2233 rx_ring->rx_stats.alloc_failed++; in igc_alloc_mapped_page()
2234 set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); in igc_alloc_mapped_page()
2238 bi->dma = dma; in igc_alloc_mapped_page()
2239 bi->page = page; in igc_alloc_mapped_page()
2240 bi->page_offset = igc_rx_offset(rx_ring); in igc_alloc_mapped_page()
2241 page_ref_add(page, USHRT_MAX - 1); in igc_alloc_mapped_page()
2242 bi->pagecnt_bias = USHRT_MAX; in igc_alloc_mapped_page()
2248 * igc_alloc_rx_buffers - Replace used receive buffers; packet split
2255 u16 i = rx_ring->next_to_use; in igc_alloc_rx_buffers()
2264 bi = &rx_ring->rx_buffer_info[i]; in igc_alloc_rx_buffers()
2265 i -= rx_ring->count; in igc_alloc_rx_buffers()
2274 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in igc_alloc_rx_buffers()
2275 bi->page_offset, bufsz, in igc_alloc_rx_buffers()
2279 * because each write-back erases this info. in igc_alloc_rx_buffers()
2281 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in igc_alloc_rx_buffers()
2288 bi = rx_ring->rx_buffer_info; in igc_alloc_rx_buffers()
2289 i -= rx_ring->count; in igc_alloc_rx_buffers()
2293 rx_desc->wb.upper.length = 0; in igc_alloc_rx_buffers()
2295 cleaned_count--; in igc_alloc_rx_buffers()
2298 i += rx_ring->count; in igc_alloc_rx_buffers()
2300 if (rx_ring->next_to_use != i) { in igc_alloc_rx_buffers()
2302 rx_ring->next_to_use = i; in igc_alloc_rx_buffers()
2305 rx_ring->next_to_alloc = i; in igc_alloc_rx_buffers()
2309 * applicable for weak-ordered memory model archs, in igc_alloc_rx_buffers()
2310 * such as IA-64). in igc_alloc_rx_buffers()
2313 writel(i, rx_ring->tail); in igc_alloc_rx_buffers()
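igc_alloc_rx_buffers() keeps its ring index biased by -count in a u16 (lines 2265, 2289, 2298), so the end-of-ring check can be a simple test for zero and the real index is recovered at the end by adding count back. A compact sketch of the index trick alone, with arbitrary fill counts:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t count = 8, next_to_use = 6;
        uint16_t i = next_to_use;
        unsigned int to_fill = 5;

        i -= count;                     /* bias the index by -count */

        do {
                /* (uint16_t)(i + count) is the real ring slot being filled */
                printf("fill slot %u\n", (unsigned int)(uint16_t)(i + count));
                i++;
                if (!i)                 /* ran off the end of the ring */
                        i -= count;     /* re-bias for the next lap */
                to_fill--;
        } while (to_fill);

        i += count;                     /* recover the real index */
        printf("next_to_use is now %u\n", (unsigned int)i);    /* (6 + 5) mod 8 = 3 */
        return 0;
}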
2320 u16 i = ring->next_to_use; in igc_alloc_rx_buffers_zc()
2331 bi = &ring->rx_buffer_info[i]; in igc_alloc_rx_buffers_zc()
2332 i -= ring->count; in igc_alloc_rx_buffers_zc()
2335 bi->xdp = xsk_buff_alloc(ring->xsk_pool); in igc_alloc_rx_buffers_zc()
2336 if (!bi->xdp) { in igc_alloc_rx_buffers_zc()
2341 dma = xsk_buff_xdp_get_dma(bi->xdp); in igc_alloc_rx_buffers_zc()
2342 desc->read.pkt_addr = cpu_to_le64(dma); in igc_alloc_rx_buffers_zc()
2349 bi = ring->rx_buffer_info; in igc_alloc_rx_buffers_zc()
2350 i -= ring->count; in igc_alloc_rx_buffers_zc()
2354 desc->wb.upper.length = 0; in igc_alloc_rx_buffers_zc()
2356 count--; in igc_alloc_rx_buffers_zc()
2359 i += ring->count; in igc_alloc_rx_buffers_zc()
2361 if (ring->next_to_use != i) { in igc_alloc_rx_buffers_zc()
2362 ring->next_to_use = i; in igc_alloc_rx_buffers_zc()
2366 * applicable for weak-ordered memory model archs, in igc_alloc_rx_buffers_zc()
2367 * such as IA-64). in igc_alloc_rx_buffers_zc()
2370 writel(i, ring->tail); in igc_alloc_rx_buffers_zc()
2381 u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0; in igc_xdp_init_tx_descriptor()
2382 u16 count, index = ring->next_to_use; in igc_xdp_init_tx_descriptor()
2383 struct igc_tx_buffer *head = &ring->tx_buffer_info[index]; in igc_xdp_init_tx_descriptor()
2386 u32 olinfo_status, len = xdpf->len, cmd_type; in igc_xdp_init_tx_descriptor()
2387 void *data = xdpf->data; in igc_xdp_init_tx_descriptor()
2392 count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i])); in igc_xdp_init_tx_descriptor()
2396 return -EBUSY; in igc_xdp_init_tx_descriptor()
2400 head->bytecount = xdp_get_frame_len(xdpf); in igc_xdp_init_tx_descriptor()
2401 head->type = IGC_TX_BUFFER_TYPE_XDP; in igc_xdp_init_tx_descriptor()
2402 head->gso_segs = 1; in igc_xdp_init_tx_descriptor()
2403 head->xdpf = xdpf; in igc_xdp_init_tx_descriptor()
2405 olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; in igc_xdp_init_tx_descriptor()
2406 desc->read.olinfo_status = cpu_to_le32(olinfo_status); in igc_xdp_init_tx_descriptor()
2411 dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE); in igc_xdp_init_tx_descriptor()
2412 if (dma_mapping_error(ring->dev, dma)) { in igc_xdp_init_tx_descriptor()
2413 netdev_err_once(ring->netdev, in igc_xdp_init_tx_descriptor()
2424 desc->read.cmd_type_len = cpu_to_le32(cmd_type); in igc_xdp_init_tx_descriptor()
2425 desc->read.buffer_addr = cpu_to_le64(dma); in igc_xdp_init_tx_descriptor()
2427 buffer->protocol = 0; in igc_xdp_init_tx_descriptor()
2429 if (++index == ring->count) in igc_xdp_init_tx_descriptor()
2435 buffer = &ring->tx_buffer_info[index]; in igc_xdp_init_tx_descriptor()
2437 desc->read.olinfo_status = 0; in igc_xdp_init_tx_descriptor()
2439 data = skb_frag_address(&sinfo->frags[i]); in igc_xdp_init_tx_descriptor()
2440 len = skb_frag_size(&sinfo->frags[i]); in igc_xdp_init_tx_descriptor()
2443 desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD); in igc_xdp_init_tx_descriptor()
2445 netdev_tx_sent_queue(txring_txq(ring), head->bytecount); in igc_xdp_init_tx_descriptor()
2447 head->time_stamp = jiffies; in igc_xdp_init_tx_descriptor()
2449 head->next_to_watch = desc; in igc_xdp_init_tx_descriptor()
2450 ring->next_to_use = index; in igc_xdp_init_tx_descriptor()
2456 buffer = &ring->tx_buffer_info[index]; in igc_xdp_init_tx_descriptor()
2458 dma_unmap_page(ring->dev, in igc_xdp_init_tx_descriptor()
2467 index += ring->count; in igc_xdp_init_tx_descriptor()
2468 index--; in igc_xdp_init_tx_descriptor()
2471 return -ENOMEM; in igc_xdp_init_tx_descriptor()
2481 while (index >= adapter->num_tx_queues) in igc_get_tx_ring()
2482 index -= adapter->num_tx_queues; in igc_get_tx_ring()
2484 return adapter->tx_ring[index]; in igc_get_tx_ring()
2496 return -EFAULT; in igc_xdp_xmit_back()
2524 if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0) in __igc_xdp_run_prog()
2529 bpf_warn_invalid_xdp_action(adapter->netdev, prog, act); in __igc_xdp_run_prog()
2533 trace_xdp_exception(adapter->netdev, prog, act); in __igc_xdp_run_prog()
2545 prog = READ_ONCE(adapter->xdp_prog); in igc_xdp_run_prog()
2565 writel(ring->next_to_use, ring->tail); in igc_flush_tx_descriptors()
2590 struct igc_ring *ring = q_vector->rx.ring; in igc_update_rx_stats()
2592 u64_stats_update_begin(&ring->rx_syncp); in igc_update_rx_stats()
2593 ring->rx_stats.packets += packets; in igc_update_rx_stats()
2594 ring->rx_stats.bytes += bytes; in igc_update_rx_stats()
2595 u64_stats_update_end(&ring->rx_syncp); in igc_update_rx_stats()
2597 q_vector->rx.total_packets += packets; in igc_update_rx_stats()
2598 q_vector->rx.total_bytes += bytes; in igc_update_rx_stats()
2604 struct igc_adapter *adapter = q_vector->adapter; in igc_clean_rx_irq()
2605 struct igc_ring *rx_ring = q_vector->rx.ring; in igc_clean_rx_irq()
2606 struct sk_buff *skb = rx_ring->skb; in igc_clean_rx_irq()
2625 rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean); in igc_clean_rx_irq()
2626 size = le16_to_cpu(rx_desc->wb.upper.length); in igc_clean_rx_irq()
2639 pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; in igc_clean_rx_irq()
2644 size -= IGC_TS_HDR_LEN; in igc_clean_rx_irq()
2649 /* Advance the ring next-to-clean */ in igc_clean_rx_irq()
2656 xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq); in igc_clean_rx_irq()
2657 xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring), in igc_clean_rx_irq()
2669 rx_buffer->pagecnt_bias++; in igc_clean_rx_irq()
2689 rx_ring->rx_stats.alloc_failed++; in igc_clean_rx_irq()
2690 rx_buffer->pagecnt_bias++; in igc_clean_rx_irq()
2691 set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); in igc_clean_rx_irq()
2698 /* fetch next buffer in frame if non-eop */ in igc_clean_rx_irq()
2709 total_bytes += skb->len; in igc_clean_rx_irq()
2714 napi_gro_receive(&q_vector->napi, skb); in igc_clean_rx_irq()
2727 rx_ring->skb = skb; in igc_clean_rx_irq()
2740 struct xdp_buff *xdp = &ctx->xdp; in igc_construct_skb_zc()
2741 unsigned int totalsize = xdp->data_end - xdp->data_meta; in igc_construct_skb_zc()
2742 unsigned int metasize = xdp->data - xdp->data_meta; in igc_construct_skb_zc()
2745 net_prefetch(xdp->data_meta); in igc_construct_skb_zc()
2747 skb = napi_alloc_skb(&ring->q_vector->napi, totalsize); in igc_construct_skb_zc()
2751 memcpy(__skb_put(skb, totalsize), xdp->data_meta, in igc_construct_skb_zc()
2759 if (ctx->rx_ts) { in igc_construct_skb_zc()
2760 skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV; in igc_construct_skb_zc()
2761 skb_hwtstamps(skb)->netdev_data = ctx->rx_ts; in igc_construct_skb_zc()
2771 struct igc_ring *ring = q_vector->rx.ring; in igc_dispatch_skb_zc()
2776 ring->rx_stats.alloc_failed++; in igc_dispatch_skb_zc()
2777 set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &ring->flags); in igc_dispatch_skb_zc()
2785 napi_gro_receive(&q_vector->napi, skb); in igc_dispatch_skb_zc()
2792 * igc_xdp_buff fields fall into xdp_buff_xsk->cb in xsk_buff_to_igc_ctx()
2799 struct igc_adapter *adapter = q_vector->adapter; in igc_clean_rx_irq_zc()
2800 struct igc_ring *ring = q_vector->rx.ring; in igc_clean_rx_irq_zc()
2803 u16 ntc = ring->next_to_clean; in igc_clean_rx_irq_zc()
2810 prog = READ_ONCE(adapter->xdp_prog); in igc_clean_rx_irq_zc()
2820 size = le16_to_cpu(desc->wb.upper.length); in igc_clean_rx_irq_zc()
2830 bi = &ring->rx_buffer_info[ntc]; in igc_clean_rx_irq_zc()
2832 ctx = xsk_buff_to_igc_ctx(bi->xdp); in igc_clean_rx_irq_zc()
2833 ctx->rx_desc = desc; in igc_clean_rx_irq_zc()
2836 ctx->rx_ts = bi->xdp->data; in igc_clean_rx_irq_zc()
2838 bi->xdp->data += IGC_TS_HDR_LEN; in igc_clean_rx_irq_zc()
2843 bi->xdp->data_meta += IGC_TS_HDR_LEN; in igc_clean_rx_irq_zc()
2844 size -= IGC_TS_HDR_LEN; in igc_clean_rx_irq_zc()
2846 ctx->rx_ts = NULL; in igc_clean_rx_irq_zc()
2849 bi->xdp->data_end = bi->xdp->data + size; in igc_clean_rx_irq_zc()
2850 xsk_buff_dma_sync_for_cpu(bi->xdp); in igc_clean_rx_irq_zc()
2852 res = __igc_xdp_run_prog(adapter, prog, bi->xdp); in igc_clean_rx_irq_zc()
2858 xsk_buff_free(bi->xdp); in igc_clean_rx_irq_zc()
2866 bi->xdp = NULL; in igc_clean_rx_irq_zc()
2871 if (ntc == ring->count) in igc_clean_rx_irq_zc()
2875 ring->next_to_clean = ntc; in igc_clean_rx_irq_zc()
2886 if (xsk_uses_need_wakeup(ring->xsk_pool)) { in igc_clean_rx_irq_zc()
2887 if (failure || ring->next_to_clean == ring->next_to_use) in igc_clean_rx_irq_zc()
2888 xsk_set_rx_need_wakeup(ring->xsk_pool); in igc_clean_rx_irq_zc()
2890 xsk_clear_rx_need_wakeup(ring->xsk_pool); in igc_clean_rx_irq_zc()
2900 struct igc_ring *ring = q_vector->tx.ring; in igc_update_tx_stats()
2902 u64_stats_update_begin(&ring->tx_syncp); in igc_update_tx_stats()
2903 ring->tx_stats.bytes += bytes; in igc_update_tx_stats()
2904 ring->tx_stats.packets += packets; in igc_update_tx_stats()
2905 u64_stats_update_end(&ring->tx_syncp); in igc_update_tx_stats()
2907 q_vector->tx.total_bytes += bytes; in igc_update_tx_stats()
2908 q_vector->tx.total_packets += packets; in igc_update_tx_stats()
2914 struct igc_ring *tx_ring = meta_req->tx_ring; in igc_xsk_request_timestamp()
2922 if (test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags)) { in igc_xsk_request_timestamp()
2923 adapter = netdev_priv(tx_ring->netdev); in igc_xsk_request_timestamp()
2925 spin_lock_irqsave(&adapter->ptp_tx_lock, lock_flags); in igc_xsk_request_timestamp()
2929 tstamp = &adapter->tx_tstamp[i]; in igc_xsk_request_timestamp()
2931 /* tstamp->skb and tstamp->xsk_tx_buffer are in union. in igc_xsk_request_timestamp()
2932 * When tstamp->skb is equal to NULL, in igc_xsk_request_timestamp()
2933 * tstamp->xsk_tx_buffer is equal to NULL as well. in igc_xsk_request_timestamp()
2937 if (!tstamp->skb) { in igc_xsk_request_timestamp()
2945 adapter->tx_hwtstamp_skipped++; in igc_xsk_request_timestamp()
2946 spin_unlock_irqrestore(&adapter->ptp_tx_lock, in igc_xsk_request_timestamp()
2951 tstamp->start = jiffies; in igc_xsk_request_timestamp()
2952 tstamp->xsk_queue_index = tx_ring->queue_index; in igc_xsk_request_timestamp()
2953 tstamp->xsk_tx_buffer = meta_req->tx_buffer; in igc_xsk_request_timestamp()
2954 tstamp->buffer_type = IGC_TX_BUFFER_TYPE_XSK; in igc_xsk_request_timestamp()
2957 meta_req->tx_buffer->xsk_pending_ts = true; in igc_xsk_request_timestamp()
2963 xsk_tx_metadata_to_compl(meta_req->meta, &tstamp->xsk_meta); in igc_xsk_request_timestamp()
2966 tx_flags |= tstamp->flags; in igc_xsk_request_timestamp()
2967 meta_req->cmd_type |= IGC_SET_FLAG(tx_flags, in igc_xsk_request_timestamp()
2970 meta_req->cmd_type |= IGC_SET_FLAG(tx_flags, in igc_xsk_request_timestamp()
2973 meta_req->cmd_type |= IGC_SET_FLAG(tx_flags, in igc_xsk_request_timestamp()
2976 meta_req->cmd_type |= IGC_SET_FLAG(tx_flags, in igc_xsk_request_timestamp()
2980 spin_unlock_irqrestore(&adapter->ptp_tx_lock, lock_flags); in igc_xsk_request_timestamp()
2992 struct igc_ring *tx_ring = meta_req->tx_ring; in igc_xsk_request_launch_time()
2998 if (!tx_ring->launchtime_enable) in igc_xsk_request_launch_time()
3011 meta_req->tx_buffer = in igc_xsk_request_launch_time()
3012 &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in igc_xsk_request_launch_time()
3024 meta_req->used_desc += used_desc; in igc_xsk_request_launch_time()
3035 struct xsk_buff_pool *pool = ring->xsk_pool; in igc_xdp_xmit_zc()
3042 if (!netif_carrier_ok(ring->netdev)) in igc_xdp_xmit_zc()
3050 ntu = ring->next_to_use; in igc_xdp_xmit_zc()
3075 bi = &ring->tx_buffer_info[ntu]; in igc_xdp_xmit_zc()
3085 ntu = ring->next_to_use; in igc_xdp_xmit_zc()
3091 budget -= meta_req.used_desc; in igc_xdp_xmit_zc()
3094 tx_desc->read.cmd_type_len = cpu_to_le32(meta_req.cmd_type); in igc_xdp_xmit_zc()
3095 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); in igc_xdp_xmit_zc()
3096 tx_desc->read.buffer_addr = cpu_to_le64(dma); in igc_xdp_xmit_zc()
3098 bi->type = IGC_TX_BUFFER_TYPE_XSK; in igc_xdp_xmit_zc()
3099 bi->protocol = 0; in igc_xdp_xmit_zc()
3100 bi->bytecount = xdp_desc.len; in igc_xdp_xmit_zc()
3101 bi->gso_segs = 1; in igc_xdp_xmit_zc()
3102 bi->time_stamp = jiffies; in igc_xdp_xmit_zc()
3103 bi->next_to_watch = tx_desc; in igc_xdp_xmit_zc()
3108 if (ntu == ring->count) in igc_xdp_xmit_zc()
3111 ring->next_to_use = ntu; in igc_xdp_xmit_zc()
3112 budget--; in igc_xdp_xmit_zc()
3124 * igc_clean_tx_irq - Reclaim resources after transmit completes
3132 struct igc_adapter *adapter = q_vector->adapter; in igc_clean_tx_irq()
3134 unsigned int budget = q_vector->tx.work_limit; in igc_clean_tx_irq()
3135 struct igc_ring *tx_ring = q_vector->tx.ring; in igc_clean_tx_irq()
3136 unsigned int i = tx_ring->next_to_clean; in igc_clean_tx_irq()
3141 if (test_bit(__IGC_DOWN, &adapter->state)) in igc_clean_tx_irq()
3144 tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_clean_tx_irq()
3146 i -= tx_ring->count; in igc_clean_tx_irq()
3149 union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; in igc_clean_tx_irq()
3159 if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD))) in igc_clean_tx_irq()
3164 ethtool_mmsv_event_handle(&adapter->fpe.mmsv, in igc_clean_tx_irq()
3170 if (tx_buffer->type == IGC_TX_BUFFER_TYPE_XSK && in igc_clean_tx_irq()
3171 tx_buffer->xsk_pending_ts) in igc_clean_tx_irq()
3175 tx_buffer->next_to_watch = NULL; in igc_clean_tx_irq()
3178 total_bytes += tx_buffer->bytecount; in igc_clean_tx_irq()
3179 total_packets += tx_buffer->gso_segs; in igc_clean_tx_irq()
3181 switch (tx_buffer->type) { in igc_clean_tx_irq()
3186 xdp_return_frame(tx_buffer->xdpf); in igc_clean_tx_irq()
3187 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_irq()
3190 napi_consume_skb(tx_buffer->skb, napi_budget); in igc_clean_tx_irq()
3191 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_irq()
3194 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); in igc_clean_tx_irq()
3204 i -= tx_ring->count; in igc_clean_tx_irq()
3205 tx_buffer = tx_ring->tx_buffer_info; in igc_clean_tx_irq()
3211 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_irq()
3219 i -= tx_ring->count; in igc_clean_tx_irq()
3220 tx_buffer = tx_ring->tx_buffer_info; in igc_clean_tx_irq()
3228 budget--; in igc_clean_tx_irq()
3234 i += tx_ring->count; in igc_clean_tx_irq()
3235 tx_ring->next_to_clean = i; in igc_clean_tx_irq()
3239 if (tx_ring->xsk_pool) { in igc_clean_tx_irq()
3241 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); in igc_clean_tx_irq()
3242 if (xsk_uses_need_wakeup(tx_ring->xsk_pool)) in igc_clean_tx_irq()
3243 xsk_set_tx_need_wakeup(tx_ring->xsk_pool); in igc_clean_tx_irq()
3247 if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { in igc_clean_tx_irq()
3248 struct igc_hw *hw = &adapter->hw; in igc_clean_tx_irq()
3253 clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); in igc_clean_tx_irq()
3254 if (tx_buffer->next_to_watch && in igc_clean_tx_irq()
3255 time_after(jiffies, tx_buffer->time_stamp + in igc_clean_tx_irq()
3256 (adapter->tx_timeout_factor * HZ)) && in igc_clean_tx_irq()
3258 (rd32(IGC_TDH(tx_ring->reg_idx)) != readl(tx_ring->tail)) && in igc_clean_tx_irq()
3259 !tx_ring->oper_gate_closed) { in igc_clean_tx_irq()
3261 netdev_err(tx_ring->netdev, in igc_clean_tx_irq()
3273 tx_ring->queue_index, in igc_clean_tx_irq()
3274 rd32(IGC_TDH(tx_ring->reg_idx)), in igc_clean_tx_irq()
3275 readl(tx_ring->tail), in igc_clean_tx_irq()
3276 tx_ring->next_to_use, in igc_clean_tx_irq()
3277 tx_ring->next_to_clean, in igc_clean_tx_irq()
3278 tx_buffer->time_stamp, in igc_clean_tx_irq()
3279 tx_buffer->next_to_watch, in igc_clean_tx_irq()
3281 tx_buffer->next_to_watch->wb.status); in igc_clean_tx_irq()
3282 netif_stop_subqueue(tx_ring->netdev, in igc_clean_tx_irq()
3283 tx_ring->queue_index); in igc_clean_tx_irq()
3292 netif_carrier_ok(tx_ring->netdev) && in igc_clean_tx_irq()
3298 if (__netif_subqueue_stopped(tx_ring->netdev, in igc_clean_tx_irq()
3299 tx_ring->queue_index) && in igc_clean_tx_irq()
3300 !(test_bit(__IGC_DOWN, &adapter->state))) { in igc_clean_tx_irq()
3301 netif_wake_subqueue(tx_ring->netdev, in igc_clean_tx_irq()
3302 tx_ring->queue_index); in igc_clean_tx_irq()
3304 u64_stats_update_begin(&tx_ring->tx_syncp); in igc_clean_tx_irq()
3305 tx_ring->tx_stats.restart_queue++; in igc_clean_tx_irq()
3306 u64_stats_update_end(&tx_ring->tx_syncp); in igc_clean_tx_irq()
3316 struct igc_hw *hw = &adapter->hw; in igc_find_mac_filter()
3317 int max_entries = hw->mac.rar_entry_count; in igc_find_mac_filter()
3338 return -1; in igc_find_mac_filter()
3343 struct igc_hw *hw = &adapter->hw; in igc_get_avail_mac_filter_slot()
3344 int max_entries = hw->mac.rar_entry_count; in igc_get_avail_mac_filter_slot()
3355 return -1; in igc_get_avail_mac_filter_slot()
3359 * igc_add_mac_filter() - Add MAC address filter
3363 * @queue: If non-negative, queue assignment feature is enabled and frames
3373 struct net_device *dev = adapter->netdev; in igc_add_mac_filter()
3382 return -ENOSPC; in igc_add_mac_filter()
3394 * igc_del_mac_filter() - Delete MAC address filter
3402 struct net_device *dev = adapter->netdev; in igc_del_mac_filter()
3416 igc_set_mac_filter_hw(adapter, 0, type, addr, -1); in igc_del_mac_filter()
3428 * igc_add_vlan_prio_filter() - Add VLAN priority filter
3438 struct net_device *dev = adapter->netdev; in igc_add_vlan_prio_filter()
3439 struct igc_hw *hw = &adapter->hw; in igc_add_vlan_prio_filter()
3446 return -EEXIST; in igc_add_vlan_prio_filter()
3460 * igc_del_vlan_prio_filter() - Delete VLAN priority filter
3466 struct igc_hw *hw = &adapter->hw; in igc_del_vlan_prio_filter()
3476 netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n", in igc_del_vlan_prio_filter()
3482 struct igc_hw *hw = &adapter->hw; in igc_get_avail_etype_filter_slot()
3492 return -1; in igc_get_avail_etype_filter_slot()
3496 * igc_add_etype_filter() - Add ethertype filter
3499 * @queue: If non-negative, queue assignment feature is enabled and frames
3508 struct igc_hw *hw = &adapter->hw; in igc_add_etype_filter()
3514 return -ENOSPC; in igc_add_etype_filter()
3531 netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n", in igc_add_etype_filter()
3538 struct igc_hw *hw = &adapter->hw; in igc_find_etype_filter()
3548 return -1; in igc_find_etype_filter()
3552 * igc_del_etype_filter() - Delete ethertype filter
3558 struct igc_hw *hw = &adapter->hw; in igc_del_etype_filter()
3567 netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n", in igc_del_etype_filter()
3575 struct igc_hw *hw = &adapter->hw; in igc_flex_filter_select()
3579 if (input->index >= MAX_FLEX_FILTER) { in igc_flex_filter_select()
3580 netdev_err(adapter->netdev, "Wrong Flex Filter index selected!\n"); in igc_flex_filter_select()
3581 return -EINVAL; in igc_flex_filter_select()
3587 switch (input->index) { in igc_flex_filter_select()
3604 fhft_index = input->index % 8; in igc_flex_filter_select()
3607 IGC_FHFT_EXT(fhft_index - 4); in igc_flex_filter_select()
3615 struct igc_hw *hw = &adapter->hw; in igc_write_flex_filter_ll()
3616 u8 *data = input->data; in igc_write_flex_filter_ll()
3617 u8 *mask = input->mask; in igc_write_flex_filter_ll()
3627 if (input->length % 8 != 0) { in igc_write_flex_filter_ll()
3628 netdev_err(adapter->netdev, "The length of a flex filter has to be 8 byte aligned!\n"); in igc_write_flex_filter_ll()
3629 return -EINVAL; in igc_write_flex_filter_ll()
3645 queuing = input->length & IGC_FHFT_LENGTH_MASK; in igc_write_flex_filter_ll()
3646 queuing |= FIELD_PREP(IGC_FHFT_QUEUE_MASK, input->rx_queue); in igc_write_flex_filter_ll()
3647 queuing |= FIELD_PREP(IGC_FHFT_PRIO_MASK, input->prio); in igc_write_flex_filter_ll()
3649 if (input->immediate_irq) in igc_write_flex_filter_ll()
3652 if (input->drop) in igc_write_flex_filter_ll()
3686 if (input->index > 8) { in igc_write_flex_filter_ll()
3687 /* Filter 0-7 are enabled via WUFC. The other 24 filters are not. */ in igc_write_flex_filter_ll()
3690 wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8)); in igc_write_flex_filter_ll()
3694 wufc |= (IGC_WUFC_FLX0 << input->index); in igc_write_flex_filter_ll()
3698 netdev_dbg(adapter->netdev, "Added flex filter %u to HW.\n", in igc_write_flex_filter_ll()
3699 input->index); in igc_write_flex_filter_ll()
3711 memcpy(&flex->data[offset], src, len); in igc_flex_filter_add_field()
3720 flex->mask[idx / 8] |= BIT(idx % 8); in igc_flex_filter_add_field()
3725 flex->mask[idx / 8] |= BIT(idx % 8); in igc_flex_filter_add_field()
3731 struct igc_hw *hw = &adapter->hw; in igc_find_avail_flex_filter_slot()
3743 if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8)))) in igc_find_avail_flex_filter_slot()
3748 return -ENOSPC; in igc_find_avail_flex_filter_slot()
3753 struct igc_hw *hw = &adapter->hw; in igc_flex_filter_in_use()
3771 struct igc_nfc_filter *filter = &rule->filter; in igc_add_flex_filter()
3779 return -ENOSPC; in igc_add_flex_filter()
3782 * -> dest_mac [6] in igc_add_flex_filter()
3783 * -> src_mac [6] in igc_add_flex_filter()
3784 * -> tpid [2] in igc_add_flex_filter()
3785 * -> vlan tci [2] in igc_add_flex_filter()
3786 * -> ether type [2] in igc_add_flex_filter()
3787 * -> user data [8] in igc_add_flex_filter()
3788 * -> = 26 bytes => 32 length in igc_add_flex_filter()
3792 flex.rx_queue = rule->action; in igc_add_flex_filter()
3794 vlan = rule->filter.vlan_tci || rule->filter.vlan_etype; in igc_add_flex_filter()
3799 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) in igc_add_flex_filter()
3800 igc_flex_filter_add_field(&flex, &filter->dst_addr, 0, in igc_add_flex_filter()
3804 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) in igc_add_flex_filter()
3805 igc_flex_filter_add_field(&flex, &filter->src_addr, 6, in igc_add_flex_filter()
3809 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) { in igc_add_flex_filter()
3810 __be16 vlan_etype = cpu_to_be16(filter->vlan_etype); in igc_add_flex_filter()
3817 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) in igc_add_flex_filter()
3818 igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14, in igc_add_flex_filter()
3819 sizeof(filter->vlan_tci), NULL); in igc_add_flex_filter()
3822 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { in igc_add_flex_filter()
3823 __be16 etype = cpu_to_be16(filter->etype); in igc_add_flex_filter()
3830 if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) in igc_add_flex_filter()
3831 igc_flex_filter_add_field(&flex, &filter->user_data, in igc_add_flex_filter()
3833 sizeof(filter->user_data), in igc_add_flex_filter()
3834 filter->user_mask); in igc_add_flex_filter()
3841 filter->flex_index = index; in igc_add_flex_filter()
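The layout comment at lines 3782-3788 adds up to 26 bytes of matched fields, and igc_write_flex_filter_ll() rejects lengths that are not a multiple of 8 (lines 3627-3629), hence the 32-byte programmed length. A one-line rounding sketch:

#include <stdio.h>

int main(void)
{
        unsigned int fields = 6 + 6 + 2 + 2 + 2 + 8;    /* dst, src, tpid, tci, etype, user data */
        unsigned int length = (fields + 7) & ~7u;       /* round up to 8-byte units */

        printf("fields=%u length=%u\n", fields, length);        /* 26 -> 32 */
        return 0;
}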
3849 struct igc_hw *hw = &adapter->hw; in igc_del_flex_filter()
3859 wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8)); in igc_del_flex_filter()
3879 struct igc_hw *hw = &adapter->hw; in igc_set_default_queue_filter()
3898 if (rule->flex) { in igc_enable_nfc_rule()
3902 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { in igc_enable_nfc_rule()
3903 err = igc_add_etype_filter(adapter, rule->filter.etype, in igc_enable_nfc_rule()
3904 rule->action); in igc_enable_nfc_rule()
3909 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { in igc_enable_nfc_rule()
3911 rule->filter.src_addr, rule->action); in igc_enable_nfc_rule()
3916 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { in igc_enable_nfc_rule()
3918 rule->filter.dst_addr, rule->action); in igc_enable_nfc_rule()
3923 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { in igc_enable_nfc_rule()
3924 int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci); in igc_enable_nfc_rule()
3926 err = igc_add_vlan_prio_filter(adapter, prio, rule->action); in igc_enable_nfc_rule()
3931 if (rule->filter.match_flags & IGC_FILTER_FLAG_DEFAULT_QUEUE) in igc_enable_nfc_rule()
3932 igc_set_default_queue_filter(adapter, rule->action); in igc_enable_nfc_rule()
3940 if (rule->flex) { in igc_disable_nfc_rule()
3941 igc_del_flex_filter(adapter, rule->filter.flex_index); in igc_disable_nfc_rule()
3945 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) in igc_disable_nfc_rule()
3946 igc_del_etype_filter(adapter, rule->filter.etype); in igc_disable_nfc_rule()
3948 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { in igc_disable_nfc_rule()
3949 int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci); in igc_disable_nfc_rule()
3954 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) in igc_disable_nfc_rule()
3956 rule->filter.src_addr); in igc_disable_nfc_rule()
3958 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) in igc_disable_nfc_rule()
3960 rule->filter.dst_addr); in igc_disable_nfc_rule()
3962 if (rule->filter.match_flags & IGC_FILTER_FLAG_DEFAULT_QUEUE) in igc_disable_nfc_rule()
3967 * igc_get_nfc_rule() - Get NFC rule
3971 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3980 list_for_each_entry(rule, &adapter->nfc_rule_list, list) { in igc_get_nfc_rule()
3981 if (rule->location == location) in igc_get_nfc_rule()
3983 if (rule->location > location) in igc_get_nfc_rule()
3991 * igc_del_nfc_rule() - Delete NFC rule
3997 * Context: Expects adapter->nfc_rule_lock to be held by caller.
4003 list_del(&rule->list); in igc_del_nfc_rule()
4004 adapter->nfc_rule_count--; in igc_del_nfc_rule()
4013 mutex_lock(&adapter->nfc_rule_lock); in igc_flush_nfc_rules()
4015 list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list) in igc_flush_nfc_rules()
4018 mutex_unlock(&adapter->nfc_rule_lock); in igc_flush_nfc_rules()
4022 * igc_add_nfc_rule() - Add NFC rule
4028 * Context: Expects adapter->nfc_rule_lock to be held by caller.
4042 list_for_each_entry(cur, &adapter->nfc_rule_list, list) { in igc_add_nfc_rule()
4043 if (cur->location >= rule->location) in igc_add_nfc_rule()
4048 list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list); in igc_add_nfc_rule()
4049 adapter->nfc_rule_count++; in igc_add_nfc_rule()
4057 mutex_lock(&adapter->nfc_rule_lock); in igc_restore_nfc_rules()
4059 list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list) in igc_restore_nfc_rules()
4062 mutex_unlock(&adapter->nfc_rule_lock); in igc_restore_nfc_rules()
4069 return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1); in igc_uc_sync()
4081 * igc_enable_empty_addr_recv - Enable Rx of packets with all-zeroes MAC address
4084 * Frame preemption verification requires that packets with the all-zeroes
4086  * MAC address be received; this adds the all-zeroes destination address to the list of acceptable addresses. in igc_enable_empty_addr_recv()
4094 return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, empty, -1); in igc_enable_empty_addr_recv()
4105 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
4111 * promiscuous mode, and all-multi behavior.
4116 struct igc_hw *hw = &adapter->hw; in igc_set_rx_mode()
4121 if (netdev->flags & IFF_PROMISC) { in igc_set_rx_mode()
4124 if (netdev->flags & IFF_ALLMULTI) { in igc_set_rx_mode()
4149 if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB) in igc_set_rx_mode()
4156 * igc_configure - configure the hardware for RX and TX
4161 struct net_device *netdev = adapter->netdev; in igc_configure()
4179 igc_rx_fifo_flush_base(&adapter->hw); in igc_configure()
4185 for (i = 0; i < adapter->num_rx_queues; i++) { in igc_configure()
4186 struct igc_ring *ring = adapter->rx_ring[i]; in igc_configure()
4188 if (ring->xsk_pool) in igc_configure()
4196 * igc_write_ivar - configure ivar for given MSI-X vector
4222 struct igc_adapter *adapter = q_vector->adapter; in igc_assign_vector()
4223 struct igc_hw *hw = &adapter->hw; in igc_assign_vector()
4227 if (q_vector->rx.ring) in igc_assign_vector()
4228 rx_queue = q_vector->rx.ring->reg_idx; in igc_assign_vector()
4229 if (q_vector->tx.ring) in igc_assign_vector()
4230 tx_queue = q_vector->tx.ring->reg_idx; in igc_assign_vector()
4232 switch (hw->mac.type) { in igc_assign_vector()
4242 q_vector->eims_value = BIT(msix_vector); in igc_assign_vector()
4245 WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n"); in igc_assign_vector()
4250 adapter->eims_enable_mask |= q_vector->eims_value; in igc_assign_vector()
4253 q_vector->set_itr = 1; in igc_assign_vector()
4257 * igc_configure_msix - Configure MSI-X hardware
4261 * generate MSI-X interrupts.
4265 struct igc_hw *hw = &adapter->hw; in igc_configure_msix()
4269 adapter->eims_enable_mask = 0; in igc_configure_msix()
4272 switch (hw->mac.type) { in igc_configure_msix()
4274 /* Turn on MSI-X capability first, or our settings in igc_configure_msix()
4282 adapter->eims_other = BIT(vector); in igc_configure_msix()
4288 /* do nothing, since nothing else supports MSI-X */ in igc_configure_msix()
4290 } /* switch (hw->mac.type) */ in igc_configure_msix()
4292 adapter->eims_enable_mask |= adapter->eims_other; in igc_configure_msix()
4294 for (i = 0; i < adapter->num_q_vectors; i++) in igc_configure_msix()
4295 igc_assign_vector(adapter->q_vector[i], vector++); in igc_configure_msix()
4301 * igc_irq_enable - Enable default interrupt generation settings
4306 struct igc_hw *hw = &adapter->hw; in igc_irq_enable()
4308 if (adapter->msix_entries) { in igc_irq_enable()
4312 wr32(IGC_EIAC, regval | adapter->eims_enable_mask); in igc_irq_enable()
4314 wr32(IGC_EIAM, regval | adapter->eims_enable_mask); in igc_irq_enable()
4315 wr32(IGC_EIMS, adapter->eims_enable_mask); in igc_irq_enable()
4324 * igc_irq_disable - Mask off interrupt generation on the NIC
4329 struct igc_hw *hw = &adapter->hw; in igc_irq_disable()
4331 if (adapter->msix_entries) { in igc_irq_disable()
4334 wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask); in igc_irq_disable()
4335 wr32(IGC_EIMC, adapter->eims_enable_mask); in igc_irq_disable()
4337 wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask); in igc_irq_disable()
4344 if (adapter->msix_entries) { in igc_irq_disable()
4347 synchronize_irq(adapter->msix_entries[vector++].vector); in igc_irq_disable()
4349 for (i = 0; i < adapter->num_q_vectors; i++) in igc_irq_disable()
4350 synchronize_irq(adapter->msix_entries[vector++].vector); in igc_irq_disable()
4352 synchronize_irq(adapter->pdev->irq); in igc_irq_disable()
4363 if (adapter->rss_queues > (max_rss_queues / 2)) in igc_set_flag_queue_pairs()
4364 adapter->flags |= IGC_FLAG_QUEUE_PAIRS; in igc_set_flag_queue_pairs()
4366 adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS; in igc_set_flag_queue_pairs()
4379 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); in igc_init_queue_configuration()
4385 * igc_reset_q_vector - Reset config for interrupt vector
4394 struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; in igc_reset_q_vector()
4402 if (q_vector->tx.ring) in igc_reset_q_vector()
4403 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; in igc_reset_q_vector()
4405 if (q_vector->rx.ring) in igc_reset_q_vector()
4406 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; in igc_reset_q_vector()
4408 netif_napi_del(&q_vector->napi); in igc_reset_q_vector()
4412 * igc_free_q_vector - Free memory allocated for specific interrupt vector
4420 struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; in igc_free_q_vector()
4422 adapter->q_vector[v_idx] = NULL; in igc_free_q_vector()
4432 * igc_free_q_vectors - Free memory allocated for interrupt vectors
4441 int v_idx = adapter->num_q_vectors; in igc_free_q_vectors()
4443 adapter->num_tx_queues = 0; in igc_free_q_vectors()
4444 adapter->num_rx_queues = 0; in igc_free_q_vectors()
4445 adapter->num_q_vectors = 0; in igc_free_q_vectors()
4447 while (v_idx--) { in igc_free_q_vectors()
4454 * igc_update_itr - update the dynamic ITR value based on statistics
4465 * NOTE: These calculations are only valid when operating in a single-
4471 unsigned int packets = ring_container->total_packets; in igc_update_itr()
4472 unsigned int bytes = ring_container->total_bytes; in igc_update_itr()
4473 u8 itrval = ring_container->itr; in igc_update_itr()
4513 ring_container->total_bytes = 0; in igc_update_itr()
4514 ring_container->total_packets = 0; in igc_update_itr()
4517 ring_container->itr = itrval; in igc_update_itr()
4522 struct igc_adapter *adapter = q_vector->adapter; in igc_set_itr()
4523 u32 new_itr = q_vector->itr_val; in igc_set_itr()
4526 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ in igc_set_itr()
4527 switch (adapter->link_speed) { in igc_set_itr()
4537 igc_update_itr(q_vector, &q_vector->tx); in igc_set_itr()
4538 igc_update_itr(q_vector, &q_vector->rx); in igc_set_itr()
4540 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); in igc_set_itr()
4544 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || in igc_set_itr()
4545 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) in igc_set_itr()
4564 if (new_itr != q_vector->itr_val) { in igc_set_itr()
4569 new_itr = new_itr > q_vector->itr_val ? in igc_set_itr()
4570 max((new_itr * q_vector->itr_val) / in igc_set_itr()
4571 (new_itr + (q_vector->itr_val >> 2)), in igc_set_itr()
4579 q_vector->itr_val = new_itr; in igc_set_itr()
4580 q_vector->set_itr = 1; in igc_set_itr()
4586 int v_idx = adapter->num_q_vectors; in igc_reset_interrupt_capability()
4588 if (adapter->msix_entries) { in igc_reset_interrupt_capability()
4589 pci_disable_msix(adapter->pdev); in igc_reset_interrupt_capability()
4590 kfree(adapter->msix_entries); in igc_reset_interrupt_capability()
4591 adapter->msix_entries = NULL; in igc_reset_interrupt_capability()
4592 } else if (adapter->flags & IGC_FLAG_HAS_MSI) { in igc_reset_interrupt_capability()
4593 pci_disable_msi(adapter->pdev); in igc_reset_interrupt_capability()
4596 while (v_idx--) in igc_reset_interrupt_capability()
4601 * igc_set_interrupt_capability - set MSI or MSI-X if supported
4603 * @msix: boolean value for MSI-X capability
4616 adapter->flags |= IGC_FLAG_HAS_MSIX; in igc_set_interrupt_capability()
4619 adapter->num_rx_queues = adapter->rss_queues; in igc_set_interrupt_capability()
4621 adapter->num_tx_queues = adapter->rss_queues; in igc_set_interrupt_capability()
4624 numvecs = adapter->num_rx_queues; in igc_set_interrupt_capability()
4627 if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) in igc_set_interrupt_capability()
4628 numvecs += adapter->num_tx_queues; in igc_set_interrupt_capability()
4631 adapter->num_q_vectors = numvecs; in igc_set_interrupt_capability()
4636 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), in igc_set_interrupt_capability()
4639 if (!adapter->msix_entries) in igc_set_interrupt_capability()
4644 adapter->msix_entries[i].entry = i; in igc_set_interrupt_capability()
4646 err = pci_enable_msix_range(adapter->pdev, in igc_set_interrupt_capability()
4647 adapter->msix_entries, in igc_set_interrupt_capability()
4653 kfree(adapter->msix_entries); in igc_set_interrupt_capability()
4654 adapter->msix_entries = NULL; in igc_set_interrupt_capability()
4659 adapter->flags &= ~IGC_FLAG_HAS_MSIX; in igc_set_interrupt_capability()
4661 adapter->rss_queues = 1; in igc_set_interrupt_capability()
4662 adapter->flags |= IGC_FLAG_QUEUE_PAIRS; in igc_set_interrupt_capability()
4663 adapter->num_rx_queues = 1; in igc_set_interrupt_capability()
4664 adapter->num_tx_queues = 1; in igc_set_interrupt_capability()
4665 adapter->num_q_vectors = 1; in igc_set_interrupt_capability()
4666 if (!pci_enable_msi(adapter->pdev)) in igc_set_interrupt_capability()
4667 adapter->flags |= IGC_FLAG_HAS_MSI; in igc_set_interrupt_capability()
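/* Sketch of the vector accounting above (hypothetical helper, illustrative
 * only): with IGC_FLAG_QUEUE_PAIRS set a Tx and an Rx queue share one MSI-X
 * vector, so only the Rx queue count is counted; the extra vector for the
 * link/"other" interrupt is assumed to be added by lines elided before the
 * kcalloc().
 */
static unsigned int example_numvecs(unsigned int rss_queues, bool queue_pairs)
{
	unsigned int numvecs = rss_queues;	/* one vector per Rx queue */

	if (!queue_pairs)
		numvecs += rss_queues;		/* separate vectors for Tx */

	return numvecs + 1;	/* assumed +1 for the link/other vector */
}
/* e.g. 4 RSS queues: paired -> 5 vectors, unpaired -> 9 vectors */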
4671 * igc_update_ring_itr - update the dynamic ITR value based on packet size
4686 struct igc_adapter *adapter = q_vector->adapter; in igc_update_ring_itr()
4687 int new_val = q_vector->itr_val; in igc_update_ring_itr()
4691 /* For non-gigabit speeds, just fix the interrupt rate at 4000 in igc_update_ring_itr()
4692 * ints/sec - ITR timer value of 120 ticks. in igc_update_ring_itr()
4694 switch (adapter->link_speed) { in igc_update_ring_itr()
4703 packets = q_vector->rx.total_packets; in igc_update_ring_itr()
4705 avg_wire_size = q_vector->rx.total_bytes / packets; in igc_update_ring_itr()
4707 packets = q_vector->tx.total_packets; in igc_update_ring_itr()
4710 q_vector->tx.total_bytes / packets); in igc_update_ring_itr()
4722 /* Give a little boost to mid-size frames */ in igc_update_ring_itr()
4730 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || in igc_update_ring_itr()
4731 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) in igc_update_ring_itr()
4735 if (new_val != q_vector->itr_val) { in igc_update_ring_itr()
4736 q_vector->itr_val = new_val; in igc_update_ring_itr()
4737 q_vector->set_itr = 1; in igc_update_ring_itr()
4740 q_vector->rx.total_bytes = 0; in igc_update_ring_itr()
4741 q_vector->rx.total_packets = 0; in igc_update_ring_itr()
4742 q_vector->tx.total_bytes = 0; in igc_update_ring_itr()
4743 q_vector->tx.total_packets = 0; in igc_update_ring_itr()
4748 struct igc_adapter *adapter = q_vector->adapter; in igc_ring_irq_enable()
4749 struct igc_hw *hw = &adapter->hw; in igc_ring_irq_enable()
4751 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || in igc_ring_irq_enable()
4752 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { in igc_ring_irq_enable()
4753 if (adapter->num_q_vectors == 1) in igc_ring_irq_enable()
4759 if (!test_bit(__IGC_DOWN, &adapter->state)) { in igc_ring_irq_enable()
4760 if (adapter->msix_entries) in igc_ring_irq_enable()
4761 wr32(IGC_EIMS, q_vector->eims_value); in igc_ring_irq_enable()
4770 head->ring = ring; in igc_add_ring()
4771 head->count++; in igc_add_ring()
4775 * igc_cache_ring_register - Descriptor ring to register mapping
4778 * Once we know the feature-set enabled for the device, we'll cache
4785 switch (adapter->hw.mac.type) { in igc_cache_ring_register()
4788 for (; i < adapter->num_rx_queues; i++) in igc_cache_ring_register()
4789 adapter->rx_ring[i]->reg_idx = i; in igc_cache_ring_register()
4790 for (; j < adapter->num_tx_queues; j++) in igc_cache_ring_register()
4791 adapter->tx_ring[j]->reg_idx = j; in igc_cache_ring_register()
4797 * igc_poll - NAPI Rx polling callback
4806 struct igc_ring *rx_ring = q_vector->rx.ring; in igc_poll()
4810 if (q_vector->tx.ring) in igc_poll()
4814 int cleaned = rx_ring->xsk_pool ? in igc_poll()
4827 /* Exit the polling mode, but don't re-enable interrupts if stack might in igc_poll()
4828 * poll us due to busy-polling in igc_poll()
4833 return min(work_done, budget - 1); in igc_poll()
4837 * igc_alloc_q_vector - Allocate memory for a single interrupt vector
4846 * We allocate one q_vector. If allocation fails we return -ENOMEM.
4859 return -ENOMEM; in igc_alloc_q_vector()
4864 q_vector = adapter->q_vector[v_idx]; in igc_alloc_q_vector()
4871 return -ENOMEM; in igc_alloc_q_vector()
4874 netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll); in igc_alloc_q_vector()
4877 adapter->q_vector[v_idx] = q_vector; in igc_alloc_q_vector()
4878 q_vector->adapter = adapter; in igc_alloc_q_vector()
4881 q_vector->tx.work_limit = adapter->tx_work_limit; in igc_alloc_q_vector()
4884 q_vector->itr_register = adapter->io_addr + IGC_EITR(0); in igc_alloc_q_vector()
4885 q_vector->itr_val = IGC_START_ITR; in igc_alloc_q_vector()
4888 ring = q_vector->ring; in igc_alloc_q_vector()
4893 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) in igc_alloc_q_vector()
4894 q_vector->itr_val = adapter->rx_itr_setting; in igc_alloc_q_vector()
4897 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) in igc_alloc_q_vector()
4898 q_vector->itr_val = adapter->tx_itr_setting; in igc_alloc_q_vector()
4903 ring->dev = &adapter->pdev->dev; in igc_alloc_q_vector()
4904 ring->netdev = adapter->netdev; in igc_alloc_q_vector()
4907 ring->q_vector = q_vector; in igc_alloc_q_vector()
4910 igc_add_ring(ring, &q_vector->tx); in igc_alloc_q_vector()
4913 ring->count = adapter->tx_ring_count; in igc_alloc_q_vector()
4914 ring->queue_index = txr_idx; in igc_alloc_q_vector()
4917 adapter->tx_ring[txr_idx] = ring; in igc_alloc_q_vector()
4925 ring->dev = &adapter->pdev->dev; in igc_alloc_q_vector()
4926 ring->netdev = adapter->netdev; in igc_alloc_q_vector()
4929 ring->q_vector = q_vector; in igc_alloc_q_vector()
4932 igc_add_ring(ring, &q_vector->rx); in igc_alloc_q_vector()
4935 ring->count = adapter->rx_ring_count; in igc_alloc_q_vector()
4936 ring->queue_index = rxr_idx; in igc_alloc_q_vector()
4939 adapter->rx_ring[rxr_idx] = ring; in igc_alloc_q_vector()
4946 * igc_alloc_q_vectors - Allocate memory for interrupt vectors
4950 * return -ENOMEM.
4954 int rxr_remaining = adapter->num_rx_queues; in igc_alloc_q_vectors()
4955 int txr_remaining = adapter->num_tx_queues; in igc_alloc_q_vectors()
4957 int q_vectors = adapter->num_q_vectors; in igc_alloc_q_vectors()
4969 rxr_remaining--; in igc_alloc_q_vectors()
4975 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); in igc_alloc_q_vectors()
4976 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); in igc_alloc_q_vectors()
4985 rxr_remaining -= rqpv; in igc_alloc_q_vectors()
4986 txr_remaining -= tqpv; in igc_alloc_q_vectors()
4994 adapter->num_tx_queues = 0; in igc_alloc_q_vectors()
4995 adapter->num_rx_queues = 0; in igc_alloc_q_vectors()
4996 adapter->num_q_vectors = 0; in igc_alloc_q_vectors()
4998 while (v_idx--) in igc_alloc_q_vectors()
5001 return -ENOMEM; in igc_alloc_q_vectors()
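/* The DIV_ROUND_UP() terms above spread the remaining rings as evenly as
 * possible over the remaining vectors. A worked trace for an assumed, purely
 * illustrative 4 Rx + 4 Tx queues on 3 vectors:
 *   v_idx 0: rqpv = DIV_ROUND_UP(4, 3) = 2, tqpv = 2 -> 2 Rx + 2 Tx rings
 *   v_idx 1: rqpv = DIV_ROUND_UP(2, 2) = 1, tqpv = 1 -> 1 Rx + 1 Tx ring
 *   v_idx 2: rqpv = DIV_ROUND_UP(1, 1) = 1, tqpv = 1 -> 1 Rx + 1 Tx ring
 * (when there are at least as many vectors as rings, the earlier loop instead
 *  gives each vector a single ring)
 */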
5005 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
5007 * @msix: boolean for MSI-X capability
5013 struct net_device *dev = adapter->netdev; in igc_init_interrupt_scheme()
5034 * igc_sw_init - Initialize general software structures (struct igc_adapter)
5043 struct net_device *netdev = adapter->netdev; in igc_sw_init()
5044 struct pci_dev *pdev = adapter->pdev; in igc_sw_init()
5045 struct igc_hw *hw = &adapter->hw; in igc_sw_init()
5047 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); in igc_sw_init()
5050 adapter->tx_ring_count = IGC_DEFAULT_TXD; in igc_sw_init()
5051 adapter->rx_ring_count = IGC_DEFAULT_RXD; in igc_sw_init()
5054 adapter->rx_itr_setting = IGC_DEFAULT_ITR; in igc_sw_init()
5055 adapter->tx_itr_setting = IGC_DEFAULT_ITR; in igc_sw_init()
5058 adapter->tx_work_limit = IGC_DEFAULT_TX_WORK; in igc_sw_init()
5061 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + in igc_sw_init()
5063 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; in igc_sw_init()
5065 mutex_init(&adapter->nfc_rule_lock); in igc_sw_init()
5066 INIT_LIST_HEAD(&adapter->nfc_rule_list); in igc_sw_init()
5067 adapter->nfc_rule_count = 0; in igc_sw_init()
5069 spin_lock_init(&adapter->stats64_lock); in igc_sw_init()
5070 spin_lock_init(&adapter->qbv_tx_lock); in igc_sw_init()
5071 /* Assume MSI-X interrupts, will be checked during IRQ allocation */ in igc_sw_init()
5072 adapter->flags |= IGC_FLAG_HAS_MSIX; in igc_sw_init()
5079 return -ENOMEM; in igc_sw_init()
5085 set_bit(__IGC_DOWN, &adapter->state); in igc_sw_init()
5093 struct igc_q_vector *q_vector = adapter->q_vector[vector]; in igc_set_queue_napi()
5095 if (q_vector->rx.ring) in igc_set_queue_napi()
5096 netif_queue_set_napi(adapter->netdev, in igc_set_queue_napi()
5097 q_vector->rx.ring->queue_index, in igc_set_queue_napi()
5100 if (q_vector->tx.ring) in igc_set_queue_napi()
5101 netif_queue_set_napi(adapter->netdev, in igc_set_queue_napi()
5102 q_vector->tx.ring->queue_index, in igc_set_queue_napi()
5107 * igc_up - Open the interface and prepare it to handle traffic
5112 struct igc_hw *hw = &adapter->hw; in igc_up()
5119 clear_bit(__IGC_DOWN, &adapter->state); in igc_up()
5121 for (i = 0; i < adapter->num_q_vectors; i++) { in igc_up()
5122 napi = &adapter->q_vector[i]->napi; in igc_up()
5127 if (adapter->msix_entries) in igc_up()
5130 igc_assign_vector(adapter->q_vector[0], 0); in igc_up()
5136 netif_tx_start_all_queues(adapter->netdev); in igc_up()
5139 hw->mac.get_link_status = true; in igc_up()
5140 schedule_work(&adapter->watchdog_task); in igc_up()
5144 * igc_update_stats - Update the board statistics counters
5149 struct rtnl_link_stats64 *net_stats = &adapter->stats64; in igc_update_stats()
5150 struct pci_dev *pdev = adapter->pdev; in igc_update_stats()
5151 struct igc_hw *hw = &adapter->hw; in igc_update_stats()
5161 if (adapter->link_speed == 0) in igc_update_stats()
5170 for (i = 0; i < adapter->num_rx_queues; i++) { in igc_update_stats()
5171 struct igc_ring *ring = adapter->rx_ring[i]; in igc_update_stats()
5174 if (hw->mac.type >= igc_i225) in igc_update_stats()
5178 ring->rx_stats.drops += rqdpc; in igc_update_stats()
5179 net_stats->rx_fifo_errors += rqdpc; in igc_update_stats()
5183 start = u64_stats_fetch_begin(&ring->rx_syncp); in igc_update_stats()
5184 _bytes = ring->rx_stats.bytes; in igc_update_stats()
5185 _packets = ring->rx_stats.packets; in igc_update_stats()
5186 } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); in igc_update_stats()
5191 net_stats->rx_bytes = bytes; in igc_update_stats()
5192 net_stats->rx_packets = packets; in igc_update_stats()
5196 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_update_stats()
5197 struct igc_ring *ring = adapter->tx_ring[i]; in igc_update_stats()
5200 start = u64_stats_fetch_begin(&ring->tx_syncp); in igc_update_stats()
5201 _bytes = ring->tx_stats.bytes; in igc_update_stats()
5202 _packets = ring->tx_stats.packets; in igc_update_stats()
5203 } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); in igc_update_stats()
5207 net_stats->tx_bytes = bytes; in igc_update_stats()
5208 net_stats->tx_packets = packets; in igc_update_stats()
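/* The fetch/retry loops above pair with u64_stats_update_begin()/end() on the
 * writer side, as done in the Rx/Tx clean paths elsewhere in this file. A
 * minimal sketch of the matching update (total_packets/total_bytes are
 * stand-in local names):
 *
 *	u64_stats_update_begin(&rx_ring->rx_syncp);
 *	rx_ring->rx_stats.packets += total_packets;
 *	rx_ring->rx_stats.bytes += total_bytes;
 *	u64_stats_update_end(&rx_ring->rx_syncp);
 */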
5212 adapter->stats.crcerrs += rd32(IGC_CRCERRS); in igc_update_stats()
5213 adapter->stats.gprc += rd32(IGC_GPRC); in igc_update_stats()
5214 adapter->stats.gorc += rd32(IGC_GORCL); in igc_update_stats()
5216 adapter->stats.bprc += rd32(IGC_BPRC); in igc_update_stats()
5217 adapter->stats.mprc += rd32(IGC_MPRC); in igc_update_stats()
5218 adapter->stats.roc += rd32(IGC_ROC); in igc_update_stats()
5220 adapter->stats.prc64 += rd32(IGC_PRC64); in igc_update_stats()
5221 adapter->stats.prc127 += rd32(IGC_PRC127); in igc_update_stats()
5222 adapter->stats.prc255 += rd32(IGC_PRC255); in igc_update_stats()
5223 adapter->stats.prc511 += rd32(IGC_PRC511); in igc_update_stats()
5224 adapter->stats.prc1023 += rd32(IGC_PRC1023); in igc_update_stats()
5225 adapter->stats.prc1522 += rd32(IGC_PRC1522); in igc_update_stats()
5226 adapter->stats.tlpic += rd32(IGC_TLPIC); in igc_update_stats()
5227 adapter->stats.rlpic += rd32(IGC_RLPIC); in igc_update_stats()
5228 adapter->stats.hgptc += rd32(IGC_HGPTC); in igc_update_stats()
5231 adapter->stats.mpc += mpc; in igc_update_stats()
5232 net_stats->rx_fifo_errors += mpc; in igc_update_stats()
5233 adapter->stats.scc += rd32(IGC_SCC); in igc_update_stats()
5234 adapter->stats.ecol += rd32(IGC_ECOL); in igc_update_stats()
5235 adapter->stats.mcc += rd32(IGC_MCC); in igc_update_stats()
5236 adapter->stats.latecol += rd32(IGC_LATECOL); in igc_update_stats()
5237 adapter->stats.dc += rd32(IGC_DC); in igc_update_stats()
5238 adapter->stats.rlec += rd32(IGC_RLEC); in igc_update_stats()
5239 adapter->stats.xonrxc += rd32(IGC_XONRXC); in igc_update_stats()
5240 adapter->stats.xontxc += rd32(IGC_XONTXC); in igc_update_stats()
5241 adapter->stats.xoffrxc += rd32(IGC_XOFFRXC); in igc_update_stats()
5242 adapter->stats.xofftxc += rd32(IGC_XOFFTXC); in igc_update_stats()
5243 adapter->stats.fcruc += rd32(IGC_FCRUC); in igc_update_stats()
5244 adapter->stats.gptc += rd32(IGC_GPTC); in igc_update_stats()
5245 adapter->stats.gotc += rd32(IGC_GOTCL); in igc_update_stats()
5247 adapter->stats.rnbc += rd32(IGC_RNBC); in igc_update_stats()
5248 adapter->stats.ruc += rd32(IGC_RUC); in igc_update_stats()
5249 adapter->stats.rfc += rd32(IGC_RFC); in igc_update_stats()
5250 adapter->stats.rjc += rd32(IGC_RJC); in igc_update_stats()
5251 adapter->stats.tor += rd32(IGC_TORH); in igc_update_stats()
5252 adapter->stats.tot += rd32(IGC_TOTH); in igc_update_stats()
5253 adapter->stats.tpr += rd32(IGC_TPR); in igc_update_stats()
5255 adapter->stats.ptc64 += rd32(IGC_PTC64); in igc_update_stats()
5256 adapter->stats.ptc127 += rd32(IGC_PTC127); in igc_update_stats()
5257 adapter->stats.ptc255 += rd32(IGC_PTC255); in igc_update_stats()
5258 adapter->stats.ptc511 += rd32(IGC_PTC511); in igc_update_stats()
5259 adapter->stats.ptc1023 += rd32(IGC_PTC1023); in igc_update_stats()
5260 adapter->stats.ptc1522 += rd32(IGC_PTC1522); in igc_update_stats()
5262 adapter->stats.mptc += rd32(IGC_MPTC); in igc_update_stats()
5263 adapter->stats.bptc += rd32(IGC_BPTC); in igc_update_stats()
5265 adapter->stats.tpt += rd32(IGC_TPT); in igc_update_stats()
5266 adapter->stats.colc += rd32(IGC_COLC); in igc_update_stats()
5267 adapter->stats.colc += rd32(IGC_RERC); in igc_update_stats()
5269 adapter->stats.algnerrc += rd32(IGC_ALGNERRC); in igc_update_stats()
5271 adapter->stats.tsctc += rd32(IGC_TSCTC); in igc_update_stats()
5273 adapter->stats.iac += rd32(IGC_IAC); in igc_update_stats()
5276 net_stats->multicast = adapter->stats.mprc; in igc_update_stats()
5277 net_stats->collisions = adapter->stats.colc; in igc_update_stats()
5284 net_stats->rx_errors = adapter->stats.rxerrc + in igc_update_stats()
5285 adapter->stats.crcerrs + adapter->stats.algnerrc + in igc_update_stats()
5286 adapter->stats.ruc + adapter->stats.roc + in igc_update_stats()
5287 adapter->stats.cexterr; in igc_update_stats()
5288 net_stats->rx_length_errors = adapter->stats.ruc + in igc_update_stats()
5289 adapter->stats.roc; in igc_update_stats()
5290 net_stats->rx_crc_errors = adapter->stats.crcerrs; in igc_update_stats()
5291 net_stats->rx_frame_errors = adapter->stats.algnerrc; in igc_update_stats()
5292 net_stats->rx_missed_errors = adapter->stats.mpc; in igc_update_stats()
5295 net_stats->tx_errors = adapter->stats.ecol + in igc_update_stats()
5296 adapter->stats.latecol; in igc_update_stats()
5297 net_stats->tx_aborted_errors = adapter->stats.ecol; in igc_update_stats()
5298 net_stats->tx_window_errors = adapter->stats.latecol; in igc_update_stats()
5299 net_stats->tx_carrier_errors = adapter->stats.tncrs; in igc_update_stats()
5302 net_stats->tx_dropped = adapter->stats.txdrop; in igc_update_stats()
5305 adapter->stats.mgptc += rd32(IGC_MGTPTC); in igc_update_stats()
5306 adapter->stats.mgprc += rd32(IGC_MGTPRC); in igc_update_stats()
5307 adapter->stats.mgpdc += rd32(IGC_MGTPDC); in igc_update_stats()
5311 * igc_down - Close the interface
5316 struct net_device *netdev = adapter->netdev; in igc_down()
5317 struct igc_hw *hw = &adapter->hw; in igc_down()
5321 set_bit(__IGC_DOWN, &adapter->state); in igc_down()
5325 if (pci_device_is_present(adapter->pdev)) { in igc_down()
5337 if (pci_device_is_present(adapter->pdev)) { in igc_down()
5349 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; in igc_down()
5351 for (i = 0; i < adapter->num_q_vectors; i++) { in igc_down()
5352 if (adapter->q_vector[i]) { in igc_down()
5353 napi_synchronize(&adapter->q_vector[i]->napi); in igc_down()
5355 napi_disable(&adapter->q_vector[i]->napi); in igc_down()
5359 timer_delete_sync(&adapter->watchdog_timer); in igc_down()
5360 timer_delete_sync(&adapter->phy_info_timer); in igc_down()
5363 spin_lock(&adapter->stats64_lock); in igc_down()
5365 spin_unlock(&adapter->stats64_lock); in igc_down()
5367 adapter->link_speed = 0; in igc_down()
5368 adapter->link_duplex = 0; in igc_down()
5370 if (!pci_channel_offline(adapter->pdev)) in igc_down()
5374 adapter->flags &= ~IGC_FLAG_VLAN_PROMISC; in igc_down()
5380 if (adapter->fpe.mmsv.pmac_enabled) in igc_down()
5381 ethtool_mmsv_stop(&adapter->fpe.mmsv); in igc_down()
5386 while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) in igc_reinit_locked()
5390 clear_bit(__IGC_RESETTING, &adapter->state); in igc_reinit_locked()
5401 if (test_bit(__IGC_DOWN, &adapter->state) || in igc_reset_task()
5402 test_bit(__IGC_RESETTING, &adapter->state)) { in igc_reset_task()
5409 netdev_err(adapter->netdev, "Reset adapter\n"); in igc_reset_task()
5415 * igc_change_mtu - Change the Maximum Transfer Unit
5428 return -EINVAL; in igc_change_mtu()
5435 while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) in igc_change_mtu()
5439 adapter->max_frame_size = max_frame; in igc_change_mtu()
5444 netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); in igc_change_mtu()
5445 WRITE_ONCE(netdev->mtu, new_mtu); in igc_change_mtu()
5452 clear_bit(__IGC_RESETTING, &adapter->state); in igc_change_mtu()
5458 * igc_tx_timeout - Respond to a Tx Hang
5466 struct igc_hw *hw = &adapter->hw; in igc_tx_timeout()
5469 adapter->tx_timeout_count++; in igc_tx_timeout()
5470 schedule_work(&adapter->reset_task); in igc_tx_timeout()
5472 (adapter->eims_enable_mask & ~adapter->eims_other)); in igc_tx_timeout()
5476 * igc_get_stats64 - Get System Network Statistics
5488 spin_lock(&adapter->stats64_lock); in igc_get_stats64()
5489 if (!test_bit(__IGC_RESETTING, &adapter->state)) in igc_get_stats64()
5491 memcpy(stats, &adapter->stats64, sizeof(*stats)); in igc_get_stats64()
5492 spin_unlock(&adapter->stats64_lock); in igc_get_stats64()
5512 netdev_features_t changed = netdev->features ^ features; in igc_set_features()
5525 netdev->features = features; in igc_set_features()
5550 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); in igc_features_check()
5560 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) in igc_features_check()
5568 struct igc_hw *hw = &adapter->hw; in igc_tsync_interrupt()
5577 if (adapter->ptp_caps.pps) in igc_tsync_interrupt()
5578 ptp_clock_event(adapter->ptp_clock, &event); in igc_tsync_interrupt()
5587 spin_lock(&adapter->tmreg_lock); in igc_tsync_interrupt()
5588 ts = timespec64_add(adapter->perout[0].start, in igc_tsync_interrupt()
5589 adapter->perout[0].period); in igc_tsync_interrupt()
5595 adapter->perout[0].start = ts; in igc_tsync_interrupt()
5596 spin_unlock(&adapter->tmreg_lock); in igc_tsync_interrupt()
5600 spin_lock(&adapter->tmreg_lock); in igc_tsync_interrupt()
5601 ts = timespec64_add(adapter->perout[1].start, in igc_tsync_interrupt()
5602 adapter->perout[1].period); in igc_tsync_interrupt()
5608 adapter->perout[1].start = ts; in igc_tsync_interrupt()
5609 spin_unlock(&adapter->tmreg_lock); in igc_tsync_interrupt()
5618 ptp_clock_event(adapter->ptp_clock, &event); in igc_tsync_interrupt()
5627 ptp_clock_event(adapter->ptp_clock, &event); in igc_tsync_interrupt()
5632 * igc_msix_other - msix other interrupt handler
5639 struct igc_hw *hw = &adapter->hw; in igc_msix_other()
5644 schedule_work(&adapter->reset_task); in igc_msix_other()
5648 adapter->stats.doosync++; in igc_msix_other()
5652 hw->mac.get_link_status = true; in igc_msix_other()
5654 if (!test_bit(__IGC_DOWN, &adapter->state)) in igc_msix_other()
5655 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igc_msix_other()
5661 wr32(IGC_EIMS, adapter->eims_other); in igc_msix_other()
5668 u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK; in igc_write_itr()
5670 if (!q_vector->set_itr) in igc_write_itr()
5678 writel(itr_val, q_vector->itr_register); in igc_write_itr()
5679 q_vector->set_itr = 0; in igc_write_itr()
5689 napi_schedule(&q_vector->napi); in igc_msix_ring()
5695 * igc_request_msix - Initialize MSI-X interrupts
5698 * igc_request_msix allocates MSI-X vectors and requests interrupts from the
5703 unsigned int num_q_vectors = adapter->num_q_vectors; in igc_request_msix()
5705 struct net_device *netdev = adapter->netdev; in igc_request_msix()
5707 err = request_irq(adapter->msix_entries[vector].vector, in igc_request_msix()
5708 &igc_msix_other, 0, netdev->name, adapter); in igc_request_msix()
5714 dev_warn(&adapter->pdev->dev, in igc_request_msix()
5716 adapter->num_q_vectors, MAX_Q_VECTORS); in igc_request_msix()
5719 struct igc_q_vector *q_vector = adapter->q_vector[i]; in igc_request_msix()
5723 q_vector->itr_register = adapter->io_addr + IGC_EITR(vector); in igc_request_msix()
5725 if (q_vector->rx.ring && q_vector->tx.ring) in igc_request_msix()
5726 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, in igc_request_msix()
5727 q_vector->rx.ring->queue_index); in igc_request_msix()
5728 else if (q_vector->tx.ring) in igc_request_msix()
5729 sprintf(q_vector->name, "%s-tx-%u", netdev->name, in igc_request_msix()
5730 q_vector->tx.ring->queue_index); in igc_request_msix()
5731 else if (q_vector->rx.ring) in igc_request_msix()
5732 sprintf(q_vector->name, "%s-rx-%u", netdev->name, in igc_request_msix()
5733 q_vector->rx.ring->queue_index); in igc_request_msix()
5735 sprintf(q_vector->name, "%s-unused", netdev->name); in igc_request_msix()
5737 err = request_irq(adapter->msix_entries[vector].vector, in igc_request_msix()
5738 igc_msix_ring, 0, q_vector->name, in igc_request_msix()
5743 netif_napi_set_irq(&q_vector->napi, in igc_request_msix()
5744 adapter->msix_entries[vector].vector); in igc_request_msix()
5752 free_irq(adapter->msix_entries[free_vector++].vector, adapter); in igc_request_msix()
5754 vector--; in igc_request_msix()
5756 free_irq(adapter->msix_entries[free_vector++].vector, in igc_request_msix()
5757 adapter->q_vector[i]); in igc_request_msix()
5764 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts
5768 * MSI-X interrupts allocated.
5784 igc_get_phy_info(&adapter->hw); in igc_update_phy_info()
5788 * igc_has_link - check shared code for link and determine up/down
5793 struct igc_hw *hw = &adapter->hw; in igc_has_link()
5801 if (!hw->mac.get_link_status) in igc_has_link()
5803 hw->mac.ops.check_for_link(hw); in igc_has_link()
5804 link_active = !hw->mac.get_link_status; in igc_has_link()
5806 if (hw->mac.type == igc_i225) { in igc_has_link()
5807 if (!netif_carrier_ok(adapter->netdev)) { in igc_has_link()
5808 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; in igc_has_link()
5809 } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { in igc_has_link()
5810 adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE; in igc_has_link()
5811 adapter->link_check_timeout = jiffies; in igc_has_link()
5819 * igc_watchdog - Timer Call-back
5827 schedule_work(&adapter->watchdog_task); in igc_watchdog()
5835 struct net_device *netdev = adapter->netdev; in igc_watchdog_task()
5836 struct igc_hw *hw = &adapter->hw; in igc_watchdog_task()
5837 struct igc_phy_info *phy = &hw->phy; in igc_watchdog_task()
5844 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) { in igc_watchdog_task()
5845 if (time_after(jiffies, (adapter->link_check_timeout + HZ))) in igc_watchdog_task()
5846 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; in igc_watchdog_task()
5853 pm_runtime_resume(netdev->dev.parent); in igc_watchdog_task()
5858 hw->mac.ops.get_speed_and_duplex(hw, in igc_watchdog_task()
5859 &adapter->link_speed, in igc_watchdog_task()
5860 &adapter->link_duplex); in igc_watchdog_task()
5866 adapter->link_speed, in igc_watchdog_task()
5867 adapter->link_duplex == FULL_DUPLEX ? in igc_watchdog_task()
5875 if ((adapter->flags & IGC_FLAG_EEE) && in igc_watchdog_task()
5876 adapter->link_duplex == HALF_DUPLEX) { in igc_watchdog_task()
5878 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n"); in igc_watchdog_task()
5879 adapter->hw.dev_spec._base.eee_enable = false; in igc_watchdog_task()
5880 adapter->flags &= ~IGC_FLAG_EEE; in igc_watchdog_task()
5885 if (phy->speed_downgraded) in igc_watchdog_task()
5889 adapter->tx_timeout_factor = 1; in igc_watchdog_task()
5890 switch (adapter->link_speed) { in igc_watchdog_task()
5892 adapter->tx_timeout_factor = 14; in igc_watchdog_task()
5897 adapter->tx_timeout_factor = 1; in igc_watchdog_task()
5903 * based on link-up activity. Write into the register in igc_watchdog_task()
5908 if (adapter->fpe.mmsv.pmac_enabled) in igc_watchdog_task()
5909 ethtool_mmsv_link_state_handle(&adapter->fpe.mmsv, in igc_watchdog_task()
5912 if (adapter->link_speed != SPEED_1000) in igc_watchdog_task()
5922 retry_count--; in igc_watchdog_task()
5928 netdev_err(netdev, "read 1000Base-T Status Reg\n"); in igc_watchdog_task()
5934 if (!test_bit(__IGC_DOWN, &adapter->state)) in igc_watchdog_task()
5935 mod_timer(&adapter->phy_info_timer, in igc_watchdog_task()
5940 adapter->link_speed = 0; in igc_watchdog_task()
5941 adapter->link_duplex = 0; in igc_watchdog_task()
5947 if (adapter->fpe.mmsv.pmac_enabled) in igc_watchdog_task()
5948 ethtool_mmsv_link_state_handle(&adapter->fpe.mmsv, in igc_watchdog_task()
5952 if (!test_bit(__IGC_DOWN, &adapter->state)) in igc_watchdog_task()
5953 mod_timer(&adapter->phy_info_timer, in igc_watchdog_task()
5956 pm_schedule_suspend(netdev->dev.parent, in igc_watchdog_task()
5961 spin_lock(&adapter->stats64_lock); in igc_watchdog_task()
5963 spin_unlock(&adapter->stats64_lock); in igc_watchdog_task()
5965 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_watchdog_task()
5966 struct igc_ring *tx_ring = adapter->tx_ring[i]; in igc_watchdog_task()
5974 if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) { in igc_watchdog_task()
5975 adapter->tx_timeout_count++; in igc_watchdog_task()
5976 schedule_work(&adapter->reset_task); in igc_watchdog_task()
5983 set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); in igc_watchdog_task()
5987 if (adapter->flags & IGC_FLAG_HAS_MSIX) { in igc_watchdog_task()
5990 for (i = 0; i < adapter->num_q_vectors; i++) { in igc_watchdog_task()
5991 struct igc_q_vector *q_vector = adapter->q_vector[i]; in igc_watchdog_task()
5994 if (!q_vector->rx.ring) in igc_watchdog_task()
5997 rx_ring = adapter->rx_ring[q_vector->rx.ring->queue_index]; in igc_watchdog_task()
5999 if (test_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) { in igc_watchdog_task()
6000 eics |= q_vector->eims_value; in igc_watchdog_task()
6001 clear_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); in igc_watchdog_task()
6007 struct igc_ring *rx_ring = adapter->rx_ring[0]; in igc_watchdog_task()
6009 if (test_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) { in igc_watchdog_task()
6010 clear_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); in igc_watchdog_task()
6018 if (!test_bit(__IGC_DOWN, &adapter->state)) { in igc_watchdog_task()
6019 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) in igc_watchdog_task()
6020 mod_timer(&adapter->watchdog_timer, in igc_watchdog_task()
6023 mod_timer(&adapter->watchdog_timer, in igc_watchdog_task()
6029 * igc_intr_msi - Interrupt Handler
6036 struct igc_q_vector *q_vector = adapter->q_vector[0]; in igc_intr_msi()
6037 struct igc_hw *hw = &adapter->hw; in igc_intr_msi()
6044 schedule_work(&adapter->reset_task); in igc_intr_msi()
6048 adapter->stats.doosync++; in igc_intr_msi()
6052 hw->mac.get_link_status = true; in igc_intr_msi()
6053 if (!test_bit(__IGC_DOWN, &adapter->state)) in igc_intr_msi()
6054 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igc_intr_msi()
6060 napi_schedule(&q_vector->napi); in igc_intr_msi()
6066 * igc_intr - Legacy Interrupt Handler
6073 struct igc_q_vector *q_vector = adapter->q_vector[0]; in igc_intr()
6074 struct igc_hw *hw = &adapter->hw; in igc_intr()
6075 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No in igc_intr()
6080 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is in igc_intr()
6089 schedule_work(&adapter->reset_task); in igc_intr()
6093 adapter->stats.doosync++; in igc_intr()
6097 hw->mac.get_link_status = true; in igc_intr()
6099 if (!test_bit(__IGC_DOWN, &adapter->state)) in igc_intr()
6100 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igc_intr()
6106 napi_schedule(&q_vector->napi); in igc_intr()
6113 if (adapter->msix_entries) { in igc_free_irq()
6116 free_irq(adapter->msix_entries[vector++].vector, adapter); in igc_free_irq()
6118 for (i = 0; i < adapter->num_q_vectors; i++) in igc_free_irq()
6119 free_irq(adapter->msix_entries[vector++].vector, in igc_free_irq()
6120 adapter->q_vector[i]); in igc_free_irq()
6122 free_irq(adapter->pdev->irq, adapter); in igc_free_irq()
6127 * igc_request_irq - initialize interrupts
6135 struct net_device *netdev = adapter->netdev; in igc_request_irq()
6136 struct pci_dev *pdev = adapter->pdev; in igc_request_irq()
6139 if (adapter->flags & IGC_FLAG_HAS_MSIX) { in igc_request_irq()
6156 igc_assign_vector(adapter->q_vector[0], 0); in igc_request_irq()
6158 if (adapter->flags & IGC_FLAG_HAS_MSI) { in igc_request_irq()
6159 err = request_irq(pdev->irq, &igc_intr_msi, 0, in igc_request_irq()
6160 netdev->name, adapter); in igc_request_irq()
6166 adapter->flags &= ~IGC_FLAG_HAS_MSI; in igc_request_irq()
6169 err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, in igc_request_irq()
6170 netdev->name, adapter); in igc_request_irq()
6180 * __igc_open - Called when a network interface is made active
6195 struct pci_dev *pdev = adapter->pdev; in __igc_open()
6196 struct igc_hw *hw = &adapter->hw; in __igc_open()
6203 if (test_bit(__IGC_TESTING, &adapter->state)) { in __igc_open()
6205 return -EBUSY; in __igc_open()
6209 pm_runtime_get_sync(&pdev->dev); in __igc_open()
6231 clear_bit(__IGC_DOWN, &adapter->state); in __igc_open()
6233 for (i = 0; i < adapter->num_q_vectors; i++) { in __igc_open()
6234 napi = &adapter->q_vector[i]->napi; in __igc_open()
6244 pm_runtime_put(&pdev->dev); in __igc_open()
6249 hw->mac.get_link_status = true; in __igc_open()
6250 schedule_work(&adapter->watchdog_task); in __igc_open()
6256 igc_power_down_phy_copper_base(&adapter->hw); in __igc_open()
6263 pm_runtime_put(&pdev->dev); in __igc_open()
6274 err = netif_set_real_num_queues(netdev, adapter->num_tx_queues, in igc_open()
6275 adapter->num_rx_queues); in igc_open()
6285 * __igc_close - Disables a network interface
6291 * The close entry point is called when an interface is de-activated
6299 struct pci_dev *pdev = adapter->pdev; in __igc_close()
6301 WARN_ON(test_bit(__IGC_RESETTING, &adapter->state)); in __igc_close()
6304 pm_runtime_get_sync(&pdev->dev); in __igc_close()
6316 pm_runtime_put_sync(&pdev->dev); in __igc_close()
6323 if (netif_device_present(netdev) || netdev->dismantle) in igc_close()
6333 if (queue < 0 || queue >= adapter->num_tx_queues) in igc_save_launchtime_params()
6334 return -EINVAL; in igc_save_launchtime_params()
6336 ring = adapter->tx_ring[queue]; in igc_save_launchtime_params()
6337 ring->launchtime_enable = enable; in igc_save_launchtime_params()
6355 struct igc_hw *hw = &adapter->hw; in validate_schedule()
6359 if (qopt->cycle_time_extension) in validate_schedule()
6370 if (!is_base_time_past(qopt->base_time, &now) && in validate_schedule()
6374 for (n = 0; n < qopt->num_entries; n++) { in validate_schedule()
6378 prev = n ? &qopt->entries[n - 1] : NULL; in validate_schedule()
6379 e = &qopt->entries[n]; in validate_schedule()
6384 if (e->command != TC_TAPRIO_CMD_SET_GATES) in validate_schedule()
6387 for (i = 0; i < adapter->num_tx_queues; i++) in validate_schedule()
6388 if (e->gate_mask & BIT(i)) { in validate_schedule()
6396 !(prev->gate_mask & BIT(i))) in validate_schedule()
6407 struct igc_hw *hw = &adapter->hw; in igc_tsn_enable_launchtime()
6410 if (hw->mac.type != igc_i225) in igc_tsn_enable_launchtime()
6411 return -EOPNOTSUPP; in igc_tsn_enable_launchtime()
6413 err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable); in igc_tsn_enable_launchtime()
6425 adapter->base_time = 0; in igc_qbv_clear_schedule()
6426 adapter->cycle_time = NSEC_PER_SEC; in igc_qbv_clear_schedule()
6427 adapter->taprio_offload_enable = false; in igc_qbv_clear_schedule()
6428 adapter->qbv_config_change_errors = 0; in igc_qbv_clear_schedule()
6429 adapter->qbv_count = 0; in igc_qbv_clear_schedule()
6431 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_qbv_clear_schedule()
6432 struct igc_ring *ring = adapter->tx_ring[i]; in igc_qbv_clear_schedule()
6434 ring->start_time = 0; in igc_qbv_clear_schedule()
6435 ring->end_time = NSEC_PER_SEC; in igc_qbv_clear_schedule()
6436 ring->max_sdu = 0; in igc_qbv_clear_schedule()
6437 ring->preemptible = false; in igc_qbv_clear_schedule()
6440 spin_lock_irqsave(&adapter->qbv_tx_lock, flags); in igc_qbv_clear_schedule()
6442 adapter->qbv_transition = false; in igc_qbv_clear_schedule()
6444 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_qbv_clear_schedule()
6445 struct igc_ring *ring = adapter->tx_ring[i]; in igc_qbv_clear_schedule()
6447 ring->oper_gate_closed = false; in igc_qbv_clear_schedule()
6448 ring->admin_gate_closed = false; in igc_qbv_clear_schedule()
6451 spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); in igc_qbv_clear_schedule()
6469 stats->tx_overruns = 0; in igc_taprio_stats()
6475 struct tc_taprio_qopt_stats *stats = &queue_stats->stats; in igc_taprio_queue_stats()
6480 stats->tx_overruns = 0; in igc_taprio_queue_stats()
6487 struct igc_hw *hw = &adapter->hw; in igc_save_qbv_schedule()
6494 if (qopt->base_time < 0) in igc_save_qbv_schedule()
6495 return -ERANGE; in igc_save_qbv_schedule()
6497 if (igc_is_device_id_i225(hw) && adapter->taprio_offload_enable) in igc_save_qbv_schedule()
6498 return -EALREADY; in igc_save_qbv_schedule()
6501 return -EINVAL; in igc_save_qbv_schedule()
6503 if (qopt->mqprio.preemptible_tcs && in igc_save_qbv_schedule()
6504 !(adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO)) { in igc_save_qbv_schedule()
6505 NL_SET_ERR_MSG_MOD(qopt->extack, in igc_save_qbv_schedule()
6506 "reverse-tsn-txq-prio private flag must be enabled before setting preemptible tc"); in igc_save_qbv_schedule()
6507 return -ENODEV; in igc_save_qbv_schedule()
6513 is_base_time_past(qopt->base_time, &now)) in igc_save_qbv_schedule()
6514 adapter->qbv_config_change_errors++; in igc_save_qbv_schedule()
6516 adapter->cycle_time = qopt->cycle_time; in igc_save_qbv_schedule()
6517 adapter->base_time = qopt->base_time; in igc_save_qbv_schedule()
6518 adapter->taprio_offload_enable = true; in igc_save_qbv_schedule()
6520 for (n = 0; n < qopt->num_entries; n++) { in igc_save_qbv_schedule()
6521 struct tc_taprio_sched_entry *e = &qopt->entries[n]; in igc_save_qbv_schedule()
6523 end_time += e->interval; in igc_save_qbv_schedule()
6531 * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2, in igc_save_qbv_schedule()
6536 if (end_time > adapter->cycle_time || in igc_save_qbv_schedule()
6537 n + 1 == qopt->num_entries) in igc_save_qbv_schedule()
6538 end_time = adapter->cycle_time; in igc_save_qbv_schedule()
6540 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_save_qbv_schedule()
6541 struct igc_ring *ring = adapter->tx_ring[i]; in igc_save_qbv_schedule()
6543 if (!(e->gate_mask & BIT(i))) in igc_save_qbv_schedule()
6551 ring->start_time = start_time; in igc_save_qbv_schedule()
6552 ring->end_time = end_time; in igc_save_qbv_schedule()
6554 if (ring->start_time >= adapter->cycle_time) in igc_save_qbv_schedule()
6560 start_time += e->interval; in igc_save_qbv_schedule()
6563 spin_lock_irqsave(&adapter->qbv_tx_lock, flags); in igc_save_qbv_schedule()
6568 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_save_qbv_schedule()
6569 struct igc_ring *ring = adapter->tx_ring[i]; in igc_save_qbv_schedule()
6571 if (!is_base_time_past(qopt->base_time, &now)) { in igc_save_qbv_schedule()
6572 ring->admin_gate_closed = false; in igc_save_qbv_schedule()
6574 ring->oper_gate_closed = false; in igc_save_qbv_schedule()
6575 ring->admin_gate_closed = false; in igc_save_qbv_schedule()
6579 if (!is_base_time_past(qopt->base_time, &now)) in igc_save_qbv_schedule()
6580 ring->admin_gate_closed = true; in igc_save_qbv_schedule()
6582 ring->oper_gate_closed = true; in igc_save_qbv_schedule()
6584 ring->start_time = end_time; in igc_save_qbv_schedule()
6585 ring->end_time = end_time; in igc_save_qbv_schedule()
6589 spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); in igc_save_qbv_schedule()
6591 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_save_qbv_schedule()
6592 struct igc_ring *ring = adapter->tx_ring[i]; in igc_save_qbv_schedule()
6593 struct net_device *dev = adapter->netdev; in igc_save_qbv_schedule()
6595 if (qopt->max_sdu[i]) in igc_save_qbv_schedule()
6596 ring->max_sdu = qopt->max_sdu[i] + dev->hard_header_len - ETH_TLEN; in igc_save_qbv_schedule()
6598 ring->max_sdu = 0; in igc_save_qbv_schedule()
6601 igc_fpe_save_preempt_queue(adapter, &qopt->mqprio); in igc_save_qbv_schedule()
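/* Worked example of the gate-list walk above, assuming a 1 ms cycle with two
 * entries: 600 us opening queue 0, then 400 us opening queue 1 (numbers are
 * illustrative only):
 *   entry 0: end_time = 600000;  tx_ring[0]: start 0,      end 600000
 *   entry 1: end_time = 1000000 (last entry, clamped to cycle_time);
 *            tx_ring[1]: start 600000, end 1000000
 * Queues that no entry opens end up with start_time == end_time and their
 * gate marked closed (second per-queue pass above, partially elided here).
 */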
6609 struct igc_hw *hw = &adapter->hw; in igc_tsn_enable_qbv_scheduling()
6612 if (hw->mac.type != igc_i225) in igc_tsn_enable_qbv_scheduling()
6613 return -EOPNOTSUPP; in igc_tsn_enable_qbv_scheduling()
6615 switch (qopt->cmd) { in igc_tsn_enable_qbv_scheduling()
6623 igc_taprio_stats(adapter->netdev, &qopt->stats); in igc_tsn_enable_qbv_scheduling()
6626 igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats); in igc_tsn_enable_qbv_scheduling()
6629 return -EOPNOTSUPP; in igc_tsn_enable_qbv_scheduling()
6643 struct net_device *netdev = adapter->netdev; in igc_save_cbs_params()
6647 /* i225 has two sets of credit-based shaper logic. in igc_save_cbs_params()
6651 return -EINVAL; in igc_save_cbs_params()
6653 ring = adapter->tx_ring[queue]; in igc_save_cbs_params()
6656 if (adapter->tx_ring[i]) in igc_save_cbs_params()
6657 cbs_status[i] = adapter->tx_ring[i]->cbs_enable; in igc_save_cbs_params()
6666 return -EINVAL; in igc_save_cbs_params()
6672 return -EINVAL; in igc_save_cbs_params()
6676 ring->cbs_enable = enable; in igc_save_cbs_params()
6677 ring->idleslope = idleslope; in igc_save_cbs_params()
6678 ring->sendslope = sendslope; in igc_save_cbs_params()
6679 ring->hicredit = hicredit; in igc_save_cbs_params()
6680 ring->locredit = locredit; in igc_save_cbs_params()
6688 struct igc_hw *hw = &adapter->hw; in igc_tsn_enable_cbs()
6691 if (hw->mac.type != igc_i225) in igc_tsn_enable_cbs()
6692 return -EOPNOTSUPP; in igc_tsn_enable_cbs()
6694 if (qopt->queue < 0 || qopt->queue > 1) in igc_tsn_enable_cbs()
6695 return -EINVAL; in igc_tsn_enable_cbs()
6697 err = igc_save_cbs_params(adapter, qopt->queue, qopt->enable, in igc_tsn_enable_cbs()
6698 qopt->idleslope, qopt->sendslope, in igc_tsn_enable_cbs()
6699 qopt->hicredit, qopt->locredit); in igc_tsn_enable_cbs()
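/* The values stored above come straight from the tc-cbs offload request; the
 * driver records and programs whatever tc hands down. Rough, illustrative
 * example only: reserving 20 Mbit/s of class-A traffic on a 1 Gbit/s link
 * would arrive as approximately
 *   idleslope = 20000 (kbit/s)
 *   sendslope = idleslope - port_transmit_rate = 20000 - 1000000 = -980000
 * with hicredit/locredit derived from the largest interfering and own frame
 * sizes per 802.1Qav.
 */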
6709 struct igc_hw *hw = &adapter->hw; in igc_tc_query_caps()
6711 switch (base->type) { in igc_tc_query_caps()
6713 struct tc_mqprio_caps *caps = base->caps; in igc_tc_query_caps()
6715 caps->validate_queue_counts = true; in igc_tc_query_caps()
6720 struct tc_taprio_caps *caps = base->caps; in igc_tc_query_caps()
6722 if (!(adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO)) in igc_tc_query_caps()
6723 caps->broken_mqprio = true; in igc_tc_query_caps()
6725 if (hw->mac.type == igc_i225) { in igc_tc_query_caps()
6726 caps->supports_queue_max_sdu = true; in igc_tc_query_caps()
6727 caps->gate_mask_per_txq = true; in igc_tc_query_caps()
6733 return -EOPNOTSUPP; in igc_tc_query_caps()
6742 adapter->strict_priority_enable = true; in igc_save_mqprio_params()
6743 adapter->num_tc = num_tc; in igc_save_mqprio_params()
6746 adapter->queue_per_tc[i] = offset[i]; in igc_save_mqprio_params()
6752 int num_tc = mqprio->qopt.num_tc; in igc_tsn_is_tc_to_queue_priority_ordered()
6756 if (mqprio->qopt.offset[i - 1] > mqprio->qopt.offset[i]) in igc_tsn_is_tc_to_queue_priority_ordered()
6766 struct igc_hw *hw = &adapter->hw; in igc_tsn_enable_mqprio()
6769 if (hw->mac.type != igc_i225) in igc_tsn_enable_mqprio()
6770 return -EOPNOTSUPP; in igc_tsn_enable_mqprio()
6772 if (!mqprio->qopt.num_tc) { in igc_tsn_enable_mqprio()
6773 adapter->strict_priority_enable = false; in igc_tsn_enable_mqprio()
6775 netdev_reset_tc(adapter->netdev); in igc_tsn_enable_mqprio()
6780 if (mqprio->qopt.num_tc != adapter->num_tx_queues) { in igc_tsn_enable_mqprio()
6781 NL_SET_ERR_MSG_FMT_MOD(mqprio->extack, in igc_tsn_enable_mqprio()
6783 adapter->num_tx_queues); in igc_tsn_enable_mqprio()
6784 return -EOPNOTSUPP; in igc_tsn_enable_mqprio()
6788 for (i = 0; i < mqprio->qopt.num_tc; i++) { in igc_tsn_enable_mqprio()
6789 if (mqprio->qopt.count[i] != 1) { in igc_tsn_enable_mqprio()
6790 NL_SET_ERR_MSG_MOD(mqprio->extack, in igc_tsn_enable_mqprio()
6792 return -EOPNOTSUPP; in igc_tsn_enable_mqprio()
6797 NL_SET_ERR_MSG_MOD(mqprio->extack, in igc_tsn_enable_mqprio()
6798 "tc to queue mapping must preserve increasing priority (higher tc -> higher queue)"); in igc_tsn_enable_mqprio()
6799 return -EOPNOTSUPP; in igc_tsn_enable_mqprio()
6802 igc_save_mqprio_params(adapter, mqprio->qopt.num_tc, in igc_tsn_enable_mqprio()
6803 mqprio->qopt.offset); in igc_tsn_enable_mqprio()
6805 err = netdev_set_num_tc(adapter->netdev, adapter->num_tc); in igc_tsn_enable_mqprio()
6809 for (i = 0; i < adapter->num_tc; i++) { in igc_tsn_enable_mqprio()
6810 err = netdev_set_tc_queue(adapter->netdev, i, 1, in igc_tsn_enable_mqprio()
6811 adapter->queue_per_tc[i]); in igc_tsn_enable_mqprio()
6818 adapter->queue_per_tc[i] = i; in igc_tsn_enable_mqprio()
6820 mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS; in igc_tsn_enable_mqprio()
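/* Given the checks above (one TC per TX queue, non-decreasing offsets), an
 * accepted mqprio shape on a 4-queue i225 looks like the following; values
 * are illustrative:
 *   num_tc = 4
 *   count  = {1, 1, 1, 1}   -> exactly one TX queue per traffic class
 *   offset = {0, 1, 2, 3}   -> higher TC maps to a higher queue index
 */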
6832 adapter->tc_setup_type = type; in igc_setup_tc()
6850 return -EOPNOTSUPP; in igc_setup_tc()
6858 switch (bpf->command) { in igc_bpf()
6860 return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack); in igc_bpf()
6862 return igc_xdp_setup_pool(adapter, bpf->xsk.pool, in igc_bpf()
6863 bpf->xsk.queue_id); in igc_bpf()
6865 return -EOPNOTSUPP; in igc_bpf()
6879 return -ENETDOWN; in igc_xdp_xmit()
6882 return -EINVAL; in igc_xdp_xmit()
6914 struct igc_hw *hw = &adapter->hw; in igc_trigger_rxtxq_interrupt()
6917 eics |= q_vector->eims_value; in igc_trigger_rxtxq_interrupt()
6927 if (test_bit(__IGC_DOWN, &adapter->state)) in igc_xsk_wakeup()
6928 return -ENETDOWN; in igc_xsk_wakeup()
6931 return -ENXIO; in igc_xsk_wakeup()
6933 if (queue_id >= adapter->num_rx_queues) in igc_xsk_wakeup()
6934 return -EINVAL; in igc_xsk_wakeup()
6936 ring = adapter->rx_ring[queue_id]; in igc_xsk_wakeup()
6938 if (!ring->xsk_pool) in igc_xsk_wakeup()
6939 return -ENXIO; in igc_xsk_wakeup()
6941 q_vector = adapter->q_vector[queue_id]; in igc_xsk_wakeup()
6942 if (!napi_if_scheduled_mark_missed(&q_vector->napi)) in igc_xsk_wakeup()
6956 tstamp = hwtstamps->netdev_data; in igc_get_tstamp()
6959 timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer1); in igc_get_tstamp()
6961 timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0); in igc_get_tstamp()
6990 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); in igc_rd32()
7000 struct net_device *netdev = igc->netdev; in igc_rd32()
7002 hw->hw_addr = NULL; in igc_rd32()
7005 WARN(pci_device_is_present(igc->pdev), in igc_rd32()
7025 [11] = XDP_RSS_TYPE_NONE, /* keep array sized for SW bit-mask */
7037 if (!(ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)) in igc_xdp_rx_hash()
7038 return -ENODATA; in igc_xdp_rx_hash()
7040 *hash = le32_to_cpu(ctx->rx_desc->wb.lower.hi_dword.rss); in igc_xdp_rx_hash()
7041 *rss_type = igc_xdp_rss_type[igc_rss_type(ctx->rx_desc)]; in igc_xdp_rx_hash()
7049 struct igc_adapter *adapter = netdev_priv(ctx->xdp.rxq->dev); in igc_xdp_rx_timestamp()
7050 struct igc_inline_rx_tstamps *tstamp = ctx->rx_ts; in igc_xdp_rx_timestamp()
7052 if (igc_test_staterr(ctx->rx_desc, IGC_RXDADV_STAT_TSIP)) { in igc_xdp_rx_timestamp()
7053 *timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0); in igc_xdp_rx_timestamp()
7058 return -ENODATA; in igc_xdp_rx_timestamp()
7073 spin_lock_irqsave(&adapter->qbv_tx_lock, flags); in igc_qbv_scheduling_timer()
7075 adapter->qbv_transition = true; in igc_qbv_scheduling_timer()
7076 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_qbv_scheduling_timer()
7077 struct igc_ring *tx_ring = adapter->tx_ring[i]; in igc_qbv_scheduling_timer()
7079 if (tx_ring->admin_gate_closed) { in igc_qbv_scheduling_timer()
7080 tx_ring->admin_gate_closed = false; in igc_qbv_scheduling_timer()
7081 tx_ring->oper_gate_closed = true; in igc_qbv_scheduling_timer()
7083 tx_ring->oper_gate_closed = false; in igc_qbv_scheduling_timer()
7086 adapter->qbv_transition = false; in igc_qbv_scheduling_timer()
7088 spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); in igc_qbv_scheduling_timer()
7094 * igc_probe - Device Initialization Routine
7110 const struct igc_info *ei = igc_info_tbl[ent->driver_data]; in igc_probe()
7117 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in igc_probe()
7119 dev_err(&pdev->dev, in igc_probe()
7130 dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n"); in igc_probe()
7134 err = -ENOMEM; in igc_probe()
7141 SET_NETDEV_DEV(netdev, &pdev->dev); in igc_probe()
7145 adapter->netdev = netdev; in igc_probe()
7146 adapter->pdev = pdev; in igc_probe()
7147 hw = &adapter->hw; in igc_probe()
7148 hw->back = adapter; in igc_probe()
7149 adapter->port_num = hw->bus.func; in igc_probe()
7150 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); in igc_probe()
7153 hw->vendor_id = pdev->vendor; in igc_probe()
7154 hw->device_id = pdev->device; in igc_probe()
7155 hw->revision_id = pdev->revision; in igc_probe()
7156 hw->subsystem_vendor_id = pdev->subsystem_vendor; in igc_probe()
7157 hw->subsystem_device_id = pdev->subsystem_device; in igc_probe()
7167 err = -EIO; in igc_probe()
7168 adapter->io_addr = ioremap(pci_resource_start(pdev, 0), in igc_probe()
7170 if (!adapter->io_addr) in igc_probe()
7173 /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */ in igc_probe()
7174 hw->hw_addr = adapter->io_addr; in igc_probe()
7176 netdev->netdev_ops = &igc_netdev_ops; in igc_probe()
7177 netdev->xdp_metadata_ops = &igc_xdp_metadata_ops; in igc_probe()
7178 netdev->xsk_tx_metadata_ops = &igc_xsk_tx_metadata_ops; in igc_probe()
7180 netdev->watchdog_timeo = 5 * HZ; in igc_probe()
7182 netdev->mem_start = pci_resource_start(pdev, 0); in igc_probe()
7183 netdev->mem_end = pci_resource_end(pdev, 0); in igc_probe()
7186 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); in igc_probe()
7187 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); in igc_probe()
7189 /* Initialize skew-specific constants */ in igc_probe()
7190 err = ei->get_invariants(hw); in igc_probe()
7195 netdev->features |= NETIF_F_SG; in igc_probe()
7196 netdev->features |= NETIF_F_TSO; in igc_probe()
7197 netdev->features |= NETIF_F_TSO6; in igc_probe()
7198 netdev->features |= NETIF_F_TSO_ECN; in igc_probe()
7199 netdev->features |= NETIF_F_RXHASH; in igc_probe()
7200 netdev->features |= NETIF_F_RXCSUM; in igc_probe()
7201 netdev->features |= NETIF_F_HW_CSUM; in igc_probe()
7202 netdev->features |= NETIF_F_SCTP_CRC; in igc_probe()
7203 netdev->features |= NETIF_F_HW_TC; in igc_probe()
7212 netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES; in igc_probe()
7213 netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES; in igc_probe()
7221 netdev->hw_features |= NETIF_F_NTUPLE; in igc_probe()
7222 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; in igc_probe()
7223 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; in igc_probe()
7224 netdev->hw_features |= netdev->features; in igc_probe()
7226 netdev->features |= NETIF_F_HIGHDMA; in igc_probe()
7228 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; in igc_probe()
7229 netdev->mpls_features |= NETIF_F_HW_CSUM; in igc_probe()
7230 netdev->hw_enc_features |= netdev->vlan_features; in igc_probe()
7232 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | in igc_probe()
7236 netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; in igc_probe()
7238 /* MTU range: 68 - 9216 */ in igc_probe()
7239 netdev->min_mtu = ETH_MIN_MTU; in igc_probe()
7240 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; in igc_probe()
7245 hw->mac.ops.reset_hw(hw); in igc_probe()
7248 if (hw->nvm.ops.validate(hw) < 0) { in igc_probe()
7249 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); in igc_probe()
7250 err = -EIO; in igc_probe()
7255 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { in igc_probe()
7257 if (hw->mac.ops.read_mac_addr(hw)) in igc_probe()
7258 dev_err(&pdev->dev, "NVM Read Error\n"); in igc_probe()
7261 eth_hw_addr_set(netdev, hw->mac.addr); in igc_probe()
7263 if (!is_valid_ether_addr(netdev->dev_addr)) { in igc_probe()
7264 dev_err(&pdev->dev, "Invalid MAC Address\n"); in igc_probe()
7265 err = -EIO; in igc_probe()
7273 timer_setup(&adapter->watchdog_timer, igc_watchdog, 0); in igc_probe()
7274 timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0); in igc_probe()
7276 INIT_WORK(&adapter->reset_task, igc_reset_task); in igc_probe()
7277 INIT_WORK(&adapter->watchdog_task, igc_watchdog_task); in igc_probe()
7279 hrtimer_setup(&adapter->hrtimer, &igc_qbv_scheduling_timer, CLOCK_MONOTONIC, in igc_probe()
7282 /* Initialize link properties that are user-changeable */ in igc_probe()
7283 adapter->fc_autoneg = true; in igc_probe()
7284 hw->phy.autoneg_advertised = 0xaf; in igc_probe()
7286 hw->fc.requested_mode = igc_fc_default; in igc_probe()
7287 hw->fc.current_mode = igc_fc_default; in igc_probe()
7290 adapter->flags |= IGC_FLAG_WOL_SUPPORTED; in igc_probe()
7293 if (adapter->flags & IGC_FLAG_WOL_SUPPORTED) in igc_probe()
7294 adapter->wol |= IGC_WUFC_MAG; in igc_probe()
7296 device_set_wakeup_enable(&adapter->pdev->dev, in igc_probe()
7297 adapter->flags & IGC_FLAG_WOL_SUPPORTED); in igc_probe()
7313 strscpy(netdev->name, "eth%d", sizeof(netdev->name)); in igc_probe()
7322 adapter->ei = *ei; in igc_probe()
7326 netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); in igc_probe()
7328 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); in igc_probe()
7330 hw->dev_spec._base.eee_enable = false; in igc_probe()
7331 adapter->flags &= ~IGC_FLAG_EEE; in igc_probe()
7334 pm_runtime_put_noidle(&pdev->dev); in igc_probe()
7342 adapter->leds_available = false; in igc_probe()
7344 adapter->leds_available = true; in igc_probe()
7358 iounmap(adapter->io_addr); in igc_probe()
7370 * igc_remove - Device Removal Routine
7375 * Hot-Plug event, or because the driver is going to be removed from
7383 pm_runtime_get_noresume(&pdev->dev); in igc_remove()
7392 set_bit(__IGC_DOWN, &adapter->state); in igc_remove()
7394 timer_delete_sync(&adapter->watchdog_timer); in igc_remove()
7395 timer_delete_sync(&adapter->phy_info_timer); in igc_remove()
7397 cancel_work_sync(&adapter->reset_task); in igc_remove()
7398 cancel_work_sync(&adapter->watchdog_task); in igc_remove()
7399 hrtimer_cancel(&adapter->hrtimer); in igc_remove()
7401 if (IS_ENABLED(CONFIG_IGC_LEDS) && adapter->leds_available) in igc_remove()
7411 pci_iounmap(pdev, adapter->io_addr); in igc_remove()
7424 u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol; in __igc_shutdown()
7425 struct igc_hw *hw = &adapter->hw; in __igc_shutdown()
7448 /* turn on all-multi mode if wake on multicast is enabled */ in __igc_shutdown()
7469 wake = wufc || adapter->en_mng_pt; in __igc_shutdown()
7471 igc_power_down_phy_copper_base(&adapter->hw); in __igc_shutdown()
7496 struct igc_hw *hw = &adapter->hw; in igc_deliver_wake_packet()
7514 /* Ensure reads are 32-bit aligned */ in igc_deliver_wake_packet()
7517 memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl); in igc_deliver_wake_packet()
7519 skb->protocol = eth_type_trans(skb, netdev); in igc_deliver_wake_packet()
7528 struct igc_hw *hw = &adapter->hw; in __igc_resume()
7536 return -ENODEV; in __igc_resume()
7552 return -ENOMEM; in __igc_resume()
7604 return -EBUSY; in igc_runtime_idle()
7620 * igc_io_error_detected - called when PCI error is detected
7651 * igc_io_slot_reset - called after the PCI bus has been reset.
7654 * Restart the card from scratch, as if from a cold-boot. Implementation
7655 * resembles the first-half of the __igc_resume routine.
7661 struct igc_hw *hw = &adapter->hw; in igc_io_slot_reset()
7665 netdev_err(netdev, "Could not re-enable PCI device after reset\n"); in igc_io_slot_reset()
7679 * so we should re-assign it here. in igc_io_slot_reset()
7681 hw->hw_addr = adapter->io_addr; in igc_io_slot_reset()
7692 * igc_io_resume - called when traffic can start to flow again.
7697 * second-half of the __igc_resume routine.
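/*
 * Editor's sketch (definition not shown in this excerpt): the three igc_io_*
 * callbacks documented above are normally tied together in a
 * struct pci_error_handlers referenced from the driver's struct pci_driver;
 * during AER recovery the PCI core calls error_detected, then slot_reset,
 * then resume.  The variable name below is an assumption.
 */
#include <linux/pci.h>

static const struct pci_error_handlers example_igc_err_handler = {
	.error_detected	= igc_io_error_detected,
	.slot_reset	= igc_io_slot_reset,
	.resume		= igc_io_resume,
};

/* hooked up from the pci_driver definition, e.g. .err_handler = &example_igc_err_handler */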
7743 * igc_reinit_queues - reinitialize the adapter's queues
7748 struct net_device *netdev = adapter->netdev; in igc_reinit_queues()
7758 return -ENOMEM; in igc_reinit_queues()
7768 * igc_get_hw_dev - return device
7775 struct igc_adapter *adapter = hw->back; in igc_get_hw_dev()
7777 return adapter->netdev; in igc_get_hw_dev()
7782 struct igc_hw *hw = &ring->q_vector->adapter->hw; in igc_disable_rx_ring_hw()
7783 u8 idx = ring->reg_idx; in igc_disable_rx_ring_hw()
7800 struct igc_adapter *adapter = ring->q_vector->adapter; in igc_enable_rx_ring()
7804 if (ring->xsk_pool) in igc_enable_rx_ring()
7818 struct igc_adapter *adapter = ring->q_vector->adapter; in igc_enable_tx_ring()
7824 * igc_init_module - Driver Registration Routine
7843 * igc_exit_module - Driver Exit Cleanup Routine
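/*
 * Editor's sketch (bodies not shown in this excerpt): the two module hooks
 * documented above conventionally just register and unregister the PCI
 * driver with the PCI core.  "igc_driver" is the driver's struct pci_driver,
 * assumed here because its definition falls outside this excerpt.
 */
#include <linux/module.h>
#include <linux/pci.h>

static int __init example_igc_init_module(void)
{
	/* hands the id table and probe/remove hooks to the PCI core */
	return pci_register_driver(&igc_driver);
}
module_init(example_igc_init_module);

static void __exit example_igc_exit_module(void)
{
	pci_unregister_driver(&igc_driver);
}
module_exit(example_igc_exit_module);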