Lines matching "dcb" and "algorithm" (drivers/net/ethernet/intel/ixgbe/ixgbe_main.c)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2024 Intel Corporation. */
66 "Copyright (c) 1999-2016 Intel Corporation.";
82 /* ixgbe_pci_tbl - PCI Device ID Table
162 …"Maximum number of virtual functions to allocate per physical function - default is zero and maxim…
168 "Allow unsupported and untested SFP+ modules on 82599-based adapters");
171 static int debug = -1;
193 return dev && (dev->netdev_ops == &ixgbe_netdev_ops); in netif_is_ixgbe()
202 parent_bus = adapter->pdev->bus->parent; in ixgbe_read_pci_cfg_word_parent()
204 return -1; in ixgbe_read_pci_cfg_word_parent()
206 parent_dev = parent_bus->self; in ixgbe_read_pci_cfg_word_parent()
208 return -1; in ixgbe_read_pci_cfg_word_parent()
211 return -1; in ixgbe_read_pci_cfg_word_parent()
215 ixgbe_check_cfg_remove(&adapter->hw, parent_dev)) in ixgbe_read_pci_cfg_word_parent()
216 return -1; in ixgbe_read_pci_cfg_word_parent()
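/*
 * A sketch of the failure convention used above (assuming the upstream
 * ixgbe.h values): config reads against a surprise-removed device come
 * back as all ones, so an all-ones result is re-checked with
 * ixgbe_check_cfg_remove() before it is trusted.
 *
 *	#define IXGBE_FAILED_READ_CFG_WORD	0xffffU
 *	#define IXGBE_FAILED_READ_CFG_DWORD	0xffffffffU
 *
 *	u16 value;
 *
 *	pcie_capability_read_word(parent_dev, reg, &value);
 *	if (value == IXGBE_FAILED_READ_CFG_WORD &&
 *	    ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
 *		return -1;
 *
 * i.e. -1 here means "the device is gone", not a stale register value.
 */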
222 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_get_parent_bus_info()
226 hw->bus.type = ixgbe_bus_type_pci_express; in ixgbe_get_parent_bus_info()
237 hw->bus.width = ixgbe_convert_bus_width(link_status); in ixgbe_get_parent_bus_info()
238 hw->bus.speed = ixgbe_convert_bus_speed(link_status); in ixgbe_get_parent_bus_info()
244 * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
247 * This function is used by probe to determine whether a device's PCI-Express
257 switch (hw->device_id) { in ixgbe_pcie_from_parent()
269 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_check_minimum_link()
276 if (hw->bus.type == ixgbe_bus_type_internal) in ixgbe_check_minimum_link()
280 if (ixgbe_pcie_from_parent(&adapter->hw)) in ixgbe_check_minimum_link()
281 pdev = adapter->pdev->bus->parent->self; in ixgbe_check_minimum_link()
283 pdev = adapter->pdev; in ixgbe_check_minimum_link()
290 if (!test_bit(__IXGBE_DOWN, &adapter->state) && in ixgbe_service_event_schedule()
291 !test_bit(__IXGBE_REMOVING, &adapter->state) && in ixgbe_service_event_schedule()
292 !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state)) in ixgbe_service_event_schedule()
293 queue_work(ixgbe_wq, &adapter->service_task); in ixgbe_service_event_schedule()
298 struct ixgbe_adapter *adapter = hw->back; in ixgbe_remove_adapter()
300 if (!hw->hw_addr) in ixgbe_remove_adapter()
302 hw->hw_addr = NULL; in ixgbe_remove_adapter()
304 if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state)) in ixgbe_remove_adapter()
314 reg_addr = READ_ONCE(hw->hw_addr); in ixgbe_check_remove()
337 * ixgbe_read_reg - Read from device register
351 u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); in ixgbe_read_reg()
356 if (unlikely(hw->phy.nw_mng_if_sel & in ixgbe_read_reg()
372 adapter = hw->back; in ixgbe_read_reg()
397 struct ixgbe_adapter *adapter = hw->back; in ixgbe_read_pci_cfg_word()
400 if (ixgbe_removed(hw->hw_addr)) in ixgbe_read_pci_cfg_word()
402 pci_read_config_word(adapter->pdev, reg, &value); in ixgbe_read_pci_cfg_word()
404 ixgbe_check_cfg_remove(hw, adapter->pdev)) in ixgbe_read_pci_cfg_word()
412 struct ixgbe_adapter *adapter = hw->back; in ixgbe_read_pci_cfg_dword()
415 if (ixgbe_removed(hw->hw_addr)) in ixgbe_read_pci_cfg_dword()
417 pci_read_config_dword(adapter->pdev, reg, &value); in ixgbe_read_pci_cfg_dword()
419 ixgbe_check_cfg_remove(hw, adapter->pdev)) in ixgbe_read_pci_cfg_dword()
427 struct ixgbe_adapter *adapter = hw->back; in ixgbe_write_pci_cfg_word()
429 if (ixgbe_removed(hw->hw_addr)) in ixgbe_write_pci_cfg_word()
431 pci_write_config_word(adapter->pdev, reg, value); in ixgbe_write_pci_cfg_word()
436 BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); in ixgbe_service_event_complete()
440 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); in ixgbe_service_event_complete()
482 * ixgbe_regdump - register printout routine
490 switch (reginfo->ofs) { in ixgbe_regdump()
548 pr_info("%-15s %08x\n", in ixgbe_regdump()
549 reginfo->name, IXGBE_READ_REG(hw, reginfo->ofs)); in ixgbe_regdump()
559 snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i, i + 7); in ixgbe_regdump()
562 pr_err("%-15s%s\n", rname, buf); in ixgbe_regdump()
571 tx_buffer = &ring->tx_buffer_info[ring->next_to_clean]; in ixgbe_print_buffer()
573 n, ring->next_to_use, ring->next_to_clean, in ixgbe_print_buffer()
576 tx_buffer->next_to_watch, in ixgbe_print_buffer()
577 (u64)tx_buffer->time_stamp); in ixgbe_print_buffer()
581 * ixgbe_dump - Print registers, tx-rings and rx-rings
585 struct net_device *netdev = adapter->netdev; in ixgbe_dump()
586 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_dump()
603 dev_info(&adapter->pdev->dev, "Net device Info\n"); in ixgbe_dump()
606 pr_info("%-15s %016lX %016lX\n", in ixgbe_dump()
607 netdev->name, in ixgbe_dump()
608 netdev->state, in ixgbe_dump()
613 dev_info(&adapter->pdev->dev, "Register Dump\n"); in ixgbe_dump()
616 reginfo->name; reginfo++) { in ixgbe_dump()
624 dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); in ixgbe_dump()
626 "Queue [NTU] [NTC] [bi(ntc)->dma ]", in ixgbe_dump()
628 for (n = 0; n < adapter->num_tx_queues; n++) { in ixgbe_dump()
629 ring = adapter->tx_ring[n]; in ixgbe_dump()
633 for (n = 0; n < adapter->num_xdp_queues; n++) { in ixgbe_dump()
634 ring = adapter->xdp_ring[n]; in ixgbe_dump()
642 dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); in ixgbe_dump()
647 * +--------------------------------------------------------------+ in ixgbe_dump()
649 * +--------------------------------------------------------------+ in ixgbe_dump()
651 * +--------------------------------------------------------------+ in ixgbe_dump()
654 * 82598 Advanced Transmit Descriptor (Write-Back Format) in ixgbe_dump()
655 * +--------------------------------------------------------------+ in ixgbe_dump()
657 * +--------------------------------------------------------------+ in ixgbe_dump()
659 * +--------------------------------------------------------------+ in ixgbe_dump()
663 * +--------------------------------------------------------------+ in ixgbe_dump()
665 * +--------------------------------------------------------------+ in ixgbe_dump()
667 * +--------------------------------------------------------------+ in ixgbe_dump()
670 * 82599+ Advanced Transmit Descriptor (Write-Back Format) in ixgbe_dump()
671 * +--------------------------------------------------------------+ in ixgbe_dump()
673 * +--------------------------------------------------------------+ in ixgbe_dump()
675 * +--------------------------------------------------------------+ in ixgbe_dump()
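/*
 * The diagrams above describe this 16-byte layout (a sketch matching the
 * union ixgbe_adv_tx_desc definition in ixgbe_type.h; "read" is the view
 * the driver fills in, "wb" is the hardware write-back view of the same
 * slot):
 */
union ixgbe_adv_tx_desc {
	struct {
		__le64 buffer_addr;	/* Address of descriptor's data buf */
		__le32 cmd_type_len;
		__le32 olinfo_status;
	} read;
	struct {
		__le64 rsvd;		/* Reserved */
		__le32 nxtseq_seed;
		__le32 status;		/* DD lands here on write-back */
	} wb;
};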
679 for (n = 0; n < adapter->num_tx_queues; n++) { in ixgbe_dump()
680 ring = adapter->tx_ring[n]; in ixgbe_dump()
681 pr_info("------------------------------------\n"); in ixgbe_dump()
682 pr_info("TX QUEUE INDEX = %d\n", ring->queue_index); in ixgbe_dump()
683 pr_info("------------------------------------\n"); in ixgbe_dump()
686 "[PlPOIdStDDt Ln] [bi->dma ] ", in ixgbe_dump()
687 "leng", "ntw", "timestamp", "bi->skb"); in ixgbe_dump()
689 for (i = 0; ring->desc && (i < ring->count); i++) { in ixgbe_dump()
691 tx_buffer = &ring->tx_buffer_info[i]; in ixgbe_dump()
696 if (i == ring->next_to_use && in ixgbe_dump()
697 i == ring->next_to_clean) in ixgbe_dump()
699 else if (i == ring->next_to_use) in ixgbe_dump()
701 else if (i == ring->next_to_clean) in ixgbe_dump()
707 le64_to_cpu((__force __le64)u0->a), in ixgbe_dump()
708 le64_to_cpu((__force __le64)u0->b), in ixgbe_dump()
711 tx_buffer->next_to_watch, in ixgbe_dump()
712 (u64)tx_buffer->time_stamp, in ixgbe_dump()
713 tx_buffer->skb, in ixgbe_dump()
717 tx_buffer->skb) in ixgbe_dump()
720 tx_buffer->skb->data, in ixgbe_dump()
729 dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); in ixgbe_dump()
731 for (n = 0; n < adapter->num_rx_queues; n++) { in ixgbe_dump()
732 rx_ring = adapter->rx_ring[n]; in ixgbe_dump()
734 n, rx_ring->next_to_use, rx_ring->next_to_clean); in ixgbe_dump()
741 dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); in ixgbe_dump()
747 * +-----------------------------------------------------+ in ixgbe_dump()
749 * +----------------------------------------------+------+ in ixgbe_dump()
751 * +-----------------------------------------------------+ in ixgbe_dump()
754 * 82598 Advanced Receive Descriptor (Write-Back) Format in ixgbe_dump()
757 * +------------------------------------------------------+ in ixgbe_dump()
761 * +------------------------------------------------------+ in ixgbe_dump()
763 * +------------------------------------------------------+ in ixgbe_dump()
768 * +-----------------------------------------------------+ in ixgbe_dump()
770 * +----------------------------------------------+------+ in ixgbe_dump()
772 * +-----------------------------------------------------+ in ixgbe_dump()
775 * 82599+ Advanced Receive Descriptor (Write-Back) Format in ixgbe_dump()
778 * +------------------------------------------------------+ in ixgbe_dump()
779 * 0 |RSS / Frag Checksum|SPH| HDR_LEN |RSC- |Packet| RSS | in ixgbe_dump()
782 * +------------------------------------------------------+ in ixgbe_dump()
784 * +------------------------------------------------------+ in ixgbe_dump()
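/*
 * Likewise for Rx: the diagrams map onto union ixgbe_adv_rx_desc (a
 * sketch matching ixgbe_type.h; the driver posts the "read" view, the
 * hardware overwrites the same 16 bytes with the "wb" view):
 */
union ixgbe_adv_rx_desc {
	struct {
		__le64 pkt_addr;	/* Packet buffer address */
		__le64 hdr_addr;	/* Header buffer address */
	} read;
	struct {
		struct {
			union {
				__le32 data;
				struct {
					__le16 pkt_info; /* RSS, pkt type */
					__le16 hdr_info; /* splithdr, hdrlen */
				} hs_rss;
			} lo_dword;
			union {
				__le32 rss;		/* RSS hash */
				struct {
					__le16 ip_id;	/* IP id */
					__le16 csum;	/* packet checksum */
				} csum_ip;
			} hi_dword;
		} lower;
		struct {
			__le32 status_error;	/* ext status/error */
			__le16 length;		/* packet length */
			__le16 vlan;		/* VLAN tag */
		} upper;
	} wb;				/* writeback */
};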
788 for (n = 0; n < adapter->num_rx_queues; n++) { in ixgbe_dump()
789 rx_ring = adapter->rx_ring[n]; in ixgbe_dump()
790 pr_info("------------------------------------\n"); in ixgbe_dump()
791 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); in ixgbe_dump()
792 pr_info("------------------------------------\n"); in ixgbe_dump()
795 "[ HeadBuf DD] [bi->dma ] [bi->skb ] ", in ixgbe_dump()
796 "<-- Adv Rx Read format"); in ixgbe_dump()
799 "[vl er S cks ln] ---------------- [bi->skb ] ", in ixgbe_dump()
800 "<-- Adv Rx Write-Back format"); in ixgbe_dump()
802 for (i = 0; i < rx_ring->count; i++) { in ixgbe_dump()
805 if (i == rx_ring->next_to_use) in ixgbe_dump()
807 else if (i == rx_ring->next_to_clean) in ixgbe_dump()
812 rx_buffer_info = &rx_ring->rx_buffer_info[i]; in ixgbe_dump()
815 if (rx_desc->wb.upper.length) { in ixgbe_dump()
817 pr_info("RWB[0x%03X] %016llX %016llX ---------------- %p%s\n", in ixgbe_dump()
819 le64_to_cpu((__force __le64)u0->a), in ixgbe_dump()
820 le64_to_cpu((__force __le64)u0->b), in ixgbe_dump()
821 rx_buffer_info->skb, in ixgbe_dump()
826 le64_to_cpu((__force __le64)u0->a), in ixgbe_dump()
827 le64_to_cpu((__force __le64)u0->b), in ixgbe_dump()
828 (u64)rx_buffer_info->dma, in ixgbe_dump()
829 rx_buffer_info->skb, in ixgbe_dump()
833 rx_buffer_info->dma) { in ixgbe_dump()
836 page_address(rx_buffer_info->page) + in ixgbe_dump()
837 rx_buffer_info->page_offset, in ixgbe_dump()
850 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); in ixgbe_release_hw_control()
851 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, in ixgbe_release_hw_control()
860 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); in ixgbe_get_hw_control()
861 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, in ixgbe_get_hw_control()
866 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
868 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
877 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_set_ivar()
878 switch (hw->mac.type) { in ixgbe_set_ivar()
881 if (direction == -1) in ixgbe_set_ivar()
895 if (direction == -1) { in ixgbe_set_ivar()
899 ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC); in ixgbe_set_ivar()
902 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar); in ixgbe_set_ivar()
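/*
 * Worked example for the non-82598 IVAR path above, assuming the usual
 * upstream layout (one byte per cause, two queue pairs per register,
 * index = 16 * (queue & 1) + 8 * direction):
 *
 *	direction = 0 (Rx), queue = 5, msix_vector = 3
 *	index = 16 * (5 & 1) + 8 * 0 = 16
 *	reg   = IXGBE_IVAR(5 >> 1) = IXGBE_IVAR(2)
 *
 * so byte 2 of IVAR(2) is set to (3 | IXGBE_IVAR_ALLOC_VAL), routing Rx
 * queue 5 to MSI-X vector 3.
 */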
924 switch (adapter->hw.mac.type) { in ixgbe_irq_rearm_queues()
927 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); in ixgbe_irq_rearm_queues()
936 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); in ixgbe_irq_rearm_queues()
938 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); in ixgbe_irq_rearm_queues()
947 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_update_xoff_rx_lfc()
948 struct ixgbe_hw_stats *hwstats = &adapter->stats; in ixgbe_update_xoff_rx_lfc()
952 if ((hw->fc.current_mode != ixgbe_fc_full) && in ixgbe_update_xoff_rx_lfc()
953 (hw->fc.current_mode != ixgbe_fc_rx_pause)) in ixgbe_update_xoff_rx_lfc()
956 switch (hw->mac.type) { in ixgbe_update_xoff_rx_lfc()
963 hwstats->lxoffrxc += data; in ixgbe_update_xoff_rx_lfc()
969 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbe_update_xoff_rx_lfc()
971 &adapter->tx_ring[i]->state); in ixgbe_update_xoff_rx_lfc()
976 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_update_xoff_received()
977 struct ixgbe_hw_stats *hwstats = &adapter->stats; in ixgbe_update_xoff_received()
981 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; in ixgbe_update_xoff_received()
983 if (adapter->ixgbe_ieee_pfc) in ixgbe_update_xoff_received()
984 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); in ixgbe_update_xoff_received()
986 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) { in ixgbe_update_xoff_received()
995 switch (hw->mac.type) { in ixgbe_update_xoff_received()
1002 hwstats->pxoffrxc[i] += pxoffrxc; in ixgbe_update_xoff_received()
1004 tc = netdev_get_prio_tc_map(adapter->netdev, i); in ixgbe_update_xoff_received()
1009 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbe_update_xoff_received()
1010 struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; in ixgbe_update_xoff_received()
1012 tc = tx_ring->dcb_tc; in ixgbe_update_xoff_received()
1014 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); in ixgbe_update_xoff_received()
1017 for (i = 0; i < adapter->num_xdp_queues; i++) { in ixgbe_update_xoff_received()
1018 struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i]; in ixgbe_update_xoff_received()
1020 tc = xdp_ring->dcb_tc; in ixgbe_update_xoff_received()
1022 clear_bit(__IXGBE_HANG_CHECK_ARMED, &xdp_ring->state); in ixgbe_update_xoff_received()
1028 return ring->stats.packets; in ixgbe_get_tx_completed()
1035 head = ring->next_to_clean; in ixgbe_get_tx_pending()
1036 tail = ring->next_to_use; in ixgbe_get_tx_pending()
1038 return ((head <= tail) ? tail : tail + ring->count) - head; in ixgbe_get_tx_pending()
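/*
 * The pending count above is plain ring arithmetic: once next_to_use
 * (tail) wraps past the end of the ring, head <= tail no longer holds
 * and the ring size must be added back. A self-contained restatement
 * (hypothetical helper, same math):
 */
static inline u16 ring_pending(u16 head, u16 tail, u16 count)
{
	/* e.g. head = 250, tail = 10, count = 256: (10 + 256) - 250 = 16 */
	return ((head <= tail) ? tail : tail + count) - head;
}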
1042 * ixgbe_get_vf_idx - provide VF index number based on queue index
1053 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_get_vf_idx()
1057 if (queue >= adapter->num_tx_queues) in ixgbe_get_vf_idx()
1058 return -EINVAL; in ixgbe_get_vf_idx()
1075 return -EINVAL; in ixgbe_get_vf_idx()
1086 u32 tx_done_old = tx_ring->tx_stats.tx_done_old; in ixgbe_check_tx_hang()
1106 &tx_ring->state); in ixgbe_check_tx_hang()
1108 tx_ring->tx_stats.tx_done_old = tx_done; in ixgbe_check_tx_hang()
1110 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); in ixgbe_check_tx_hang()
1116 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
1123 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { in ixgbe_tx_timeout_reset()
1124 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); in ixgbe_tx_timeout_reset()
1131 * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
1140 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_tx_maxrate()
1164 * ixgbe_update_tx_ring_stats - Update Tx ring specific counters
1174 u64_stats_update_begin(&tx_ring->syncp); in ixgbe_update_tx_ring_stats()
1175 tx_ring->stats.bytes += bytes; in ixgbe_update_tx_ring_stats()
1176 tx_ring->stats.packets += pkts; in ixgbe_update_tx_ring_stats()
1177 u64_stats_update_end(&tx_ring->syncp); in ixgbe_update_tx_ring_stats()
1178 q_vector->tx.total_bytes += bytes; in ixgbe_update_tx_ring_stats()
1179 q_vector->tx.total_packets += pkts; in ixgbe_update_tx_ring_stats()
1183 * ixgbe_update_rx_ring_stats - Update Rx ring specific counters
1193 u64_stats_update_begin(&rx_ring->syncp); in ixgbe_update_rx_ring_stats()
1194 rx_ring->stats.bytes += bytes; in ixgbe_update_rx_ring_stats()
1195 rx_ring->stats.packets += pkts; in ixgbe_update_rx_ring_stats()
1196 u64_stats_update_end(&rx_ring->syncp); in ixgbe_update_rx_ring_stats()
1197 q_vector->rx.total_bytes += bytes; in ixgbe_update_rx_ring_stats()
1198 q_vector->rx.total_packets += pkts; in ixgbe_update_rx_ring_stats()
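/*
 * The u64_stats_update_begin()/end() pairs above are the writer side of
 * a seqcount; a reader must loop until it sees a stable snapshot. A
 * minimal sketch of a consumer (hypothetical helper, standard u64_stats
 * API):
 */
static void ixgbe_read_ring_stats(struct ixgbe_ring *ring,
				  u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&ring->syncp);
		*pkts = ring->stats.packets;
		*bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry(&ring->syncp, start));
}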
1202 * ixgbe_pf_handle_tx_hang - handle Tx hang on PF
1211 struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev); in ixgbe_pf_handle_tx_hang()
1212 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_pf_handle_tx_hang()
1222 tx_ring->queue_index, in ixgbe_pf_handle_tx_hang()
1223 IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), in ixgbe_pf_handle_tx_hang()
1224 IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)), in ixgbe_pf_handle_tx_hang()
1225 tx_ring->next_to_use, next, in ixgbe_pf_handle_tx_hang()
1226 tx_ring->tx_buffer_info[next].time_stamp, jiffies); in ixgbe_pf_handle_tx_hang()
1228 netif_stop_subqueue(tx_ring->netdev, in ixgbe_pf_handle_tx_hang()
1229 tx_ring->queue_index); in ixgbe_pf_handle_tx_hang()
1233 * ixgbe_vf_handle_tx_hang - handle Tx hang on VF
1242 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_vf_handle_tx_hang()
1244 if (adapter->hw.mac.type != ixgbe_mac_e610) in ixgbe_vf_handle_tx_hang()
1249 hw->bus.func, vf, adapter->vfinfo[vf].vf_mac_addresses); in ixgbe_vf_handle_tx_hang()
1251 adapter->tx_hang_count[vf]++; in ixgbe_vf_handle_tx_hang()
1252 if (adapter->tx_hang_count[vf] == IXGBE_MAX_TX_VF_HANGS) { in ixgbe_vf_handle_tx_hang()
1255 adapter->tx_hang_count[vf] = 0; in ixgbe_vf_handle_tx_hang()
1266 * ixgbe_check_illegal_queue - search for queue with illegal packet
1279 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_check_illegal_queue()
1316 * ixgbe_handle_mdd_event - handle mdd event
1328 if (adapter->vfinfo && ixgbe_check_mdd_event(adapter)) { in ixgbe_handle_mdd_event()
1330 if (!ixgbe_get_vf_idx(adapter, tx_ring->queue_index, &vf)) in ixgbe_handle_mdd_event()
1344 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
1352 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_clean_tx_irq()
1356 unsigned int budget = q_vector->tx.work_limit; in ixgbe_clean_tx_irq()
1357 unsigned int i = tx_ring->next_to_clean; in ixgbe_clean_tx_irq()
1360 if (test_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_clean_tx_irq()
1363 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbe_clean_tx_irq()
1365 i -= tx_ring->count; in ixgbe_clean_tx_irq()
1368 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; in ixgbe_clean_tx_irq()
1378 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) in ixgbe_clean_tx_irq()
1382 tx_buffer->next_to_watch = NULL; in ixgbe_clean_tx_irq()
1385 total_bytes += tx_buffer->bytecount; in ixgbe_clean_tx_irq()
1386 total_packets += tx_buffer->gso_segs; in ixgbe_clean_tx_irq()
1387 if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC) in ixgbe_clean_tx_irq()
1392 xdp_return_frame(tx_buffer->xdpf); in ixgbe_clean_tx_irq()
1394 napi_consume_skb(tx_buffer->skb, napi_budget); in ixgbe_clean_tx_irq()
1397 dma_unmap_single(tx_ring->dev, in ixgbe_clean_tx_irq()
1411 i -= tx_ring->count; in ixgbe_clean_tx_irq()
1412 tx_buffer = tx_ring->tx_buffer_info; in ixgbe_clean_tx_irq()
1418 dma_unmap_page(tx_ring->dev, in ixgbe_clean_tx_irq()
1431 i -= tx_ring->count; in ixgbe_clean_tx_irq()
1432 tx_buffer = tx_ring->tx_buffer_info; in ixgbe_clean_tx_irq()
1440 budget--; in ixgbe_clean_tx_irq()
1443 i += tx_ring->count; in ixgbe_clean_tx_irq()
1444 tx_ring->next_to_clean = i; in ixgbe_clean_tx_irq()
1447 adapter->tx_ipsec += total_ipsec; in ixgbe_clean_tx_irq()
1453 if (adapter->hw.mac.type == ixgbe_mac_e610) in ixgbe_clean_tx_irq()
1460 adapter->tx_timeout_count + 1, tx_ring->queue_index); in ixgbe_clean_tx_irq()
1470 txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); in ixgbe_clean_tx_irq()
1474 !netif_carrier_ok(tx_ring->netdev) || in ixgbe_clean_tx_irq()
1475 test_bit(__IXGBE_DOWN, &adapter->state))) in ixgbe_clean_tx_irq()
1476 ++tx_ring->tx_stats.restart_queue; in ixgbe_clean_tx_irq()
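/*
 * Completion protocol used above: tx_buffer->next_to_watch points at
 * the final (EOP) descriptor of a send, and hardware sets
 * IXGBE_TXD_STAT_DD in that descriptor's write-back status only once
 * the whole send has been consumed, so a single DD test retires the
 * entire chain of buffers up to and including the EOP slot.
 */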
1486 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_update_tx_dca()
1490 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) in ixgbe_update_tx_dca()
1491 txctrl = dca3_get_tag(tx_ring->dev, cpu); in ixgbe_update_tx_dca()
1493 switch (hw->mac.type) { in ixgbe_update_tx_dca()
1495 reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx); in ixgbe_update_tx_dca()
1499 reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx); in ixgbe_update_tx_dca()
1523 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_update_rx_dca()
1525 u8 reg_idx = rx_ring->reg_idx; in ixgbe_update_rx_dca()
1527 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) in ixgbe_update_rx_dca()
1528 rxctrl = dca3_get_tag(rx_ring->dev, cpu); in ixgbe_update_rx_dca()
1530 switch (hw->mac.type) { in ixgbe_update_rx_dca()
1553 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_update_dca()
1557 if (q_vector->cpu == cpu) in ixgbe_update_dca()
1560 ixgbe_for_each_ring(ring, q_vector->tx) in ixgbe_update_dca()
1563 ixgbe_for_each_ring(ring, q_vector->rx) in ixgbe_update_dca()
1566 q_vector->cpu = cpu; in ixgbe_update_dca()
1576 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) in ixgbe_setup_dca()
1577 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, in ixgbe_setup_dca()
1580 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, in ixgbe_setup_dca()
1583 for (i = 0; i < adapter->num_q_vectors; i++) { in ixgbe_setup_dca()
1584 adapter->q_vector[i]->cpu = -1; in ixgbe_setup_dca()
1585 ixgbe_update_dca(adapter->q_vector[i]); in ixgbe_setup_dca()
1594 if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE)) in __ixgbe_notify_dca()
1600 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) in __ixgbe_notify_dca()
1603 adapter->flags |= IXGBE_FLAG_DCA_ENABLED; in __ixgbe_notify_dca()
1604 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, in __ixgbe_notify_dca()
1610 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { in __ixgbe_notify_dca()
1612 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; in __ixgbe_notify_dca()
1613 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, in __ixgbe_notify_dca()
1636 if (!(ring->netdev->features & NETIF_F_RXHASH)) in ixgbe_rx_hash()
1639 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & in ixgbe_rx_hash()
1645 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), in ixgbe_rx_hash()
1652 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
1661 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; in ixgbe_rx_is_fcoe()
1663 return test_bit(__IXGBE_RX_FCOE, &ring->state) && in ixgbe_rx_is_fcoe()
1671 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
1680 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; in ixgbe_rx_checksum()
1686 if (!(ring->netdev->features & NETIF_F_RXCSUM)) in ixgbe_rx_checksum()
1692 skb->encapsulation = 1; in ixgbe_rx_checksum()
1698 ring->rx_stats.csum_err++; in ixgbe_rx_checksum()
1711 test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state)) in ixgbe_rx_checksum()
1714 ring->rx_stats.csum_err++; in ixgbe_rx_checksum()
1719 skb->ip_summed = CHECKSUM_UNNECESSARY; in ixgbe_rx_checksum()
1725 skb->ip_summed = CHECKSUM_NONE; in ixgbe_rx_checksum()
1729 skb->csum_level = 1; in ixgbe_rx_checksum()
1741 struct page *page = bi->page; in ixgbe_alloc_mapped_page()
1751 rx_ring->rx_stats.alloc_rx_page_failed++; in ixgbe_alloc_mapped_page()
1756 dma = dma_map_page_attrs(rx_ring->dev, page, 0, in ixgbe_alloc_mapped_page()
1765 if (dma_mapping_error(rx_ring->dev, dma)) { in ixgbe_alloc_mapped_page()
1768 rx_ring->rx_stats.alloc_rx_page_failed++; in ixgbe_alloc_mapped_page()
1772 bi->dma = dma; in ixgbe_alloc_mapped_page()
1773 bi->page = page; in ixgbe_alloc_mapped_page()
1774 bi->page_offset = rx_ring->rx_offset; in ixgbe_alloc_mapped_page()
1775 page_ref_add(page, USHRT_MAX - 1); in ixgbe_alloc_mapped_page()
1776 bi->pagecnt_bias = USHRT_MAX; in ixgbe_alloc_mapped_page()
1777 rx_ring->rx_stats.alloc_rx_page++; in ixgbe_alloc_mapped_page()
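/*
 * Note on the USHRT_MAX bias above: the driver takes USHRT_MAX - 1 page
 * references up front and tracks how many it has "spent" in
 * pagecnt_bias, so recycling an Rx buffer only decrements a local
 * counter instead of the atomic page refcount. The page stays reusable
 * while page_count(page) - pagecnt_bias == 1, which is exactly the test
 * in ixgbe_can_reuse_rx_page() further down.
 */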
1783 * ixgbe_alloc_rx_buffers - Replace used receive buffers
1791 u16 i = rx_ring->next_to_use; in ixgbe_alloc_rx_buffers()
1799 bi = &rx_ring->rx_buffer_info[i]; in ixgbe_alloc_rx_buffers()
1800 i -= rx_ring->count; in ixgbe_alloc_rx_buffers()
1809 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in ixgbe_alloc_rx_buffers()
1810 bi->page_offset, bufsz, in ixgbe_alloc_rx_buffers()
1815 * because each write-back erases this info. in ixgbe_alloc_rx_buffers()
1817 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in ixgbe_alloc_rx_buffers()
1824 bi = rx_ring->rx_buffer_info; in ixgbe_alloc_rx_buffers()
1825 i -= rx_ring->count; in ixgbe_alloc_rx_buffers()
1829 rx_desc->wb.upper.length = 0; in ixgbe_alloc_rx_buffers()
1831 cleaned_count--; in ixgbe_alloc_rx_buffers()
1834 i += rx_ring->count; in ixgbe_alloc_rx_buffers()
1836 if (rx_ring->next_to_use != i) { in ixgbe_alloc_rx_buffers()
1837 rx_ring->next_to_use = i; in ixgbe_alloc_rx_buffers()
1840 rx_ring->next_to_alloc = i; in ixgbe_alloc_rx_buffers()
1844 * applicable for weak-ordered memory model archs, in ixgbe_alloc_rx_buffers()
1845 * such as IA-64). in ixgbe_alloc_rx_buffers()
1848 writel(i, rx_ring->tail); in ixgbe_alloc_rx_buffers()
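/*
 * The "i -= rx_ring->count" above is the usual Intel-driver negative
 * index trick: i runs from -count up to 0, so the wrap check in the
 * refill loop is a cheap compare against zero rather than against the
 * ring size, and adding count back at the end recovers the real
 * next_to_use.
 */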
1858 skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len), in ixgbe_set_rsc_gso_size()
1859 IXGBE_CB(skb)->append_cnt); in ixgbe_set_rsc_gso_size()
1860 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; in ixgbe_set_rsc_gso_size()
1867 if (!IXGBE_CB(skb)->append_cnt) in ixgbe_update_rsc_stats()
1870 rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt; in ixgbe_update_rsc_stats()
1871 rx_ring->rx_stats.rsc_flush++; in ixgbe_update_rsc_stats()
1876 IXGBE_CB(skb)->append_cnt = 0; in ixgbe_update_rsc_stats()
1880 * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
1893 struct net_device *dev = rx_ring->netdev; in ixgbe_process_skb_fields()
1894 u32 flags = rx_ring->q_vector->adapter->flags; in ixgbe_process_skb_fields()
1905 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && in ixgbe_process_skb_fields()
1907 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); in ixgbe_process_skb_fields()
1916 skb_record_rx_queue(skb, rx_ring->queue_index); in ixgbe_process_skb_fields()
1918 macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true, in ixgbe_process_skb_fields()
1921 skb->protocol = eth_type_trans(skb, dev); in ixgbe_process_skb_fields()
1927 napi_gro_receive(&q_vector->napi, skb); in ixgbe_rx_skb()
1931 * ixgbe_is_non_eop - process handling of non-EOP buffers
1939 * that this is in fact a non-EOP buffer.
1945 u32 ntc = rx_ring->next_to_clean + 1; in ixgbe_is_non_eop()
1948 ntc = (ntc < rx_ring->count) ? ntc : 0; in ixgbe_is_non_eop()
1949 rx_ring->next_to_clean = ntc; in ixgbe_is_non_eop()
1955 __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data & in ixgbe_is_non_eop()
1962 IXGBE_CB(skb)->append_cnt += rsc_cnt - 1; in ixgbe_is_non_eop()
1965 ntc = le32_to_cpu(rx_desc->wb.upper.status_error); in ixgbe_is_non_eop()
1976 rx_ring->rx_buffer_info[ntc].skb = skb; in ixgbe_is_non_eop()
1977 rx_ring->rx_stats.non_eop_descs++; in ixgbe_is_non_eop()
1983 * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
1997 skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; in ixgbe_pull_tail()
2010 * 60 bytes if the skb->len is less than 60 for skb_pad. in ixgbe_pull_tail()
2012 pull_len = eth_get_headlen(skb->dev, va, IXGBE_RX_HDR_SIZE); in ixgbe_pull_tail()
2020 skb->data_len -= pull_len; in ixgbe_pull_tail()
2021 skb->tail += pull_len; in ixgbe_pull_tail()
2025 * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
2038 unsigned long mask = (unsigned long)ixgbe_rx_pg_size(rx_ring) - 1; in ixgbe_dma_sync_frag()
2039 unsigned long offset = (unsigned long)(skb->data) & mask; in ixgbe_dma_sync_frag()
2041 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbe_dma_sync_frag()
2042 IXGBE_CB(skb)->dma, in ixgbe_dma_sync_frag()
2047 skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; in ixgbe_dma_sync_frag()
2049 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbe_dma_sync_frag()
2050 IXGBE_CB(skb)->dma, in ixgbe_dma_sync_frag()
2057 if (unlikely(IXGBE_CB(skb)->page_released)) { in ixgbe_dma_sync_frag()
2058 dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma, in ixgbe_dma_sync_frag()
2066 * ixgbe_cleanup_headers - Correct corrupted or empty headers
2091 struct net_device *netdev = rx_ring->netdev; in ixgbe_cleanup_headers()
2099 !(netdev->features & NETIF_F_RXALL)))) { in ixgbe_cleanup_headers()
2122 * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
2132 u16 nta = rx_ring->next_to_alloc; in ixgbe_reuse_rx_page()
2134 new_buff = &rx_ring->rx_buffer_info[nta]; in ixgbe_reuse_rx_page()
2138 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in ixgbe_reuse_rx_page()
2144 new_buff->dma = old_buff->dma; in ixgbe_reuse_rx_page()
2145 new_buff->page = old_buff->page; in ixgbe_reuse_rx_page()
2146 new_buff->page_offset = old_buff->page_offset; in ixgbe_reuse_rx_page()
2147 new_buff->pagecnt_bias = old_buff->pagecnt_bias; in ixgbe_reuse_rx_page()
2153 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; in ixgbe_can_reuse_rx_page()
2154 struct page *page = rx_buffer->page; in ixgbe_can_reuse_rx_page()
2156 /* avoid re-using remote and pfmemalloc pages */ in ixgbe_can_reuse_rx_page()
2162 if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) in ixgbe_can_reuse_rx_page()
2171 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K) in ixgbe_can_reuse_rx_page()
2172 if (rx_buffer->page_offset > IXGBE_LAST_OFFSET) in ixgbe_can_reuse_rx_page()
2181 page_ref_add(page, USHRT_MAX - 1); in ixgbe_can_reuse_rx_page()
2182 rx_buffer->pagecnt_bias = USHRT_MAX; in ixgbe_can_reuse_rx_page()
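/*
 * Worked bound for the large-page branch above: with 8 KiB pages and
 * 3 KiB Rx buffers, IXGBE_LAST_OFFSET = SKB_WITH_OVERHEAD(8192) - 3072,
 * i.e. the last page_offset at which one more 3 KiB buffer plus the
 * skb_shared_info overhead still fits in the page; past that the page
 * cannot be flipped again and must be released.
 */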
2189 * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
2195 * This function will add the data contained in rx_buffer->page to the skb.
2211 unsigned int truesize = rx_ring->rx_offset ? in ixgbe_add_rx_frag()
2212 SKB_DATA_ALIGN(rx_ring->rx_offset + size) : in ixgbe_add_rx_frag()
2215 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, in ixgbe_add_rx_frag()
2216 rx_buffer->page_offset, size, truesize); in ixgbe_add_rx_frag()
2218 rx_buffer->page_offset ^= truesize; in ixgbe_add_rx_frag()
2220 rx_buffer->page_offset += truesize; in ixgbe_add_rx_frag()
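/*
 * Example of the two update modes above: with 4 KiB pages the buffer is
 * half a page, so "page_offset ^= truesize" simply toggles between the
 * two halves (0 <-> 2048); with larger pages "page_offset += truesize"
 * walks forward through the page until ixgbe_can_reuse_rx_page()
 * reports it full.
 */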
2232 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in ixgbe_get_rx_buffer()
2235 page_count(rx_buffer->page); in ixgbe_get_rx_buffer()
2239 prefetchw(rx_buffer->page); in ixgbe_get_rx_buffer()
2240 *skb = rx_buffer->skb; in ixgbe_get_rx_buffer()
2255 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbe_get_rx_buffer()
2256 rx_buffer->dma, in ixgbe_get_rx_buffer()
2257 rx_buffer->page_offset, in ixgbe_get_rx_buffer()
2261 rx_buffer->pagecnt_bias--; in ixgbe_get_rx_buffer()
2275 if (skb && IXGBE_CB(skb)->dma == rx_buffer->dma) { in ixgbe_put_rx_buffer()
2277 IXGBE_CB(skb)->page_released = true; in ixgbe_put_rx_buffer()
2280 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in ixgbe_put_rx_buffer()
2285 __page_frag_cache_drain(rx_buffer->page, in ixgbe_put_rx_buffer()
2286 rx_buffer->pagecnt_bias); in ixgbe_put_rx_buffer()
2290 rx_buffer->page = NULL; in ixgbe_put_rx_buffer()
2291 rx_buffer->skb = NULL; in ixgbe_put_rx_buffer()
2299 unsigned int size = xdp->data_end - xdp->data; in ixgbe_construct_skb()
2303 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end - in ixgbe_construct_skb()
2304 xdp->data_hard_start); in ixgbe_construct_skb()
2309 net_prefetch(xdp->data); in ixgbe_construct_skb()
2311 /* Note, we get here by enabling legacy-rx via: in ixgbe_construct_skb()
2313 * ethtool --set-priv-flags <dev> legacy-rx on in ixgbe_construct_skb()
2316 * opposed to having legacy-rx off, where we process XDP in ixgbe_construct_skb()
2321 * xdp->data_meta will always point to xdp->data, since in ixgbe_construct_skb()
2323 * change in future for legacy-rx mode on, then lets also in ixgbe_construct_skb()
2324 * add xdp->data_meta handling here. in ixgbe_construct_skb()
2328 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE); in ixgbe_construct_skb()
2334 IXGBE_CB(skb)->dma = rx_buffer->dma; in ixgbe_construct_skb()
2336 skb_add_rx_frag(skb, 0, rx_buffer->page, in ixgbe_construct_skb()
2337 xdp->data - page_address(rx_buffer->page), in ixgbe_construct_skb()
2340 rx_buffer->page_offset ^= truesize; in ixgbe_construct_skb()
2342 rx_buffer->page_offset += truesize; in ixgbe_construct_skb()
2346 xdp->data, ALIGN(size, sizeof(long))); in ixgbe_construct_skb()
2347 rx_buffer->pagecnt_bias++; in ixgbe_construct_skb()
2358 unsigned int metasize = xdp->data - xdp->data_meta; in ixgbe_build_skb()
2363 SKB_DATA_ALIGN(xdp->data_end - in ixgbe_build_skb()
2364 xdp->data_hard_start); in ixgbe_build_skb()
2368 /* Prefetch first cache line of first page. If xdp->data_meta in ixgbe_build_skb()
2369 * is unused, this points exactly as xdp->data, otherwise we in ixgbe_build_skb()
2373 net_prefetch(xdp->data_meta); in ixgbe_build_skb()
2376 skb = napi_build_skb(xdp->data_hard_start, truesize); in ixgbe_build_skb()
2381 skb_reserve(skb, xdp->data - xdp->data_hard_start); in ixgbe_build_skb()
2382 __skb_put(skb, xdp->data_end - xdp->data); in ixgbe_build_skb()
2388 IXGBE_CB(skb)->dma = rx_buffer->dma; in ixgbe_build_skb()
2392 rx_buffer->page_offset ^= truesize; in ixgbe_build_skb()
2394 rx_buffer->page_offset += truesize; in ixgbe_build_skb()
2410 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in ixgbe_run_xdp()
2415 prefetchw(xdp->data_hard_start); /* xdp_frame write */ in ixgbe_run_xdp()
2427 spin_lock(&ring->tx_lock); in ixgbe_run_xdp()
2430 spin_unlock(&ring->tx_lock); in ixgbe_run_xdp()
2435 err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); in ixgbe_run_xdp()
2441 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); in ixgbe_run_xdp()
2445 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in ixgbe_run_xdp()
2461 truesize = ixgbe_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ in ixgbe_rx_frame_truesize()
2463 truesize = rx_ring->rx_offset ? in ixgbe_rx_frame_truesize()
2464 SKB_DATA_ALIGN(rx_ring->rx_offset + size) + in ixgbe_rx_frame_truesize()
2477 rx_buffer->page_offset ^= truesize; in ixgbe_rx_buffer_flip()
2479 rx_buffer->page_offset += truesize; in ixgbe_rx_buffer_flip()
2484 * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
2501 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_clean_rx_irq()
2507 unsigned int offset = rx_ring->rx_offset; in ixgbe_clean_rx_irq()
2516 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); in ixgbe_clean_rx_irq()
2531 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); in ixgbe_clean_rx_irq()
2532 size = le16_to_cpu(rx_desc->wb.upper.length); in ixgbe_clean_rx_irq()
2548 hard_start = page_address(rx_buffer->page) + in ixgbe_clean_rx_irq()
2549 rx_buffer->page_offset - offset; in ixgbe_clean_rx_irq()
2564 rx_buffer->pagecnt_bias++; in ixgbe_clean_rx_irq()
2580 rx_ring->rx_stats.alloc_rx_buff_failed++; in ixgbe_clean_rx_irq()
2581 rx_buffer->pagecnt_bias++; in ixgbe_clean_rx_irq()
2597 total_rx_bytes += skb->len; in ixgbe_clean_rx_irq()
2609 mss = rx_ring->netdev->mtu - in ixgbe_clean_rx_irq()
2610 sizeof(struct fcoe_hdr) - in ixgbe_clean_rx_irq()
2611 sizeof(struct fc_frame_header) - in ixgbe_clean_rx_irq()
2649 * ixgbe_configure_msix - Configure MSI-X hardware
2652 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
2662 if (adapter->num_vfs > 32) { in ixgbe_configure_msix()
2663 u32 eitrsel = BIT(adapter->num_vfs - 32) - 1; in ixgbe_configure_msix()
2664 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel); in ixgbe_configure_msix()
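/*
 * Bitmask arithmetic for the EITRSEL write above: with num_vfs = 40,
 * BIT(40 - 32) - 1 = BIT(8) - 1 = 0xFF, i.e. one select bit for each of
 * the eight VFs numbered 32-39 (the first 32 VFs need no EITRSEL bits).
 */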
2671 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { in ixgbe_configure_msix()
2673 q_vector = adapter->q_vector[v_idx]; in ixgbe_configure_msix()
2675 ixgbe_for_each_ring(ring, q_vector->rx) in ixgbe_configure_msix()
2676 ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx); in ixgbe_configure_msix()
2678 ixgbe_for_each_ring(ring, q_vector->tx) in ixgbe_configure_msix()
2679 ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx); in ixgbe_configure_msix()
2684 switch (adapter->hw.mac.type) { in ixgbe_configure_msix()
2686 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, in ixgbe_configure_msix()
2695 ixgbe_set_ivar(adapter, -1, 1, v_idx); in ixgbe_configure_msix()
2700 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); in ixgbe_configure_msix()
2708 if (adapter->hw.mac.type == ixgbe_mac_e610) in ixgbe_configure_msix()
2711 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); in ixgbe_configure_msix()
2715 * ixgbe_update_itr - update the dynamic ITR value based on statistics
2738 if (!ring_container->ring) in ixgbe_update_itr()
2741 /* If we didn't update within up to 1 - 2 jiffies we can assume in ixgbe_update_itr()
2746 if (time_after(next_update, ring_container->next_update)) in ixgbe_update_itr()
2749 packets = ring_container->total_packets; in ixgbe_update_itr()
2760 itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC; in ixgbe_update_itr()
2763 itr += ring_container->itr & IXGBE_ITR_ADAPTIVE_LATENCY; in ixgbe_update_itr()
2767 bytes = ring_container->total_bytes; in ixgbe_update_itr()
2783 itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC; in ixgbe_update_itr()
2793 itr = q_vector->itr >> 2; in ixgbe_update_itr()
2802 itr = q_vector->itr >> 3; in ixgbe_update_itr()
2872 switch (q_vector->adapter->link_speed) { in ixgbe_update_itr()
2893 ring_container->itr = itr; in ixgbe_update_itr()
2896 ring_container->next_update = next_update + 1; in ixgbe_update_itr()
2898 ring_container->total_bytes = 0; in ixgbe_update_itr()
2899 ring_container->total_packets = 0; in ixgbe_update_itr()
2903 * ixgbe_write_eitr - write EITR register in hardware specific way
2912 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_write_eitr()
2913 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_write_eitr()
2914 int v_idx = q_vector->v_idx; in ixgbe_write_eitr()
2915 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; in ixgbe_write_eitr()
2917 switch (adapter->hw.mac.type) { in ixgbe_write_eitr()
2944 ixgbe_update_itr(q_vector, &q_vector->tx); in ixgbe_set_itr()
2945 ixgbe_update_itr(q_vector, &q_vector->rx); in ixgbe_set_itr()
2948 new_itr = min(q_vector->rx.itr, q_vector->tx.itr); in ixgbe_set_itr()
2954 if (new_itr != q_vector->itr) { in ixgbe_set_itr()
2955 /* save the algorithm value here */ in ixgbe_set_itr()
2956 q_vector->itr = new_itr; in ixgbe_set_itr()
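/*
 * Unit note for the shifts above (a sketch, assuming the usual EITR
 * layout): the adaptive algorithm works in microseconds while
 * q_vector->itr holds the register form with the interval above bit 2,
 * so values are shifted right by 2 (q_vector->itr >> 2) when used as a
 * starting point and shifted back left before ixgbe_write_eitr().
 */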
2963 * ixgbe_check_overtemp_subtask - check for over temperature
2968 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_check_overtemp_subtask()
2969 u32 eicr = adapter->interrupt_event; in ixgbe_check_overtemp_subtask()
2971 if (test_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_check_overtemp_subtask()
2974 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT)) in ixgbe_check_overtemp_subtask()
2977 adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT; in ixgbe_check_overtemp_subtask()
2979 switch (hw->device_id) { in ixgbe_check_overtemp_subtask()
2984 * - This interrupt wasn't for our port. in ixgbe_check_overtemp_subtask()
2985 * - We may have missed the interrupt so always have to in ixgbe_check_overtemp_subtask()
2992 if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) { in ixgbe_check_overtemp_subtask()
2996 hw->mac.ops.check_link(hw, &speed, &link_up, false); in ixgbe_check_overtemp_subtask()
3003 if (!hw->phy.ops.check_overtemp(hw)) in ixgbe_check_overtemp_subtask()
3009 if (!hw->phy.ops.check_overtemp(hw)) in ixgbe_check_overtemp_subtask()
3013 if (adapter->hw.mac.type >= ixgbe_mac_X540) in ixgbe_check_overtemp_subtask()
3021 adapter->interrupt_event = 0; in ixgbe_check_overtemp_subtask()
3026 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_check_fan_failure()
3028 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && in ixgbe_check_fan_failure()
3038 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_check_overtemp_event()
3040 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)) in ixgbe_check_overtemp_event()
3043 switch (adapter->hw.mac.type) { in ixgbe_check_overtemp_event()
3051 (!test_bit(__IXGBE_DOWN, &adapter->state))) { in ixgbe_check_overtemp_event()
3052 adapter->interrupt_event = eicr; in ixgbe_check_overtemp_event()
3053 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; in ixgbe_check_overtemp_event()
3060 adapter->interrupt_event = eicr; in ixgbe_check_overtemp_event()
3061 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; in ixgbe_check_overtemp_event()
3063 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, in ixgbe_check_overtemp_event()
3065 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR, in ixgbe_check_overtemp_event()
3083 switch (hw->mac.type) { in ixgbe_is_sfp()
3085 if (hw->phy.type == ixgbe_phy_nl) in ixgbe_is_sfp()
3091 switch (hw->mac.ops.get_media_type(hw)) { in ixgbe_is_sfp()
3105 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_check_sfp_event()
3112 if (hw->mac.type >= ixgbe_mac_X540) in ixgbe_check_sfp_event()
3118 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { in ixgbe_check_sfp_event()
3119 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; in ixgbe_check_sfp_event()
3120 adapter->sfp_poll_time = 0; in ixgbe_check_sfp_event()
3125 if (adapter->hw.mac.type == ixgbe_mac_82599EB && in ixgbe_check_sfp_event()
3129 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { in ixgbe_check_sfp_event()
3130 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; in ixgbe_check_sfp_event()
3138 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_check_lsc()
3140 adapter->lsc_int++; in ixgbe_check_lsc()
3141 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; in ixgbe_check_lsc()
3142 adapter->link_check_timeout = jiffies; in ixgbe_check_lsc()
3143 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { in ixgbe_check_lsc()
3151 * ixgbe_check_phy_fw_load - check if PHY FW load failed
3161 adapter->flags2 &= ~IXGBE_FLAG2_PHY_FW_LOAD_FAILED; in ixgbe_check_phy_fw_load()
3165 if (adapter->flags2 & IXGBE_FLAG2_PHY_FW_LOAD_FAILED) in ixgbe_check_phy_fw_load()
3169 …netdev_err(adapter->netdev, "Device failed to load the FW for the external PHY. Please download an… in ixgbe_check_phy_fw_load()
3170 adapter->flags2 |= IXGBE_FLAG2_PHY_FW_LOAD_FAILED; in ixgbe_check_phy_fw_load()
3175 * ixgbe_check_module_power - check module power level
3188 adapter->flags2 &= ~IXGBE_FLAG2_MOD_POWER_UNSUPPORTED; in ixgbe_check_module_power()
3195 if (adapter->flags2 & IXGBE_FLAG2_MOD_POWER_UNSUPPORTED) in ixgbe_check_module_power()
3199 …netdev_err(adapter->netdev, "The installed module is incompatible with the device's NVM image. Can… in ixgbe_check_module_power()
3200 adapter->flags2 |= IXGBE_FLAG2_MOD_POWER_UNSUPPORTED; in ixgbe_check_module_power()
3202 …netdev_err(adapter->netdev, "The module's power requirements exceed the device's power supply. Can… in ixgbe_check_module_power()
3203 adapter->flags2 |= IXGBE_FLAG2_MOD_POWER_UNSUPPORTED; in ixgbe_check_module_power()
3208 * ixgbe_check_link_cfg_err - check if link configuration failed
3223 * ixgbe_process_link_status_event - process the link event
3234 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_process_link_status_event()
3237 /* Update the link info structures and re-enable link events, in ixgbe_process_link_status_event()
3243 status, hw->aci.last_status); in ixgbe_process_link_status_event()
3245 ixgbe_check_link_cfg_err(adapter, hw->link.link_info.link_cfg_err); in ixgbe_process_link_status_event()
3250 if (hw->link.link_info.link_info & IXGBE_ACI_LINK_UP) in ixgbe_process_link_status_event()
3254 if (!(adapter->flags2 & IXGBE_FLAG2_NO_MEDIA) && in ixgbe_process_link_status_event()
3255 !(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE)) in ixgbe_process_link_status_event()
3256 adapter->flags2 |= IXGBE_FLAG2_NO_MEDIA; in ixgbe_process_link_status_event()
3258 if (link_up == adapter->link_up && in ixgbe_process_link_status_event()
3259 link_up == netif_carrier_ok(adapter->netdev) && in ixgbe_process_link_status_event()
3260 link_speed == adapter->link_speed) in ixgbe_process_link_status_event()
3263 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; in ixgbe_process_link_status_event()
3264 adapter->link_check_timeout = jiffies; in ixgbe_process_link_status_event()
3276 * ixgbe_handle_link_status_event - handle link status event via ACI
3288 link_data = (struct ixgbe_aci_cmd_get_link_status_data *)e->msg_buf; in ixgbe_handle_link_status_event()
3290 link_up = !!(link_data->link_info & IXGBE_ACI_LINK_UP); in ixgbe_handle_link_status_event()
3291 link_speed = le16_to_cpu(link_data->link_speed); in ixgbe_handle_link_status_event()
3298 * ixgbe_schedule_fw_event - schedule Firmware event
3306 if (!test_bit(__IXGBE_DOWN, &adapter->state) && in ixgbe_schedule_fw_event()
3307 !test_bit(__IXGBE_REMOVING, &adapter->state) && in ixgbe_schedule_fw_event()
3308 !test_bit(__IXGBE_RESETTING, &adapter->state)) { in ixgbe_schedule_fw_event()
3309 adapter->flags2 |= IXGBE_FLAG2_FW_ASYNC_EVENT; in ixgbe_schedule_fw_event()
3315 * ixgbe_aci_event_cleanup - release msg_buf memory
3322 kfree(event->msg_buf); in ixgbe_aci_event_cleanup()
3326 * ixgbe_handle_fw_event - handle Firmware event
3335 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_handle_fw_event()
3339 if (adapter->flags2 & IXGBE_FLAG2_FW_ASYNC_EVENT) in ixgbe_handle_fw_event()
3340 adapter->flags2 &= ~IXGBE_FLAG2_FW_ASYNC_EVENT; in ixgbe_handle_fw_event()
3360 libie_get_fwlog_data(&hw->fwlog, event.msg_buf, in ixgbe_handle_fw_event()
3373 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_irq_enable_queues()
3376 switch (hw->mac.type) { in ixgbe_irq_enable_queues()
3401 * ixgbe_irq_enable - Enable default interrupt generation settings
3409 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_irq_enable()
3413 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) in ixgbe_irq_enable()
3416 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) in ixgbe_irq_enable()
3417 switch (adapter->hw.mac.type) { in ixgbe_irq_enable()
3430 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) in ixgbe_irq_enable()
3432 switch (adapter->hw.mac.type) { in ixgbe_irq_enable()
3444 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP || in ixgbe_irq_enable()
3445 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP || in ixgbe_irq_enable()
3446 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) in ixgbe_irq_enable()
3447 mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw); in ixgbe_irq_enable()
3448 if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t) in ixgbe_irq_enable()
3457 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && in ixgbe_irq_enable()
3458 !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) in ixgbe_irq_enable()
3461 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); in ixgbe_irq_enable()
3465 IXGBE_WRITE_FLUSH(&adapter->hw); in ixgbe_irq_enable()
3471 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_msix_other()
3475 * Workaround for Silicon errata. Use clear-by-write instead in ixgbe_msix_other()
3476 * of clear-by-read. Reading with EICS will return the in ixgbe_msix_other()
3502 switch (hw->mac.type) { in ixgbe_msix_other()
3509 if (hw->phy.type == ixgbe_phy_x550em_ext_t && in ixgbe_msix_other()
3511 adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT; in ixgbe_msix_other()
3518 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); in ixgbe_msix_other()
3526 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbe_msix_other()
3527 struct ixgbe_ring *ring = adapter->tx_ring[i]; in ixgbe_msix_other()
3529 &ring->state)) in ixgbe_msix_other()
3535 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT; in ixgbe_msix_other()
3551 /* re-enable the original interrupt state, no lsc, no queues */ in ixgbe_msix_other()
3552 if (!test_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_msix_other()
3564 if (q_vector->rx.ring || q_vector->tx.ring) in ixgbe_msix_clean_rings()
3565 napi_schedule_irqoff(&q_vector->napi); in ixgbe_msix_clean_rings()
3571 * ixgbe_poll - NAPI Rx polling callback
3581 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_poll()
3587 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) in ixgbe_poll()
3591 ixgbe_for_each_ring(ring, q_vector->tx) { in ixgbe_poll()
3592 bool wd = ring->xsk_pool ? in ixgbe_poll()
3606 if (q_vector->rx.count > 1) in ixgbe_poll()
3607 per_ring_budget = max(budget/q_vector->rx.count, 1); in ixgbe_poll()
3611 ixgbe_for_each_ring(ring, q_vector->rx) { in ixgbe_poll()
3612 int cleaned = ring->xsk_pool ? in ixgbe_poll()
3629 if (adapter->rx_itr_setting & 1) in ixgbe_poll()
3631 if (!test_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_poll()
3633 BIT_ULL(q_vector->v_idx)); in ixgbe_poll()
3636 return min(work_done, budget - 1); in ixgbe_poll()
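/*
 * Budget split example for the poll loop above: with the default NAPI
 * budget of 64 and three Rx rings on this vector, each ring gets
 * max(64 / 3, 1) = 21 descriptors per poll. Returning
 * min(work_done, budget - 1) keeps the reported work strictly below the
 * full budget, which tells NAPI the vector is done now that interrupts
 * have been re-armed.
 */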
3640 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
3643 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
3648 struct net_device *netdev = adapter->netdev; in ixgbe_request_msix_irqs()
3652 for (vector = 0; vector < adapter->num_q_vectors; vector++) { in ixgbe_request_msix_irqs()
3653 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; in ixgbe_request_msix_irqs()
3654 struct msix_entry *entry = &adapter->msix_entries[vector]; in ixgbe_request_msix_irqs()
3656 if (q_vector->tx.ring && q_vector->rx.ring) { in ixgbe_request_msix_irqs()
3657 snprintf(q_vector->name, sizeof(q_vector->name), in ixgbe_request_msix_irqs()
3658 "%s-TxRx-%u", netdev->name, ri++); in ixgbe_request_msix_irqs()
3660 } else if (q_vector->rx.ring) { in ixgbe_request_msix_irqs()
3661 snprintf(q_vector->name, sizeof(q_vector->name), in ixgbe_request_msix_irqs()
3662 "%s-rx-%u", netdev->name, ri++); in ixgbe_request_msix_irqs()
3663 } else if (q_vector->tx.ring) { in ixgbe_request_msix_irqs()
3664 snprintf(q_vector->name, sizeof(q_vector->name), in ixgbe_request_msix_irqs()
3665 "%s-tx-%u", netdev->name, ti++); in ixgbe_request_msix_irqs()
3670 err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0, in ixgbe_request_msix_irqs()
3671 q_vector->name, q_vector); in ixgbe_request_msix_irqs()
3678 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { in ixgbe_request_msix_irqs()
3680 irq_update_affinity_hint(entry->vector, in ixgbe_request_msix_irqs()
3681 &q_vector->affinity_mask); in ixgbe_request_msix_irqs()
3685 err = request_irq(adapter->msix_entries[vector].vector, in ixgbe_request_msix_irqs()
3686 ixgbe_msix_other, 0, netdev->name, adapter); in ixgbe_request_msix_irqs()
3696 vector--; in ixgbe_request_msix_irqs()
3697 irq_update_affinity_hint(adapter->msix_entries[vector].vector, in ixgbe_request_msix_irqs()
3699 free_irq(adapter->msix_entries[vector].vector, in ixgbe_request_msix_irqs()
3700 adapter->q_vector[vector]); in ixgbe_request_msix_irqs()
3702 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; in ixgbe_request_msix_irqs()
3703 pci_disable_msix(adapter->pdev); in ixgbe_request_msix_irqs()
3704 kfree(adapter->msix_entries); in ixgbe_request_msix_irqs()
3705 adapter->msix_entries = NULL; in ixgbe_request_msix_irqs()
3710 * ixgbe_intr - legacy mode Interrupt Handler
3717 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_intr()
3718 struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; in ixgbe_intr()
3727 /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read in ixgbe_intr()
3738 if (!test_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_intr()
3749 switch (hw->mac.type) { in ixgbe_intr()
3760 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); in ixgbe_intr()
3775 napi_schedule_irqoff(&q_vector->napi); in ixgbe_intr()
3778 * re-enable link(maybe) and non-queue interrupts, no flush. in ixgbe_intr()
3779 * ixgbe_poll will re-enable the queue interrupts in ixgbe_intr()
3781 if (!test_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_intr()
3788 * ixgbe_request_irq - initialize interrupts
3796 struct net_device *netdev = adapter->netdev; in ixgbe_request_irq()
3799 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) in ixgbe_request_irq()
3801 else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) in ixgbe_request_irq()
3802 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0, in ixgbe_request_irq()
3803 netdev->name, adapter); in ixgbe_request_irq()
3805 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED, in ixgbe_request_irq()
3806 netdev->name, adapter); in ixgbe_request_irq()
3818 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { in ixgbe_free_irq()
3819 free_irq(adapter->pdev->irq, adapter); in ixgbe_free_irq()
3823 if (!adapter->msix_entries) in ixgbe_free_irq()
3826 for (vector = 0; vector < adapter->num_q_vectors; vector++) { in ixgbe_free_irq()
3827 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; in ixgbe_free_irq()
3828 struct msix_entry *entry = &adapter->msix_entries[vector]; in ixgbe_free_irq()
3831 if (!q_vector->rx.ring && !q_vector->tx.ring) in ixgbe_free_irq()
3835 irq_update_affinity_hint(entry->vector, NULL); in ixgbe_free_irq()
3837 free_irq(entry->vector, q_vector); in ixgbe_free_irq()
3840 free_irq(adapter->msix_entries[vector].vector, adapter); in ixgbe_free_irq()
3844 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
3849 switch (adapter->hw.mac.type) { in ixgbe_irq_disable()
3851 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); in ixgbe_irq_disable()
3859 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); in ixgbe_irq_disable()
3860 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); in ixgbe_irq_disable()
3861 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); in ixgbe_irq_disable()
3866 IXGBE_WRITE_FLUSH(&adapter->hw); in ixgbe_irq_disable()
3867 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { in ixgbe_irq_disable()
3870 for (vector = 0; vector < adapter->num_q_vectors; vector++) in ixgbe_irq_disable()
3871 synchronize_irq(adapter->msix_entries[vector].vector); in ixgbe_irq_disable()
3873 synchronize_irq(adapter->msix_entries[vector++].vector); in ixgbe_irq_disable()
3875 synchronize_irq(adapter->pdev->irq); in ixgbe_irq_disable()
3880 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
3886 struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; in ixgbe_configure_msi_and_legacy()
3897 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
3906 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_tx_ring()
3907 u64 tdba = ring->dma; in ixgbe_configure_tx_ring()
3910 u8 reg_idx = ring->reg_idx; in ixgbe_configure_tx_ring()
3912 ring->xsk_pool = NULL; in ixgbe_configure_tx_ring()
3914 ring->xsk_pool = ixgbe_xsk_pool(adapter, ring); in ixgbe_configure_tx_ring()
3924 ring->count * sizeof(union ixgbe_adv_tx_desc)); in ixgbe_configure_tx_ring()
3927 ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx); in ixgbe_configure_tx_ring()
3932 * - ITR is 0 as it could cause false TX hangs in ixgbe_configure_tx_ring()
3933 * - ITR is set to > 100k int/sec and BQL is enabled in ixgbe_configure_tx_ring()
3939 if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR)) in ixgbe_configure_tx_ring()
3952 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { in ixgbe_configure_tx_ring()
3953 ring->atr_sample_rate = adapter->atr_sample_rate; in ixgbe_configure_tx_ring()
3954 ring->atr_count = 0; in ixgbe_configure_tx_ring()
3955 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state); in ixgbe_configure_tx_ring()
3957 ring->atr_sample_rate = 0; in ixgbe_configure_tx_ring()
3961 if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) { in ixgbe_configure_tx_ring()
3962 struct ixgbe_q_vector *q_vector = ring->q_vector; in ixgbe_configure_tx_ring()
3965 netif_set_xps_queue(ring->netdev, in ixgbe_configure_tx_ring()
3966 &q_vector->affinity_mask, in ixgbe_configure_tx_ring()
3967 ring->queue_index); in ixgbe_configure_tx_ring()
3970 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state); in ixgbe_configure_tx_ring()
3973 memset(ring->tx_buffer_info, 0, in ixgbe_configure_tx_ring()
3974 sizeof(struct ixgbe_tx_buffer) * ring->count); in ixgbe_configure_tx_ring()
3980 if (hw->mac.type == ixgbe_mac_82598EB && in ixgbe_configure_tx_ring()
3988 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); in ixgbe_configure_tx_ring()
3995 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_setup_mtqc()
3997 u8 tcs = adapter->hw_tcs; in ixgbe_setup_mtqc()
3999 if (hw->mac.type == ixgbe_mac_82598EB) in ixgbe_setup_mtqc()
4008 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { in ixgbe_setup_mtqc()
4014 else if (adapter->ring_feature[RING_F_VMDQ].mask == in ixgbe_setup_mtqc()
4025 u8 max_txq = adapter->num_tx_queues + in ixgbe_setup_mtqc()
4026 adapter->num_xdp_queues; in ixgbe_setup_mtqc()
4043 /* re-enable the arbiter */ in ixgbe_setup_mtqc()
4049 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
4056 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_tx()
4062 if (hw->mac.type != ixgbe_mac_82598EB) { in ixgbe_configure_tx()
4070 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbe_configure_tx()
4071 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); in ixgbe_configure_tx()
4072 for (i = 0; i < adapter->num_xdp_queues; i++) in ixgbe_configure_tx()
4073 ixgbe_configure_tx_ring(adapter, adapter->xdp_ring[i]); in ixgbe_configure_tx()
4079 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_enable_rx_drop()
4080 u8 reg_idx = ring->reg_idx; in ixgbe_enable_rx_drop()
4091 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_disable_rx_drop()
4092 u8 reg_idx = ring->reg_idx; in ixgbe_disable_rx_drop()
4106 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; in ixgbe_set_rx_drop_en()
4107 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_set_rx_drop_en()
4110 if (hw->mac.ops.disable_mdd) in ixgbe_set_rx_drop_en()
4111 hw->mac.ops.disable_mdd(hw); in ixgbe_set_rx_drop_en()
4113 if (adapter->ixgbe_ieee_pfc) in ixgbe_set_rx_drop_en()
4114 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); in ixgbe_set_rx_drop_en()
4118 * Drop enable should be set when SR-IOV is enabled, or when there is more than one Rx queue and neither Tx pause nor PFC is active in ixgbe_set_rx_drop_en()
4125 if (adapter->num_vfs || (adapter->num_rx_queues > 1 && in ixgbe_set_rx_drop_en()
4126 !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) { in ixgbe_set_rx_drop_en()
4127 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbe_set_rx_drop_en()
4128 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]); in ixgbe_set_rx_drop_en()
4130 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbe_set_rx_drop_en()
4131 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]); in ixgbe_set_rx_drop_en()
4134 if (hw->mac.ops.enable_mdd) in ixgbe_set_rx_drop_en()
4135 hw->mac.ops.enable_mdd(hw); in ixgbe_set_rx_drop_en()
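/*
 * Editor's sketch: the drop-enable policy applied above, restated as a
 * standalone predicate.  Parameter names are simplified from the adapter
 * state used in ixgbe_set_rx_drop_en().
 */
static bool rx_drop_should_enable(unsigned int num_vfs,
				  unsigned int num_rx_queues,
				  bool tx_pause_active, bool pfc_en)
{
	/* Without pause frames or PFC pushing back on the link, a full
	 * ring must drop rather than head-of-line block its siblings. */
	return num_vfs ||
	       (num_rx_queues > 1 && !tx_pause_active && !pfc_en);
}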
4143 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_srrctl()
4145 u8 reg_idx = rx_ring->reg_idx; in ixgbe_configure_srrctl()
4147 if (hw->mac.type == ixgbe_mac_82598EB) { in ixgbe_configure_srrctl()
4148 u16 mask = adapter->ring_feature[RING_F_RSS].mask; in ixgbe_configure_srrctl()
4161 if (rx_ring->xsk_pool) { in ixgbe_configure_srrctl()
4162 u32 xsk_buf_len = xsk_pool_get_rx_frame_size(rx_ring->xsk_pool); in ixgbe_configure_srrctl()
4172 if (hw->mac.type != ixgbe_mac_82599EB) in ixgbe_configure_srrctl()
4176 } else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) { in ixgbe_configure_srrctl()
4189 * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries
4192 * - 82598/82599/X540: 128
4193 * - X550(non-SRIOV mode): 512
4194 * - X550(SRIOV mode): 64
4198 if (adapter->hw.mac.type < ixgbe_mac_X550) in ixgbe_rss_indir_tbl_entries()
4200 else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) in ixgbe_rss_indir_tbl_entries()
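/*
 * Editor's sketch of the table in the comment above; the enum ordering
 * mirrors the driver's ixgbe_mac_type (an assumption of this sketch).
 */
static u32 rss_indir_entries(enum ixgbe_mac_type mac, bool sriov)
{
	if (mac < ixgbe_mac_X550)
		return 128;		/* 82598/82599/X540 */
	return sriov ? 64 : 512;	/* X550: fewer entries per pool */
}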
4207 * ixgbe_store_key - Write the RSS key to HW
4214 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_store_key()
4218 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]); in ixgbe_store_key()
4222 * ixgbe_init_rss_key - Initialize adapter RSS key
4231 if (!adapter->rss_key) { in ixgbe_init_rss_key()
4234 return -ENOMEM; in ixgbe_init_rss_key()
4237 adapter->rss_key = rss_key; in ixgbe_init_rss_key()
4244 * ixgbe_store_reta - Write the RETA table to HW
4252 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_store_reta()
4255 u8 *indir_tbl = adapter->rss_indir_tbl; in ixgbe_store_reta()
4258 * - 82598: 8 bit wide entries containing a pair of 4 bit RSS indices in ixgbe_store_reta()
4260 * - 82599/X540: 8 bit wide entries containing 4 bit RSS index in ixgbe_store_reta()
4261 * - X550: 8 bit wide entries containing 6 bit RSS index in ixgbe_store_reta()
4263 if (adapter->hw.mac.type == ixgbe_mac_82598EB) in ixgbe_store_reta()
4275 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), in ixgbe_store_reta()
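/*
 * Editor's sketch: how four 8-bit indirection entries pack into one
 * 32-bit RETA register, matching the (i >> 2) addressing above.
 * write_reta_reg() is a hypothetical helper standing in for the
 * IXGBE_RETA/IXGBE_ERETA register writes; indir_tbl and reta_entries
 * come from the surrounding context.
 */
u32 reta = 0;
int i;

for (i = 0; i < reta_entries; i++) {
	reta |= (u32)indir_tbl[i] << (i & 0x3) * 8;
	if ((i & 0x3) == 3) {	/* fourth byte completes a register */
		write_reta_reg(i >> 2, reta);
		reta = 0;
	}
}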
4283 * ixgbe_store_vfreta - Write the RETA table to HW (x550 devices in SRIOV mode)
4291 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_store_vfreta()
4296 u16 pool = adapter->num_rx_pools; in ixgbe_store_vfreta()
4298 vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8; in ixgbe_store_vfreta()
4302 while (pool--) in ixgbe_store_vfreta()
4314 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; in ixgbe_setup_reta()
4316 /* Program table for at least 4 queues w/ SR-IOV so that VFs can make full use of any rings they may have in ixgbe_setup_reta()
4320 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4)) in ixgbe_setup_reta()
4327 memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); in ixgbe_setup_reta()
4333 adapter->rss_indir_tbl[i] = j; in ixgbe_setup_reta()
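/*
 * Editor's note: the fill above is a plain round-robin.  With rss_i = 4
 * and a 128-entry table the result is 0,1,2,3,0,1,2,3,... so each queue
 * owns exactly 32 hash buckets.
 */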
4341 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_setup_vfreta()
4342 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; in ixgbe_setup_vfreta()
4347 u16 pool = adapter->num_rx_pools; in ixgbe_setup_vfreta()
4349 while (pool--) in ixgbe_setup_vfreta()
4352 *(adapter->rss_key + i)); in ixgbe_setup_vfreta()
4360 adapter->rss_indir_tbl[i] = j; in ixgbe_setup_vfreta()
4368 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_setup_mrqc()
4377 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { in ixgbe_setup_mrqc()
4378 if (adapter->ring_feature[RING_F_RSS].mask) in ixgbe_setup_mrqc()
4381 u8 tcs = adapter->hw_tcs; in ixgbe_setup_mrqc()
4383 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { in ixgbe_setup_mrqc()
4388 else if (adapter->ring_feature[RING_F_VMDQ].mask == in ixgbe_setup_mrqc()
4397 if (hw->mac.type >= ixgbe_mac_X550) in ixgbe_setup_mrqc()
4415 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) in ixgbe_setup_mrqc()
4417 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) in ixgbe_setup_mrqc()
4420 if ((hw->mac.type >= ixgbe_mac_X550) && in ixgbe_setup_mrqc()
4421 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { in ixgbe_setup_mrqc()
4422 u16 pool = adapter->num_rx_pools; in ixgbe_setup_mrqc()
4433 while (pool--) in ixgbe_setup_mrqc()
4445 * ixgbe_configure_rscctl - enable RSC for the indicated ring
4452 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_rscctl()
4454 u8 reg_idx = ring->reg_idx; in ixgbe_configure_rscctl()
4474 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_rx_desc_queue_enable()
4477 u8 reg_idx = ring->reg_idx; in ixgbe_rx_desc_queue_enable()
4479 if (ixgbe_removed(hw->hw_addr)) in ixgbe_rx_desc_queue_enable()
4482 if (hw->mac.type == ixgbe_mac_82598EB && in ixgbe_rx_desc_queue_enable()
4489 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); in ixgbe_rx_desc_queue_enable()
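/*
 * Editor's sketch of the bounded-poll idiom used when enabling a ring:
 * sleep-wait for a handful of attempts, then report instead of spinning
 * forever.  read_reg() is a hypothetical accessor for RXDCTL/TXDCTL.
 */
int wait_loop = 10;
u32 val;

do {
	usleep_range(1000, 2000);	/* 1-2 ms per attempt */
	val = read_reg();
} while (--wait_loop && !(val & IXGBE_RXDCTL_ENABLE));
if (!wait_loop)
	pr_err("ring enable did not complete\n");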
4500 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_rx_ring()
4502 u64 rdba = ring->dma; in ixgbe_configure_rx_ring()
4504 u8 reg_idx = ring->reg_idx; in ixgbe_configure_rx_ring()
4506 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); in ixgbe_configure_rx_ring()
4507 ring->xsk_pool = ixgbe_xsk_pool(adapter, ring); in ixgbe_configure_rx_ring()
4508 if (ring->xsk_pool) { in ixgbe_configure_rx_ring()
4509 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, in ixgbe_configure_rx_ring()
4512 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); in ixgbe_configure_rx_ring()
4514 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, in ixgbe_configure_rx_ring()
4529 ring->count * sizeof(union ixgbe_adv_rx_desc)); in ixgbe_configure_rx_ring()
4535 ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx); in ixgbe_configure_rx_ring()
4540 if (hw->mac.type == ixgbe_mac_82598EB) { in ixgbe_configure_rx_ring()
4552 } else if (hw->mac.type != ixgbe_mac_82599EB) { in ixgbe_configure_rx_ring()
4561 !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) in ixgbe_configure_rx_ring()
4567 ring->rx_offset = ixgbe_rx_offset(ring); in ixgbe_configure_rx_ring()
4569 if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) { in ixgbe_configure_rx_ring()
4570 u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool); in ixgbe_configure_rx_ring()
4576 ring->rx_buf_len = xsk_buf_len; in ixgbe_configure_rx_ring()
4580 memset(ring->rx_buffer_info, 0, in ixgbe_configure_rx_ring()
4581 sizeof(struct ixgbe_rx_buffer) * ring->count); in ixgbe_configure_rx_ring()
4585 rx_desc->wb.upper.length = 0; in ixgbe_configure_rx_ring()
4592 if (ring->xsk_pool) in ixgbe_configure_rx_ring()
4600 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_setup_psrtype()
4601 int rss_i = adapter->ring_feature[RING_F_RSS].indices; in ixgbe_setup_psrtype()
4602 u16 pool = adapter->num_rx_pools; in ixgbe_setup_psrtype()
4611 if (hw->mac.type == ixgbe_mac_82598EB) in ixgbe_setup_psrtype()
4619 while (pool--) in ixgbe_setup_psrtype()
4625 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_virtualization()
4626 u16 pool = adapter->num_rx_pools; in ixgbe_configure_virtualization()
4631 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) in ixgbe_configure_virtualization()
4645 while (pool--) in ixgbe_configure_virtualization()
4653 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); in ixgbe_configure_virtualization()
4655 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1); in ixgbe_configure_virtualization()
4656 if (adapter->bridge_mode == BRIDGE_MODE_VEB) in ixgbe_configure_virtualization()
4660 hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0)); in ixgbe_configure_virtualization()
4663 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC; in ixgbe_configure_virtualization()
4667 * Set up VF register offsets for the selected VT mode, i.e. 32 or 64 VFs for SR-IOV in ixgbe_configure_virtualization()
4669 switch (adapter->ring_feature[RING_F_VMDQ].mask) { in ixgbe_configure_virtualization()
4683 for (i = 0; i < adapter->num_vfs; i++) { in ixgbe_configure_virtualization()
4685 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, in ixgbe_configure_virtualization()
4686 adapter->vfinfo[i].spoofchk_enabled); in ixgbe_configure_virtualization()
4689 ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i, in ixgbe_configure_virtualization()
4690 adapter->vfinfo[i].rss_query_enabled); in ixgbe_configure_virtualization()
4696 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_set_rx_buffer_len()
4697 struct net_device *netdev = adapter->netdev; in ixgbe_set_rx_buffer_len()
4698 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; in ixgbe_set_rx_buffer_len()
4705 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && in ixgbe_set_rx_buffer_len()
4732 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_set_rx_buffer_len()
4733 rx_ring = adapter->rx_ring[i]; in ixgbe_set_rx_buffer_len()
4736 clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4737 clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4739 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) in ixgbe_set_rx_buffer_len()
4742 if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state)) in ixgbe_set_rx_buffer_len()
4743 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4745 if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) in ixgbe_set_rx_buffer_len()
4748 set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4751 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) in ixgbe_set_rx_buffer_len()
4752 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4756 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4763 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_setup_rdrxctl()
4766 switch (hw->mac.type) { in ixgbe_setup_rdrxctl()
4784 if (adapter->num_vfs) in ixgbe_setup_rdrxctl()
4806 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
4813 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_rx()
4818 hw->mac.ops.disable_rx(hw); in ixgbe_configure_rx()
4826 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) in ixgbe_configure_rx()
4843 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbe_configure_rx()
4844 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); in ixgbe_configure_rx()
4848 if (hw->mac.type == ixgbe_mac_82598EB) in ixgbe_configure_rx()
4853 hw->mac.ops.enable_rx_dma(hw, rxctrl); in ixgbe_configure_rx()
4860 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_vlan_rx_add_vid()
4863 if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) in ixgbe_vlan_rx_add_vid()
4864 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid); in ixgbe_vlan_rx_add_vid()
4866 set_bit(vid, adapter->active_vlans); in ixgbe_vlan_rx_add_vid()
4881 for (idx = IXGBE_VLVF_ENTRIES; --idx;) { in ixgbe_find_vlvf_entry()
4892 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_update_pf_promisc_vlvf()
4909 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) in ixgbe_update_pf_promisc_vlvf()
4919 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_vlan_rx_kill_vid()
4922 if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) in ixgbe_vlan_rx_kill_vid()
4923 hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true); in ixgbe_vlan_rx_kill_vid()
4925 clear_bit(vid, adapter->active_vlans); in ixgbe_vlan_rx_kill_vid()
4931 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
4936 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_vlan_strip_disable()
4940 switch (hw->mac.type) { in ixgbe_vlan_strip_disable()
4952 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_vlan_strip_disable()
4953 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_vlan_strip_disable()
4955 if (!netif_is_ixgbe(ring->netdev)) in ixgbe_vlan_strip_disable()
4958 j = ring->reg_idx; in ixgbe_vlan_strip_disable()
4970 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
4975 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_vlan_strip_enable()
4979 switch (hw->mac.type) { in ixgbe_vlan_strip_enable()
4991 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_vlan_strip_enable()
4992 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_vlan_strip_enable()
4994 if (!netif_is_ixgbe(ring->netdev)) in ixgbe_vlan_strip_enable()
4997 j = ring->reg_idx; in ixgbe_vlan_strip_enable()
5010 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_vlan_promisc_enable()
5015 if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { in ixgbe_vlan_promisc_enable()
5016 /* For VMDq and SR-IOV we must leave VLAN filtering enabled */ in ixgbe_vlan_promisc_enable()
5026 if (hw->mac.type == ixgbe_mac_82598EB) in ixgbe_vlan_promisc_enable()
5030 if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) in ixgbe_vlan_promisc_enable()
5034 adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC; in ixgbe_vlan_promisc_enable()
5037 for (i = IXGBE_VLVF_ENTRIES; --i;) { in ixgbe_vlan_promisc_enable()
5046 for (i = hw->mac.vft_size; i--;) in ixgbe_vlan_promisc_enable()
5053 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_scrub_vfta()
5059 for (i = IXGBE_VLVF_ENTRIES; --i;) { in ixgbe_scrub_vfta()
5071 vfta[(vid - vid_start) / 32] |= BIT(vid % 32); in ixgbe_scrub_vfta()
5074 if (test_bit(vid, adapter->active_vlans)) in ixgbe_scrub_vfta()
5086 for (i = VFTA_BLOCK_SIZE; i--;) { in ixgbe_scrub_vfta()
5091 vfta[i] |= adapter->active_vlans[word] >> bits; in ixgbe_scrub_vfta()
5099 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_vlan_promisc_disable()
5107 if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) || in ixgbe_vlan_promisc_disable()
5108 hw->mac.type == ixgbe_mac_82598EB) in ixgbe_vlan_promisc_disable()
5112 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) in ixgbe_vlan_promisc_disable()
5116 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC; in ixgbe_vlan_promisc_disable()
5118 for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE) in ixgbe_vlan_promisc_disable()
5126 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); in ixgbe_restore_vlan()
5128 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID) in ixgbe_restore_vlan()
5129 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); in ixgbe_restore_vlan()
5133 * ixgbe_write_mc_addr_list - write multicast addresses to MTA
5137 * Returns: -ENOMEM on failure
5144 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_write_mc_addr_list()
5149 if (hw->mac.ops.update_mc_addr_list) in ixgbe_write_mc_addr_list()
5150 hw->mac.ops.update_mc_addr_list(hw, netdev); in ixgbe_write_mc_addr_list()
5152 return -ENOMEM; in ixgbe_write_mc_addr_list()
5164 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; in ixgbe_full_sync_mac_table()
5165 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_full_sync_mac_table()
5168 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { in ixgbe_full_sync_mac_table()
5169 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED; in ixgbe_full_sync_mac_table()
5171 if (mac_table->state & IXGBE_MAC_STATE_IN_USE) in ixgbe_full_sync_mac_table()
5172 hw->mac.ops.set_rar(hw, i, in ixgbe_full_sync_mac_table()
5173 mac_table->addr, in ixgbe_full_sync_mac_table()
5174 mac_table->pool, in ixgbe_full_sync_mac_table()
5177 hw->mac.ops.clear_rar(hw, i); in ixgbe_full_sync_mac_table()
5184 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; in ixgbe_sync_mac_table()
5185 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_sync_mac_table()
5188 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { in ixgbe_sync_mac_table()
5189 if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED)) in ixgbe_sync_mac_table()
5192 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED; in ixgbe_sync_mac_table()
5194 if (mac_table->state & IXGBE_MAC_STATE_IN_USE) in ixgbe_sync_mac_table()
5195 hw->mac.ops.set_rar(hw, i, in ixgbe_sync_mac_table()
5196 mac_table->addr, in ixgbe_sync_mac_table()
5197 mac_table->pool, in ixgbe_sync_mac_table()
5200 hw->mac.ops.clear_rar(hw, i); in ixgbe_sync_mac_table()
5206 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; in ixgbe_flush_sw_mac_table()
5207 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_flush_sw_mac_table()
5210 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { in ixgbe_flush_sw_mac_table()
5211 mac_table->state |= IXGBE_MAC_STATE_MODIFIED; in ixgbe_flush_sw_mac_table()
5212 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE; in ixgbe_flush_sw_mac_table()
5220 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; in ixgbe_available_rars()
5221 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_available_rars()
5224 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { in ixgbe_available_rars()
5226 if (mac_table->state & IXGBE_MAC_STATE_DEFAULT) in ixgbe_available_rars()
5230 if (mac_table->state & IXGBE_MAC_STATE_IN_USE) { in ixgbe_available_rars()
5231 if (mac_table->pool != pool) in ixgbe_available_rars()
5244 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; in ixgbe_mac_set_default_filter()
5245 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_mac_set_default_filter()
5247 memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN); in ixgbe_mac_set_default_filter()
5248 mac_table->pool = VMDQ_P(0); in ixgbe_mac_set_default_filter()
5250 mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE; in ixgbe_mac_set_default_filter()
5252 hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool, in ixgbe_mac_set_default_filter()
5259 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; in ixgbe_add_mac_filter()
5260 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_add_mac_filter()
5264 return -EINVAL; in ixgbe_add_mac_filter()
5266 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { in ixgbe_add_mac_filter()
5267 if (mac_table->state & IXGBE_MAC_STATE_IN_USE) in ixgbe_add_mac_filter()
5270 ether_addr_copy(mac_table->addr, addr); in ixgbe_add_mac_filter()
5271 mac_table->pool = pool; in ixgbe_add_mac_filter()
5273 mac_table->state |= IXGBE_MAC_STATE_MODIFIED | in ixgbe_add_mac_filter()
5281 return -ENOMEM; in ixgbe_add_mac_filter()
5287 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; in ixgbe_del_mac_filter()
5288 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_del_mac_filter()
5292 return -EINVAL; in ixgbe_del_mac_filter()
5295 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { in ixgbe_del_mac_filter()
5297 if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE)) in ixgbe_del_mac_filter()
5300 if (mac_table->pool != pool) in ixgbe_del_mac_filter()
5303 if (!ether_addr_equal(addr, mac_table->addr)) in ixgbe_del_mac_filter()
5306 mac_table->state |= IXGBE_MAC_STATE_MODIFIED; in ixgbe_del_mac_filter()
5307 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE; in ixgbe_del_mac_filter()
5314 return -ENOMEM; in ixgbe_del_mac_filter()
5337 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
5348 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_set_rx_mode()
5350 netdev_features_t features = netdev->features; in ixgbe_set_rx_mode()
5357 fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */ in ixgbe_set_rx_mode()
5364 if (netdev->flags & IFF_PROMISC) { in ixgbe_set_rx_mode()
5365 hw->addr_ctrl.user_set_promisc = true; in ixgbe_set_rx_mode()
5370 if (netdev->flags & IFF_ALLMULTI) { in ixgbe_set_rx_mode()
5374 hw->addr_ctrl.user_set_promisc = false; in ixgbe_set_rx_mode()
5399 if (hw->mac.type != ixgbe_mac_82598EB) { in ixgbe_set_rx_mode()
5435 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) in ixgbe_napi_enable_all()
5436 napi_enable(&adapter->q_vector[q_idx]->napi); in ixgbe_napi_enable_all()
5443 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) in ixgbe_napi_disable_all()
5444 napi_disable(&adapter->q_vector[q_idx]->napi); in ixgbe_napi_disable_all()
5450 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_udp_tunnel_sync()
5455 adapter->vxlan_port = ti.port; in ixgbe_udp_tunnel_sync()
5457 adapter->geneve_port = ti.port; in ixgbe_udp_tunnel_sync()
5460 ntohs(adapter->vxlan_port) | in ixgbe_udp_tunnel_sync()
5461 ntohs(adapter->geneve_port) << in ixgbe_udp_tunnel_sync()
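/*
 * Editor's sketch of the write above: both tunnel ports share one 32-bit
 * register, VXLAN in the low half and GENEVE shifted into the high half
 * (the shift constant is assumed from context).
 */
u32 vxlanctrl = ntohs(adapter->vxlan_port) |
		ntohs(adapter->geneve_port) << 16;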
5485 * ixgbe_configure_dcb - Configure DCB hardware
5488 * This is called by the driver on open to configure the DCB hardware. in ixgbe_configure_dcb()
5490 * It is also called when the DCB state is reconfigured (e.g. via the DCB netlink interface). in ixgbe_configure_dcb()
5494 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_dcb()
5495 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; in ixgbe_configure_dcb()
5497 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { in ixgbe_configure_dcb()
5498 if (hw->mac.type == ixgbe_mac_82598EB) in ixgbe_configure_dcb()
5499 netif_set_tso_max_size(adapter->netdev, 65536); in ixgbe_configure_dcb()
5503 if (hw->mac.type == ixgbe_mac_82598EB) in ixgbe_configure_dcb()
5504 netif_set_tso_max_size(adapter->netdev, 32768); in ixgbe_configure_dcb()
5507 if (adapter->netdev->fcoe_mtu) in ixgbe_configure_dcb()
5512 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) { in ixgbe_configure_dcb()
5513 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, in ixgbe_configure_dcb()
5515 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, in ixgbe_configure_dcb()
5517 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); in ixgbe_configure_dcb()
5518 } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) { in ixgbe_configure_dcb()
5519 ixgbe_dcb_hw_ets(&adapter->hw, in ixgbe_configure_dcb()
5520 adapter->ixgbe_ieee_ets, in ixgbe_configure_dcb()
5522 ixgbe_dcb_hw_pfc_config(&adapter->hw, in ixgbe_configure_dcb()
5523 adapter->ixgbe_ieee_pfc->pfc_en, in ixgbe_configure_dcb()
5524 adapter->ixgbe_ieee_ets->prio_tc); in ixgbe_configure_dcb()
5528 if (hw->mac.type != ixgbe_mac_82598EB) { in ixgbe_configure_dcb()
5530 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1; in ixgbe_configure_dcb()
5547 * ixgbe_hpbthresh - calculate high water mark for flow control
5554 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_hpbthresh()
5555 struct net_device *dev = adapter->netdev; in ixgbe_hpbthresh()
5560 tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING; in ixgbe_hpbthresh()
5564 if (dev->fcoe_mtu && tc < IXGBE_FCOE_JUMBO_FRAME_SIZE && in ixgbe_hpbthresh()
5570 switch (hw->mac.type) { in ixgbe_hpbthresh()
5584 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) in ixgbe_hpbthresh()
5591 marker = rx_pba - kb; in ixgbe_hpbthresh()
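/*
 * Editor's sketch of the high-water idea above: reserve enough of the
 * packet buffer to absorb one worst-case frame plus link/PCIe turnaround
 * delay, rounded up to whole kB.  The real delay term is per MAC type.
 */
static int high_water_kb(int rx_pba_kb, int frame_bytes, int delay_bytes)
{
	int kb = DIV_ROUND_UP(frame_bytes + delay_bytes, 1024);

	return rx_pba_kb - kb;	/* send XOFF once fill crosses this mark */
}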
5608 * ixgbe_lpbthresh - calculate low water mark for flow control
5615 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_lpbthresh()
5616 struct net_device *dev = adapter->netdev; in ixgbe_lpbthresh()
5621 tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN; in ixgbe_lpbthresh()
5625 if (dev->fcoe_mtu && tc < IXGBE_FCOE_JUMBO_FRAME_SIZE && in ixgbe_lpbthresh()
5626 (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up))) in ixgbe_lpbthresh()
5631 switch (hw->mac.type) { in ixgbe_lpbthresh()
5649 * ixgbe_pbthresh_setup - calculate and set up the high and low water marks in ixgbe_pbthresh_setup()
5653 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_pbthresh_setup()
5654 int num_tc = adapter->hw_tcs; in ixgbe_pbthresh_setup()
5661 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i); in ixgbe_pbthresh_setup()
5662 hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i); in ixgbe_pbthresh_setup()
5665 if (hw->fc.low_water[i] > hw->fc.high_water[i]) in ixgbe_pbthresh_setup()
5666 hw->fc.low_water[i] = 0; in ixgbe_pbthresh_setup()
5670 hw->fc.high_water[i] = 0; in ixgbe_pbthresh_setup()
5675 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_pb()
5677 u8 tc = adapter->hw_tcs; in ixgbe_configure_pb()
5679 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || in ixgbe_configure_pb()
5680 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) in ixgbe_configure_pb()
5681 hdrm = 32 << adapter->fdir_pballoc; in ixgbe_configure_pb()
5685 hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL); in ixgbe_configure_pb()
5691 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_fdir_filter_restore()
5696 spin_lock(&adapter->fdir_perfect_lock); in ixgbe_fdir_filter_restore()
5698 if (!hlist_empty(&adapter->fdir_filter_list)) in ixgbe_fdir_filter_restore()
5699 ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask); in ixgbe_fdir_filter_restore()
5702 &adapter->fdir_filter_list, fdir_node) { in ixgbe_fdir_filter_restore()
5703 if (filter->action == IXGBE_FDIR_DROP_QUEUE) { in ixgbe_fdir_filter_restore()
5706 u32 ring = ethtool_get_flow_spec_ring(filter->action); in ixgbe_fdir_filter_restore()
5707 u8 vf = ethtool_get_flow_spec_ring_vf(filter->action); in ixgbe_fdir_filter_restore()
5709 if (!vf && (ring >= adapter->num_rx_queues)) { in ixgbe_fdir_filter_restore()
5714 ((vf > adapter->num_vfs) || in ixgbe_fdir_filter_restore()
5715 ring >= adapter->num_rx_queues_per_pool)) { in ixgbe_fdir_filter_restore()
5723 queue = adapter->rx_ring[ring]->reg_idx; in ixgbe_fdir_filter_restore()
5725 queue = ((vf - 1) * in ixgbe_fdir_filter_restore()
5726 adapter->num_rx_queues_per_pool) + ring; in ixgbe_fdir_filter_restore()
5730 &filter->filter, filter->sw_idx, queue); in ixgbe_fdir_filter_restore()
5733 spin_unlock(&adapter->fdir_perfect_lock); in ixgbe_fdir_filter_restore()
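/*
 * Editor's note on the queue mapping above: an n-tuple filter steered at
 * VF n (n >= 1), ring r resolves to absolute queue
 * (n - 1) * num_rx_queues_per_pool + r, while vf == 0 means the PF and
 * uses the ring's own reg_idx.
 */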
5737 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
5742 u16 i = rx_ring->next_to_clean; in ixgbe_clean_rx_ring()
5743 struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; in ixgbe_clean_rx_ring()
5745 if (rx_ring->xsk_pool) { in ixgbe_clean_rx_ring()
5751 while (i != rx_ring->next_to_alloc) { in ixgbe_clean_rx_ring()
5752 if (rx_buffer->skb) { in ixgbe_clean_rx_ring()
5753 struct sk_buff *skb = rx_buffer->skb; in ixgbe_clean_rx_ring()
5754 if (IXGBE_CB(skb)->page_released) in ixgbe_clean_rx_ring()
5755 dma_unmap_page_attrs(rx_ring->dev, in ixgbe_clean_rx_ring()
5756 IXGBE_CB(skb)->dma, in ixgbe_clean_rx_ring()
5766 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbe_clean_rx_ring()
5767 rx_buffer->dma, in ixgbe_clean_rx_ring()
5768 rx_buffer->page_offset, in ixgbe_clean_rx_ring()
5773 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in ixgbe_clean_rx_ring()
5777 __page_frag_cache_drain(rx_buffer->page, in ixgbe_clean_rx_ring()
5778 rx_buffer->pagecnt_bias); in ixgbe_clean_rx_ring()
5782 if (i == rx_ring->count) { in ixgbe_clean_rx_ring()
5784 rx_buffer = rx_ring->rx_buffer_info; in ixgbe_clean_rx_ring()
5789 rx_ring->next_to_alloc = 0; in ixgbe_clean_rx_ring()
5790 rx_ring->next_to_clean = 0; in ixgbe_clean_rx_ring()
5791 rx_ring->next_to_use = 0; in ixgbe_clean_rx_ring()
5797 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; in ixgbe_fwd_ring_up()
5798 int num_tc = netdev_get_num_tc(adapter->netdev); in ixgbe_fwd_ring_up()
5799 struct net_device *vdev = accel->netdev; in ixgbe_fwd_ring_up()
5802 baseq = accel->pool * adapter->num_rx_queues_per_pool; in ixgbe_fwd_ring_up()
5804 accel->pool, adapter->num_rx_pools, in ixgbe_fwd_ring_up()
5805 baseq, baseq + adapter->num_rx_queues_per_pool); in ixgbe_fwd_ring_up()
5807 accel->rx_base_queue = baseq; in ixgbe_fwd_ring_up()
5808 accel->tx_base_queue = baseq; in ixgbe_fwd_ring_up()
5812 netdev_bind_sb_channel_queue(adapter->netdev, vdev, in ixgbe_fwd_ring_up()
5815 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) in ixgbe_fwd_ring_up()
5816 adapter->rx_ring[baseq + i]->netdev = vdev; in ixgbe_fwd_ring_up()
5826 err = ixgbe_add_mac_filter(adapter, vdev->dev_addr, in ixgbe_fwd_ring_up()
5827 VMDQ_P(accel->pool)); in ixgbe_fwd_ring_up()
5834 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) in ixgbe_fwd_ring_up()
5835 adapter->rx_ring[baseq + i]->netdev = NULL; in ixgbe_fwd_ring_up()
5840 netdev_unbind_sb_channel(adapter->netdev, vdev); in ixgbe_fwd_ring_up()
5843 clear_bit(accel->pool, adapter->fwd_bitmask); in ixgbe_fwd_ring_up()
5852 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data; in ixgbe_macvlan_up()
5873 netdev_walk_all_upper_dev_rcu(adapter->netdev, in ixgbe_configure_dfwd()
5879 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure()
5891 ixgbe_set_rx_mode(adapter->netdev); in ixgbe_configure()
5895 switch (hw->mac.type) { in ixgbe_configure()
5898 hw->mac.ops.disable_rx_buff(hw); in ixgbe_configure()
5904 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { in ixgbe_configure()
5905 ixgbe_init_fdir_signature_82599(&adapter->hw, in ixgbe_configure()
5906 adapter->fdir_pballoc); in ixgbe_configure()
5907 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { in ixgbe_configure()
5908 ixgbe_init_fdir_perfect_82599(&adapter->hw, in ixgbe_configure()
5909 adapter->fdir_pballoc); in ixgbe_configure()
5913 switch (hw->mac.type) { in ixgbe_configure()
5916 hw->mac.ops.enable_rx_buff(hw); in ixgbe_configure()
5924 if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) in ixgbe_configure()
5939 * ixgbe_enable_link_status_events - enable link status events
5952 err = ixgbe_configure_lse(&adapter->hw, true, mask); in ixgbe_enable_link_status_events()
5956 adapter->lse_mask = mask; in ixgbe_enable_link_status_events()
5961 * ixgbe_disable_link_status_events - disable link status events
5972 err = ixgbe_configure_lse(&adapter->hw, false, adapter->lse_mask); in ixgbe_disable_link_status_events()
5976 adapter->lse_mask = 0; in ixgbe_disable_link_status_events()
5981 * ixgbe_sfp_link_config - set up SFP+ link
5992 if (adapter->hw.mac.type == ixgbe_mac_82598EB) in ixgbe_sfp_link_config()
5993 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; in ixgbe_sfp_link_config()
5995 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; in ixgbe_sfp_link_config()
5996 adapter->sfp_poll_time = 0; in ixgbe_sfp_link_config()
6000 * ixgbe_non_sfp_link_config - set up non-SFP+ link
6003 * Configure non-SFP link.
6016 int ret = -EIO; in ixgbe_non_sfp_link_config()
6019 if (hw->mac.ops.check_link) in ixgbe_non_sfp_link_config()
6020 ret = hw->mac.ops.check_link(hw, &speed, &link_up, false); in ixgbe_non_sfp_link_config()
6025 speed = hw->phy.autoneg_advertised; in ixgbe_non_sfp_link_config()
6026 if (!speed && hw->mac.ops.get_link_capabilities) { in ixgbe_non_sfp_link_config()
6027 ret = hw->mac.ops.get_link_capabilities(hw, &speed, in ixgbe_non_sfp_link_config()
6029 * remove NBASE-T speeds from default autonegotiation for switches in ixgbe_non_sfp_link_config()
6031 * which cannot cope with advertised NBASE-T speeds in ixgbe_non_sfp_link_config()
6040 if (hw->mac.ops.setup_link) { in ixgbe_non_sfp_link_config()
6041 if (adapter->hw.mac.type == ixgbe_mac_e610) { in ixgbe_non_sfp_link_config()
6046 ret = hw->mac.ops.setup_link(hw, speed, link_up); in ixgbe_non_sfp_link_config()
6053 * ixgbe_check_media_subtask - check for media
6061 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_check_media_subtask()
6064 if (!(adapter->flags2 & IXGBE_FLAG2_NO_MEDIA)) in ixgbe_check_media_subtask()
6071 ixgbe_check_link_cfg_err(adapter, hw->link.link_info.link_cfg_err); in ixgbe_check_media_subtask()
6073 if (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE) { in ixgbe_check_media_subtask()
6077 if (!(ixgbe_non_sfp_link_config(&adapter->hw))) in ixgbe_check_media_subtask()
6078 adapter->flags2 &= ~IXGBE_FLAG2_NO_MEDIA; in ixgbe_check_media_subtask()
6087 * ixgbe_clear_vf_stats_counters - Clear out VF stats after reset
6095 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_clear_vf_stats_counters()
6098 for (i = 0; i < adapter->num_vfs; i++) { in ixgbe_clear_vf_stats_counters()
6099 adapter->vfinfo[i].last_vfstats.gprc = in ixgbe_clear_vf_stats_counters()
6101 adapter->vfinfo[i].saved_rst_vfstats.gprc += in ixgbe_clear_vf_stats_counters()
6102 adapter->vfinfo[i].vfstats.gprc; in ixgbe_clear_vf_stats_counters()
6103 adapter->vfinfo[i].vfstats.gprc = 0; in ixgbe_clear_vf_stats_counters()
6104 adapter->vfinfo[i].last_vfstats.gptc = in ixgbe_clear_vf_stats_counters()
6106 adapter->vfinfo[i].saved_rst_vfstats.gptc += in ixgbe_clear_vf_stats_counters()
6107 adapter->vfinfo[i].vfstats.gptc; in ixgbe_clear_vf_stats_counters()
6108 adapter->vfinfo[i].vfstats.gptc = 0; in ixgbe_clear_vf_stats_counters()
6109 adapter->vfinfo[i].last_vfstats.gorc = in ixgbe_clear_vf_stats_counters()
6111 adapter->vfinfo[i].saved_rst_vfstats.gorc += in ixgbe_clear_vf_stats_counters()
6112 adapter->vfinfo[i].vfstats.gorc; in ixgbe_clear_vf_stats_counters()
6113 adapter->vfinfo[i].vfstats.gorc = 0; in ixgbe_clear_vf_stats_counters()
6114 adapter->vfinfo[i].last_vfstats.gotc = in ixgbe_clear_vf_stats_counters()
6116 adapter->vfinfo[i].saved_rst_vfstats.gotc += in ixgbe_clear_vf_stats_counters()
6117 adapter->vfinfo[i].vfstats.gotc; in ixgbe_clear_vf_stats_counters()
6118 adapter->vfinfo[i].vfstats.gotc = 0; in ixgbe_clear_vf_stats_counters()
6119 adapter->vfinfo[i].last_vfstats.mprc = in ixgbe_clear_vf_stats_counters()
6121 adapter->vfinfo[i].saved_rst_vfstats.mprc += in ixgbe_clear_vf_stats_counters()
6122 adapter->vfinfo[i].vfstats.mprc; in ixgbe_clear_vf_stats_counters()
6123 adapter->vfinfo[i].vfstats.mprc = 0; in ixgbe_clear_vf_stats_counters()
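/*
 * Editor's note: the fold above (saved_rst += current; current = 0) lets
 * cumulative VF statistics survive the hardware counter clear that a
 * reset performs.
 */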
6129 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_setup_gpie()
6132 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { in ixgbe_setup_gpie()
6137 * use EIAM to auto-mask when MSI-X interrupt is asserted in ixgbe_setup_gpie()
6140 switch (hw->mac.type) { in ixgbe_setup_gpie()
6156 /* legacy interrupts, use EIAM to auto-mask when reading EICR in ixgbe_setup_gpie()
6164 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { in ixgbe_setup_gpie()
6167 switch (adapter->ring_feature[RING_F_VMDQ].mask) { in ixgbe_setup_gpie()
6181 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { in ixgbe_setup_gpie()
6182 switch (adapter->hw.mac.type) { in ixgbe_setup_gpie()
6192 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) in ixgbe_setup_gpie()
6195 switch (hw->mac.type) { in ixgbe_setup_gpie()
6212 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_up_complete()
6219 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) in ixgbe_up_complete()
6225 if (hw->mac.ops.enable_tx_laser) in ixgbe_up_complete()
6226 hw->mac.ops.enable_tx_laser(hw); in ixgbe_up_complete()
6228 if (hw->phy.ops.set_phy_power) in ixgbe_up_complete()
6229 hw->phy.ops.set_phy_power(hw, true); in ixgbe_up_complete()
6232 clear_bit(__IXGBE_DOWN, &adapter->state); in ixgbe_up_complete()
6251 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { in ixgbe_up_complete()
6259 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; in ixgbe_up_complete()
6260 adapter->link_check_timeout = jiffies; in ixgbe_up_complete()
6261 mod_timer(&adapter->service_timer, jiffies); in ixgbe_up_complete()
6276 netif_trans_update(adapter->netdev); in ixgbe_reinit_locked()
6278 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) in ixgbe_reinit_locked()
6280 if (adapter->hw.phy.type == ixgbe_phy_fw) in ixgbe_reinit_locked()
6284 * If SR-IOV enabled then wait a bit before bringing the adapter in ixgbe_reinit_locked()
6289 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) in ixgbe_reinit_locked()
6292 clear_bit(__IXGBE_RESETTING, &adapter->state); in ixgbe_reinit_locked()
6307 pcie_capability_read_word(adapter->pdev, PCI_EXP_DEVCTL2, &devctl2); in ixgbe_get_completion_timeout()
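/*
 * Editor's sketch: the completion timeout is selected by the low nibble
 * of DEVCTL2 (PCI_EXP_DEVCTL2_COMP_TIMEOUT); the driver sizes its
 * shutdown poll budget from the range read here.  Decoding of the
 * individual range values is omitted.
 */
u16 range = devctl2 & PCI_EXP_DEVCTL2_COMP_TIMEOUT;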
6343 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_disable_rx()
6348 hw->mac.ops.disable_rx(hw); in ixgbe_disable_rx()
6350 if (ixgbe_removed(hw->hw_addr)) in ixgbe_disable_rx()
6354 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_disable_rx()
6355 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_disable_rx()
6356 u8 reg_idx = ring->reg_idx; in ixgbe_disable_rx()
6367 if (hw->mac.type == ixgbe_mac_82598EB && in ixgbe_disable_rx()
6387 while (wait_loop--) { in ixgbe_disable_rx()
6397 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_disable_rx()
6398 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_disable_rx()
6399 u8 reg_idx = ring->reg_idx; in ixgbe_disable_rx()
6415 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_disable_tx()
6419 if (ixgbe_removed(hw->hw_addr)) in ixgbe_disable_tx()
6423 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbe_disable_tx()
6424 struct ixgbe_ring *ring = adapter->tx_ring[i]; in ixgbe_disable_tx()
6425 u8 reg_idx = ring->reg_idx; in ixgbe_disable_tx()
6431 for (i = 0; i < adapter->num_xdp_queues; i++) { in ixgbe_disable_tx()
6432 struct ixgbe_ring *ring = adapter->xdp_ring[i]; in ixgbe_disable_tx()
6433 u8 reg_idx = ring->reg_idx; in ixgbe_disable_tx()
6462 while (wait_loop--) { in ixgbe_disable_tx()
6472 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbe_disable_tx()
6473 struct ixgbe_ring *ring = adapter->tx_ring[i]; in ixgbe_disable_tx()
6474 u8 reg_idx = ring->reg_idx; in ixgbe_disable_tx()
6478 for (i = 0; i < adapter->num_xdp_queues; i++) { in ixgbe_disable_tx()
6479 struct ixgbe_ring *ring = adapter->xdp_ring[i]; in ixgbe_disable_tx()
6480 u8 reg_idx = ring->reg_idx; in ixgbe_disable_tx()
6494 switch (hw->mac.type) { in ixgbe_disable_tx()
6512 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_reset()
6513 struct net_device *netdev = adapter->netdev; in ixgbe_reset()
6516 if (ixgbe_removed(hw->hw_addr)) in ixgbe_reset()
6519 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) in ixgbe_reset()
6523 adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP | in ixgbe_reset()
6525 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; in ixgbe_reset()
6527 err = hw->mac.ops.init_hw(hw); in ixgbe_reset()
6530 case -ENOENT: in ixgbe_reset()
6531 case -EOPNOTSUPP: in ixgbe_reset()
6533 case -EALREADY: in ixgbe_reset()
6536 case -EACCES: in ixgbe_reset()
6537 /* We are running on a pre-production device, log a warning */ in ixgbe_reset()
6538 e_dev_warn("This device is a pre-production adapter/LOM. " in ixgbe_reset()
6549 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); in ixgbe_reset()
6559 if (hw->mac.san_mac_rar_index) in ixgbe_reset()
6560 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); in ixgbe_reset()
6562 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) in ixgbe_reset()
6565 if (hw->phy.ops.set_phy_power) { in ixgbe_reset()
6566 if (!netif_running(adapter->netdev) && !adapter->wol) in ixgbe_reset()
6567 hw->phy.ops.set_phy_power(hw, false); in ixgbe_reset()
6569 hw->phy.ops.set_phy_power(hw, true); in ixgbe_reset()
6574 * ixgbe_clean_tx_ring - Free Tx Buffers
6579 u16 i = tx_ring->next_to_clean; in ixgbe_clean_tx_ring()
6580 struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbe_clean_tx_ring()
6582 if (tx_ring->xsk_pool) { in ixgbe_clean_tx_ring()
6587 while (i != tx_ring->next_to_use) { in ixgbe_clean_tx_ring()
6592 xdp_return_frame(tx_buffer->xdpf); in ixgbe_clean_tx_ring()
6594 dev_kfree_skb_any(tx_buffer->skb); in ixgbe_clean_tx_ring()
6597 dma_unmap_single(tx_ring->dev, in ixgbe_clean_tx_ring()
6603 eop_desc = tx_buffer->next_to_watch; in ixgbe_clean_tx_ring()
6611 if (unlikely(i == tx_ring->count)) { in ixgbe_clean_tx_ring()
6613 tx_buffer = tx_ring->tx_buffer_info; in ixgbe_clean_tx_ring()
6619 dma_unmap_page(tx_ring->dev, in ixgbe_clean_tx_ring()
6628 if (unlikely(i == tx_ring->count)) { in ixgbe_clean_tx_ring()
6630 tx_buffer = tx_ring->tx_buffer_info; in ixgbe_clean_tx_ring()
6640 tx_ring->next_to_use = 0; in ixgbe_clean_tx_ring()
6641 tx_ring->next_to_clean = 0; in ixgbe_clean_tx_ring()
6645 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
6652 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbe_clean_all_rx_rings()
6653 ixgbe_clean_rx_ring(adapter->rx_ring[i]); in ixgbe_clean_all_rx_rings()
6657 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
6664 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbe_clean_all_tx_rings()
6665 ixgbe_clean_tx_ring(adapter->tx_ring[i]); in ixgbe_clean_all_tx_rings()
6666 for (i = 0; i < adapter->num_xdp_queues; i++) in ixgbe_clean_all_tx_rings()
6667 ixgbe_clean_tx_ring(adapter->xdp_ring[i]); in ixgbe_clean_all_tx_rings()
6675 spin_lock(&adapter->fdir_perfect_lock); in ixgbe_fdir_filter_exit()
6678 &adapter->fdir_filter_list, fdir_node) { in ixgbe_fdir_filter_exit()
6679 hlist_del(&filter->fdir_node); in ixgbe_fdir_filter_exit()
6682 adapter->fdir_filter_count = 0; in ixgbe_fdir_filter_exit()
6684 spin_unlock(&adapter->fdir_perfect_lock); in ixgbe_fdir_filter_exit()
6689 struct net_device *netdev = adapter->netdev; in ixgbe_down()
6690 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_down()
6694 if (test_and_set_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_down()
6708 if (adapter->xdp_ring[0]) in ixgbe_down()
6715 clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state); in ixgbe_down()
6716 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; in ixgbe_down()
6717 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; in ixgbe_down()
6719 timer_delete_sync(&adapter->service_timer); in ixgbe_down()
6721 if (adapter->num_vfs) { in ixgbe_down()
6723 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); in ixgbe_down()
6726 for (i = 0 ; i < adapter->num_vfs; i++) in ixgbe_down()
6727 adapter->vfinfo[i].clear_to_send = false; in ixgbe_down()
6736 if (!pci_channel_offline(adapter->pdev)) in ixgbe_down()
6740 if (hw->mac.ops.disable_tx_laser) in ixgbe_down()
6741 hw->mac.ops.disable_tx_laser(hw); in ixgbe_down()
6745 if (adapter->hw.mac.type == ixgbe_mac_e610) in ixgbe_down()
6750 * ixgbe_set_eee_capable - helper function to determine EEE support on X550
6755 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_set_eee_capable()
6757 switch (hw->device_id) { in ixgbe_set_eee_capable()
6760 if (!hw->phy.eee_speeds_supported) in ixgbe_set_eee_capable()
6762 adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE; in ixgbe_set_eee_capable()
6763 if (!hw->phy.eee_speeds_advertised) in ixgbe_set_eee_capable()
6765 adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED; in ixgbe_set_eee_capable()
6768 adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE; in ixgbe_set_eee_capable()
6769 adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED; in ixgbe_set_eee_capable()
6775 * ixgbe_tx_timeout - Respond to a Tx Hang
6790 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_init_dcb()
6794 switch (hw->mac.type) { in ixgbe_init_dcb()
6797 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; in ixgbe_init_dcb()
6798 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; in ixgbe_init_dcb()
6803 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS; in ixgbe_init_dcb()
6804 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS; in ixgbe_init_dcb()
6809 adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS; in ixgbe_init_dcb()
6810 adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS; in ixgbe_init_dcb()
6814 /* Configure DCB traffic classes */ in ixgbe_init_dcb()
6816 tc = &adapter->dcb_cfg.tc_config[j]; in ixgbe_init_dcb()
6817 tc->path[DCB_TX_CONFIG].bwg_id = 0; in ixgbe_init_dcb()
6818 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1); in ixgbe_init_dcb()
6819 tc->path[DCB_RX_CONFIG].bwg_id = 0; in ixgbe_init_dcb()
6820 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1); in ixgbe_init_dcb()
6821 tc->dcb_pfc = pfc_disabled; in ixgbe_init_dcb()
6824 /* Initialize default user to priority mapping, UPx->TC0 */ in ixgbe_init_dcb()
6825 tc = &adapter->dcb_cfg.tc_config[0]; in ixgbe_init_dcb()
6826 tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; in ixgbe_init_dcb()
6827 tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; in ixgbe_init_dcb()
6829 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; in ixgbe_init_dcb()
6830 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; in ixgbe_init_dcb()
6831 adapter->dcb_cfg.pfc_mode_enable = false; in ixgbe_init_dcb()
6832 adapter->dcb_set_bitmap = 0x00; in ixgbe_init_dcb()
6833 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) in ixgbe_init_dcb()
6834 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; in ixgbe_init_dcb()
6835 memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, in ixgbe_init_dcb()
6836 sizeof(adapter->temp_dcb_cfg)); in ixgbe_init_dcb()
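/*
 * Editor's check of the defaults above: eight traffic classes alternate
 * 12%/13% via (j & 1), so 4 * 12 + 4 * 13 = 100% of bandwidth group 0.
 */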
6841 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
6852 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_sw_init()
6853 struct pci_dev *pdev = adapter->pdev; in ixgbe_sw_init()
6860 hw->vendor_id = pdev->vendor; in ixgbe_sw_init()
6861 hw->device_id = pdev->device; in ixgbe_sw_init()
6862 hw->revision_id = pdev->revision; in ixgbe_sw_init()
6863 hw->subsystem_vendor_id = pdev->subsystem_vendor; in ixgbe_sw_init()
6864 hw->subsystem_device_id = pdev->subsystem_device; in ixgbe_sw_init()
6866 hw->mac.max_link_up_time = IXGBE_LINK_UP_TIME; in ixgbe_sw_init()
6869 ii->get_invariants(hw); in ixgbe_sw_init()
6873 adapter->ring_feature[RING_F_RSS].limit = rss; in ixgbe_sw_init()
6874 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; in ixgbe_sw_init()
6875 adapter->max_q_vectors = MAX_Q_VECTORS_82599; in ixgbe_sw_init()
6876 adapter->atr_sample_rate = 20; in ixgbe_sw_init()
6878 adapter->ring_feature[RING_F_FDIR].limit = fdir; in ixgbe_sw_init()
6879 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; in ixgbe_sw_init()
6880 adapter->ring_feature[RING_F_VMDQ].limit = 1; in ixgbe_sw_init()
6882 adapter->flags |= IXGBE_FLAG_DCA_CAPABLE; in ixgbe_sw_init()
6885 adapter->flags |= IXGBE_FLAG_DCB_CAPABLE; in ixgbe_sw_init()
6886 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; in ixgbe_sw_init()
6889 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; in ixgbe_sw_init()
6890 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; in ixgbe_sw_init()
6893 adapter->fcoe.up = IXGBE_FCOE_DEFTC; in ixgbe_sw_init()
6898 adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]), in ixgbe_sw_init()
6900 if (!adapter->jump_tables[0]) in ixgbe_sw_init()
6901 return -ENOMEM; in ixgbe_sw_init()
6902 adapter->jump_tables[0]->mat = ixgbe_ipv4_fields; in ixgbe_sw_init()
6905 adapter->jump_tables[i] = NULL; in ixgbe_sw_init()
6907 adapter->mac_table = kcalloc(hw->mac.num_rar_entries, in ixgbe_sw_init()
6910 if (!adapter->mac_table) in ixgbe_sw_init()
6911 return -ENOMEM; in ixgbe_sw_init()
6914 return -ENOMEM; in ixgbe_sw_init()
6916 adapter->af_xdp_zc_qps = bitmap_zalloc(IXGBE_MAX_XDP_QS, GFP_KERNEL); in ixgbe_sw_init()
6917 if (!adapter->af_xdp_zc_qps) in ixgbe_sw_init()
6918 return -ENOMEM; in ixgbe_sw_init()
6921 switch (hw->mac.type) { in ixgbe_sw_init()
6923 adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE; in ixgbe_sw_init()
6925 if (hw->device_id == IXGBE_DEV_ID_82598AT) in ixgbe_sw_init()
6926 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; in ixgbe_sw_init()
6928 adapter->max_q_vectors = MAX_Q_VECTORS_82598; in ixgbe_sw_init()
6929 adapter->ring_feature[RING_F_FDIR].limit = 0; in ixgbe_sw_init()
6930 adapter->atr_sample_rate = 0; in ixgbe_sw_init()
6931 adapter->fdir_pballoc = 0; in ixgbe_sw_init()
6933 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; in ixgbe_sw_init()
6934 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; in ixgbe_sw_init()
6936 adapter->fcoe.up = 0; in ixgbe_sw_init()
6941 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) in ixgbe_sw_init()
6942 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; in ixgbe_sw_init()
6947 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; in ixgbe_sw_init()
6950 switch (hw->device_id) { in ixgbe_sw_init()
6953 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; in ixgbe_sw_init()
6961 adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE; in ixgbe_sw_init()
6964 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; in ixgbe_sw_init()
6966 adapter->fcoe.up = 0; in ixgbe_sw_init()
6971 if (hw->mac.type == ixgbe_mac_X550) in ixgbe_sw_init()
6972 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; in ixgbe_sw_init()
6974 adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; in ixgbe_sw_init()
6982 if (hw->mac.ops.init_swfw_sync) in ixgbe_sw_init()
6983 hw->mac.ops.init_swfw_sync(hw); in ixgbe_sw_init()
6985 if (hw->mac.type == ixgbe_mac_e610) in ixgbe_sw_init()
6986 mutex_init(&hw->aci.lock); in ixgbe_sw_init()
6990 spin_lock_init(&adapter->fcoe.lock); in ixgbe_sw_init()
6993 /* n-tuple support exists, always init our spinlock */ in ixgbe_sw_init()
6994 spin_lock_init(&adapter->fdir_perfect_lock); in ixgbe_sw_init()
6997 spin_lock_init(&adapter->vfs_lock); in ixgbe_sw_init()
7005 hw->fc.requested_mode = ixgbe_fc_full; in ixgbe_sw_init()
7006 hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */ in ixgbe_sw_init()
7008 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; in ixgbe_sw_init()
7009 hw->fc.send_xon = true; in ixgbe_sw_init()
7010 hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw); in ixgbe_sw_init()
7014 …e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the … in ixgbe_sw_init()
7016 /* assign number of SR-IOV VFs */ in ixgbe_sw_init()
7017 if (hw->mac.type != ixgbe_mac_82598EB) { in ixgbe_sw_init()
7020 e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n"); in ixgbe_sw_init()
7026 adapter->rx_itr_setting = 1; in ixgbe_sw_init()
7027 adapter->tx_itr_setting = 1; in ixgbe_sw_init()
7030 adapter->tx_ring_count = IXGBE_DEFAULT_TXD; in ixgbe_sw_init()
7031 adapter->rx_ring_count = IXGBE_DEFAULT_RXD; in ixgbe_sw_init()
7034 adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK; in ixgbe_sw_init()
7037 if (hw->eeprom.ops.init_params(hw)) { in ixgbe_sw_init()
7039 return -EIO; in ixgbe_sw_init()
7043 set_bit(0, adapter->fwd_bitmask); in ixgbe_sw_init()
7044 set_bit(__IXGBE_DOWN, &adapter->state); in ixgbe_sw_init()
7054 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
7061 struct device *dev = tx_ring->dev; in ixgbe_setup_tx_resources()
7066 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; in ixgbe_setup_tx_resources()
7068 if (tx_ring->q_vector) in ixgbe_setup_tx_resources()
7069 ring_node = tx_ring->q_vector->numa_node; in ixgbe_setup_tx_resources()
7071 tx_ring->tx_buffer_info = vmalloc_node(size, ring_node); in ixgbe_setup_tx_resources()
7072 if (!tx_ring->tx_buffer_info) in ixgbe_setup_tx_resources()
7073 tx_ring->tx_buffer_info = vmalloc(size); in ixgbe_setup_tx_resources()
7074 if (!tx_ring->tx_buffer_info) in ixgbe_setup_tx_resources()
7078 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); in ixgbe_setup_tx_resources()
7079 tx_ring->size = ALIGN(tx_ring->size, 4096); in ixgbe_setup_tx_resources()
7082 tx_ring->desc = dma_alloc_coherent(dev, in ixgbe_setup_tx_resources()
7083 tx_ring->size, in ixgbe_setup_tx_resources()
7084 &tx_ring->dma, in ixgbe_setup_tx_resources()
7087 if (!tx_ring->desc) in ixgbe_setup_tx_resources()
7088 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in ixgbe_setup_tx_resources()
7089 &tx_ring->dma, GFP_KERNEL); in ixgbe_setup_tx_resources()
7090 if (!tx_ring->desc) in ixgbe_setup_tx_resources()
7093 tx_ring->next_to_use = 0; in ixgbe_setup_tx_resources()
7094 tx_ring->next_to_clean = 0; in ixgbe_setup_tx_resources()
7098 vfree(tx_ring->tx_buffer_info); in ixgbe_setup_tx_resources()
7099 tx_ring->tx_buffer_info = NULL; in ixgbe_setup_tx_resources()
7101 return -ENOMEM; in ixgbe_setup_tx_resources()
7105 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
7118 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbe_setup_all_tx_resources()
7119 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); in ixgbe_setup_all_tx_resources()
7126 for (j = 0; j < adapter->num_xdp_queues; j++) { in ixgbe_setup_all_tx_resources()
7127 err = ixgbe_setup_tx_resources(adapter->xdp_ring[j]); in ixgbe_setup_all_tx_resources()
7138 while (j--) in ixgbe_setup_all_tx_resources()
7139 ixgbe_free_tx_resources(adapter->xdp_ring[j]); in ixgbe_setup_all_tx_resources()
7140 while (i--) in ixgbe_setup_all_tx_resources()
7141 ixgbe_free_tx_resources(adapter->tx_ring[i]); in ixgbe_setup_all_tx_resources()
7147 struct ixgbe_q_vector *q_vector = rx_ring->q_vector; in ixgbe_rx_napi_id()
7149 return q_vector ? q_vector->napi.napi_id : 0; in ixgbe_rx_napi_id()
7153 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
7162 struct device *dev = rx_ring->dev; in ixgbe_setup_rx_resources()
7167 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; in ixgbe_setup_rx_resources()
7169 if (rx_ring->q_vector) in ixgbe_setup_rx_resources()
7170 ring_node = rx_ring->q_vector->numa_node; in ixgbe_setup_rx_resources()
7172 rx_ring->rx_buffer_info = vmalloc_node(size, ring_node); in ixgbe_setup_rx_resources()
7173 if (!rx_ring->rx_buffer_info) in ixgbe_setup_rx_resources()
7174 rx_ring->rx_buffer_info = vmalloc(size); in ixgbe_setup_rx_resources()
7175 if (!rx_ring->rx_buffer_info) in ixgbe_setup_rx_resources()
7179 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); in ixgbe_setup_rx_resources()
7180 rx_ring->size = ALIGN(rx_ring->size, 4096); in ixgbe_setup_rx_resources()
7183 rx_ring->desc = dma_alloc_coherent(dev, in ixgbe_setup_rx_resources()
7184 rx_ring->size, in ixgbe_setup_rx_resources()
7185 &rx_ring->dma, in ixgbe_setup_rx_resources()
7188 if (!rx_ring->desc) in ixgbe_setup_rx_resources()
7189 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in ixgbe_setup_rx_resources()
7190 &rx_ring->dma, GFP_KERNEL); in ixgbe_setup_rx_resources()
7191 if (!rx_ring->desc) in ixgbe_setup_rx_resources()
7194 rx_ring->next_to_clean = 0; in ixgbe_setup_rx_resources()
7195 rx_ring->next_to_use = 0; in ixgbe_setup_rx_resources()
7197 /* XDP RX-queue info */ in ixgbe_setup_rx_resources()
7198 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, in ixgbe_setup_rx_resources()
7199 rx_ring->queue_index, ixgbe_rx_napi_id(rx_ring)) < 0) in ixgbe_setup_rx_resources()
7202 WRITE_ONCE(rx_ring->xdp_prog, adapter->xdp_prog); in ixgbe_setup_rx_resources()
7206 vfree(rx_ring->rx_buffer_info); in ixgbe_setup_rx_resources()
7207 rx_ring->rx_buffer_info = NULL; in ixgbe_setup_rx_resources()
7209 return -ENOMEM; in ixgbe_setup_rx_resources()
7213 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
7226 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_setup_all_rx_resources()
7227 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]); in ixgbe_setup_all_rx_resources()
7242 while (i--) in ixgbe_setup_all_rx_resources()
7243 ixgbe_free_rx_resources(adapter->rx_ring[i]); in ixgbe_setup_all_rx_resources()
7248 * ixgbe_free_tx_resources - Free Tx Resources per Queue
7257 vfree(tx_ring->tx_buffer_info); in ixgbe_free_tx_resources()
7258 tx_ring->tx_buffer_info = NULL; in ixgbe_free_tx_resources()
7261 if (!tx_ring->desc) in ixgbe_free_tx_resources()
7264 dma_free_coherent(tx_ring->dev, tx_ring->size, in ixgbe_free_tx_resources()
7265 tx_ring->desc, tx_ring->dma); in ixgbe_free_tx_resources()
7267 tx_ring->desc = NULL; in ixgbe_free_tx_resources()
7271 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
7280 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbe_free_all_tx_resources()
7281 if (adapter->tx_ring[i]->desc) in ixgbe_free_all_tx_resources()
7282 ixgbe_free_tx_resources(adapter->tx_ring[i]); in ixgbe_free_all_tx_resources()
7283 for (i = 0; i < adapter->num_xdp_queues; i++) in ixgbe_free_all_tx_resources()
7284 if (adapter->xdp_ring[i]->desc) in ixgbe_free_all_tx_resources()
7285 ixgbe_free_tx_resources(adapter->xdp_ring[i]); in ixgbe_free_all_tx_resources()
7289 * ixgbe_free_rx_resources - Free Rx Resources
7298 rx_ring->xdp_prog = NULL; in ixgbe_free_rx_resources()
7299 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in ixgbe_free_rx_resources()
7300 vfree(rx_ring->rx_buffer_info); in ixgbe_free_rx_resources()
7301 rx_ring->rx_buffer_info = NULL; in ixgbe_free_rx_resources()
7304 if (!rx_ring->desc) in ixgbe_free_rx_resources()
7307 dma_free_coherent(rx_ring->dev, rx_ring->size, in ixgbe_free_rx_resources()
7308 rx_ring->desc, rx_ring->dma); in ixgbe_free_rx_resources()
7310 rx_ring->desc = NULL; in ixgbe_free_rx_resources()
7314 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
7327 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbe_free_all_rx_resources()
7328 if (adapter->rx_ring[i]->desc) in ixgbe_free_all_rx_resources()
7329 ixgbe_free_rx_resources(adapter->rx_ring[i]); in ixgbe_free_all_rx_resources()
7333 * ixgbe_max_xdp_frame_size - returns the maximum allowed frame size for XDP
7338 if (PAGE_SIZE >= 8192 || adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) in ixgbe_max_xdp_frame_size()
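/*
 * Editor's sketch: with an XDP program attached, ixgbe_change_mtu() must
 * keep the resulting frame within this buffer size, e.g.
 *
 *	if (adapter->xdp_prog && new_mtu > ixgbe_max_xdp_frame_size(adapter))
 *		return -EINVAL;
 *
 * (the exact check is assumed from the -EINVAL return visible below).
 */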
7345 * ixgbe_change_mtu - Change the Maximum Transfer Unit
7360 return -EINVAL; in ixgbe_change_mtu()
7369 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && in ixgbe_change_mtu()
7370 (adapter->hw.mac.type == ixgbe_mac_82599EB) && in ixgbe_change_mtu()
7375 netdev->mtu, new_mtu); in ixgbe_change_mtu()
7378 WRITE_ONCE(netdev->mtu, new_mtu); in ixgbe_change_mtu()
7387 * ixgbe_open - Called when a network interface is made active
7401 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_open()
7405 if (test_bit(__IXGBE_TESTING, &adapter->state)) in ixgbe_open()
7406 return -EBUSY; in ixgbe_open()
7427 queues = adapter->num_tx_queues; in ixgbe_open()
7432 queues = adapter->num_rx_queues; in ixgbe_open()
7442 if (adapter->hw.mac.type == ixgbe_mac_e610) { in ixgbe_open()
7443 int err = ixgbe_update_link_info(&adapter->hw); in ixgbe_open()
7449 adapter->hw.link.link_info.link_cfg_err); in ixgbe_open()
7451 err = ixgbe_non_sfp_link_config(&adapter->hw); in ixgbe_open()
7452 if (err) in ixgbe_open()
7462 if (hw->phy.ops.set_phy_power && !adapter->wol) in ixgbe_open()
7463 hw->phy.ops.set_phy_power(&adapter->hw, false); in ixgbe_open()
7476 if (adapter->hw.phy.ops.enter_lplu) { in ixgbe_close_suspend()
7477 adapter->hw.phy.reset_disable = true; in ixgbe_close_suspend()
7479 adapter->hw.phy.ops.enter_lplu(&adapter->hw); in ixgbe_close_suspend()
7480 adapter->hw.phy.reset_disable = false; in ixgbe_close_suspend()
7492 * ixgbe_close - Disables a network interface
7497 * The close entry point is called when an interface is deactivated
7522 struct net_device *netdev = adapter->netdev; in ixgbe_resume()
7525 adapter->hw.hw_addr = adapter->io_addr; in ixgbe_resume()
7533 clear_bit(__IXGBE_DISABLED, &adapter->state); in ixgbe_resume()
7540 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); in ixgbe_resume()
7558 struct net_device *netdev = adapter->netdev; in __ixgbe_shutdown()
7559 struct ixgbe_hw *hw = &adapter->hw; in __ixgbe_shutdown()
7561 u32 wufc = adapter->wol; in __ixgbe_shutdown()
7572 if (hw->mac.ops.stop_link_on_d3) in __ixgbe_shutdown()
7573 hw->mac.ops.stop_link_on_d3(hw); in __ixgbe_shutdown()
7581 if (hw->mac.ops.enable_tx_laser) in __ixgbe_shutdown()
7582 hw->mac.ops.enable_tx_laser(hw); in __ixgbe_shutdown()
7599 switch (hw->mac.type) { in __ixgbe_shutdown()
7616 if (hw->phy.ops.set_phy_power && !*enable_wake) in __ixgbe_shutdown()
7617 hw->phy.ops.set_phy_power(hw, false); in __ixgbe_shutdown()
7621 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) in __ixgbe_shutdown()
7653 * ixgbe_update_stats - Update the board statistics counters.
7658 struct net_device *netdev = adapter->netdev; in ixgbe_update_stats()
7659 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_update_stats()
7660 struct ixgbe_hw_stats *hwstats = &adapter->stats; in ixgbe_update_stats()
7668 if (test_bit(__IXGBE_DOWN, &adapter->state) || in ixgbe_update_stats()
7669 test_bit(__IXGBE_RESETTING, &adapter->state)) in ixgbe_update_stats()
7672 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { in ixgbe_update_stats()
7675 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_update_stats()
7676 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; in ixgbe_update_stats()
7677 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; in ixgbe_update_stats()
7679 adapter->rsc_total_count = rsc_count; in ixgbe_update_stats()
7680 adapter->rsc_total_flush = rsc_flush; in ixgbe_update_stats()
7683 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_update_stats()
7684 struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]); in ixgbe_update_stats()
7688 non_eop_descs += rx_ring->rx_stats.non_eop_descs; in ixgbe_update_stats()
7689 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page; in ixgbe_update_stats()
7690 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; in ixgbe_update_stats()
7691 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; in ixgbe_update_stats()
7692 hw_csum_rx_error += rx_ring->rx_stats.csum_err; in ixgbe_update_stats()
7693 bytes += rx_ring->stats.bytes; in ixgbe_update_stats()
7694 packets += rx_ring->stats.packets; in ixgbe_update_stats()
7696 adapter->non_eop_descs = non_eop_descs; in ixgbe_update_stats()
7697 adapter->alloc_rx_page = alloc_rx_page; in ixgbe_update_stats()
7698 adapter->alloc_rx_page_failed = alloc_rx_page_failed; in ixgbe_update_stats()
7699 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; in ixgbe_update_stats()
7700 adapter->hw_csum_rx_error = hw_csum_rx_error; in ixgbe_update_stats()
7701 netdev->stats.rx_bytes = bytes; in ixgbe_update_stats()
7702 netdev->stats.rx_packets = packets; in ixgbe_update_stats()
7707 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbe_update_stats()
7708 struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]); in ixgbe_update_stats()
7712 restart_queue += tx_ring->tx_stats.restart_queue; in ixgbe_update_stats()
7713 tx_busy += tx_ring->tx_stats.tx_busy; in ixgbe_update_stats()
7714 bytes += tx_ring->stats.bytes; in ixgbe_update_stats()
7715 packets += tx_ring->stats.packets; in ixgbe_update_stats()
7717 for (i = 0; i < adapter->num_xdp_queues; i++) { in ixgbe_update_stats()
7718 struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]); in ixgbe_update_stats()
7722 restart_queue += xdp_ring->tx_stats.restart_queue; in ixgbe_update_stats()
7723 tx_busy += xdp_ring->tx_stats.tx_busy; in ixgbe_update_stats()
7724 bytes += xdp_ring->stats.bytes; in ixgbe_update_stats()
7725 packets += xdp_ring->stats.packets; in ixgbe_update_stats()
7727 adapter->restart_queue = restart_queue; in ixgbe_update_stats()
7728 adapter->tx_busy = tx_busy; in ixgbe_update_stats()
7729 netdev->stats.tx_bytes = bytes; in ixgbe_update_stats()
7730 netdev->stats.tx_packets = packets; in ixgbe_update_stats()
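/* Userspace sketch of the aggregation above: walk every ring once,
 * sum the per-ring counters into locals, then publish the totals in
 * one pass so readers never see a half-updated mix of old and new
 * per-ring values. Names are illustrative. */
#include <stdint.h>
#include <stdio.h>

struct demo_ring_stats { uint64_t packets, bytes; };

static void demo_update_totals(const struct demo_ring_stats *rings, int n,
			       uint64_t *tot_packets, uint64_t *tot_bytes)
{
	uint64_t packets = 0, bytes = 0;

	for (int i = 0; i < n; i++) {
		packets += rings[i].packets;
		bytes += rings[i].bytes;
	}
	/* single publish step, mirroring the adapter/netdev stores above */
	*tot_packets = packets;
	*tot_bytes = bytes;
}

int main(void)
{
	struct demo_ring_stats rings[2] = { { 10, 1500 }, { 4, 600 } };
	uint64_t p, b;

	demo_update_totals(rings, 2, &p, &b);
	printf("%llu packets, %llu bytes\n",
	       (unsigned long long)p, (unsigned long long)b);
	return 0;
}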
7732 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); in ixgbe_update_stats()
7739 hwstats->mpc[i] += mpc; in ixgbe_update_stats()
7740 total_mpc += hwstats->mpc[i]; in ixgbe_update_stats()
7741 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); in ixgbe_update_stats()
7742 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); in ixgbe_update_stats()
7743 switch (hw->mac.type) { in ixgbe_update_stats()
7745 hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); in ixgbe_update_stats()
7746 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); in ixgbe_update_stats()
7747 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); in ixgbe_update_stats()
7748 hwstats->pxonrxc[i] += in ixgbe_update_stats()
7757 hwstats->pxonrxc[i] += in ixgbe_update_stats()
7767 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); in ixgbe_update_stats()
7768 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); in ixgbe_update_stats()
7769 if (hw->mac.type == ixgbe_mac_82599EB || in ixgbe_update_stats()
7770 hw->mac.type == ixgbe_mac_X540 || in ixgbe_update_stats()
7771 hw->mac.type == ixgbe_mac_X550 || in ixgbe_update_stats()
7772 hw->mac.type == ixgbe_mac_X550EM_x || in ixgbe_update_stats()
7773 hw->mac.type == ixgbe_mac_x550em_a || in ixgbe_update_stats()
7774 hw->mac.type == ixgbe_mac_e610) { in ixgbe_update_stats()
7775 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); in ixgbe_update_stats()
7777 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); in ixgbe_update_stats()
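/* The QBTC_L/QBRC_L reads above come in low/high halves: hardware
 * keeps wide per-queue byte counters whose upper bits live in a second
 * register. A sketch, assuming a 36-bit counter split as 32 low bits
 * plus 4 high bits (the width here is an assumption, not taken from
 * this listing): */
#include <stdint.h>
#include <stdio.h>

static uint64_t demo_read_split_counter(uint32_t lo_reg, uint32_t hi_reg)
{
	/* fold the masked high bits on top of the low half */
	return (uint64_t)lo_reg | ((uint64_t)(hi_reg & 0xF) << 32);
}

int main(void)
{
	/* 0x2_00000010 = 8589934608 bytes */
	printf("%llu\n",
	       (unsigned long long)demo_read_split_counter(0x10, 0x2));
	return 0;
}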
7782 hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); in ixgbe_update_stats()
7784 hwstats->gprc -= missed_rx; in ixgbe_update_stats()
7789 switch (hw->mac.type) { in ixgbe_update_stats()
7791 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); in ixgbe_update_stats()
7792 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); in ixgbe_update_stats()
7793 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); in ixgbe_update_stats()
7794 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); in ixgbe_update_stats()
7802 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); in ixgbe_update_stats()
7803 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); in ixgbe_update_stats()
7804 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC); in ixgbe_update_stats()
7805 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC); in ixgbe_update_stats()
7809 adapter->hw_rx_no_dma_resources += in ixgbe_update_stats()
7811 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); in ixgbe_update_stats()
7813 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); in ixgbe_update_stats()
7815 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); in ixgbe_update_stats()
7817 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); in ixgbe_update_stats()
7818 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); in ixgbe_update_stats()
7819 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); in ixgbe_update_stats()
7821 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); in ixgbe_update_stats()
7822 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); in ixgbe_update_stats()
7823 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); in ixgbe_update_stats()
7824 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); in ixgbe_update_stats()
7825 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); in ixgbe_update_stats()
7826 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); in ixgbe_update_stats()
7828 if (adapter->fcoe.ddp_pool) { in ixgbe_update_stats()
7829 struct ixgbe_fcoe *fcoe = &adapter->fcoe; in ixgbe_update_stats()
7834 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); in ixgbe_update_stats()
7835 noddp += ddp_pool->noddp; in ixgbe_update_stats()
7836 noddp_ext_buff += ddp_pool->noddp_ext_buff; in ixgbe_update_stats()
7838 hwstats->fcoe_noddp = noddp; in ixgbe_update_stats()
7839 hwstats->fcoe_noddp_ext_buff = noddp_ext_buff; in ixgbe_update_stats()
7847 hwstats->bprc += bprc; in ixgbe_update_stats()
7848 hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); in ixgbe_update_stats()
7849 if (hw->mac.type == ixgbe_mac_82598EB) in ixgbe_update_stats()
7850 hwstats->mprc -= bprc; in ixgbe_update_stats()
7851 hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); in ixgbe_update_stats()
7852 hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); in ixgbe_update_stats()
7853 hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); in ixgbe_update_stats()
7854 hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); in ixgbe_update_stats()
7855 hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); in ixgbe_update_stats()
7856 hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); in ixgbe_update_stats()
7857 hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); in ixgbe_update_stats()
7858 hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); in ixgbe_update_stats()
7860 hwstats->lxontxc += lxon; in ixgbe_update_stats()
7862 hwstats->lxofftxc += lxoff; in ixgbe_update_stats()
7863 hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); in ixgbe_update_stats()
7864 hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); in ixgbe_update_stats()
7866 * 82598 errata - tx of flow control packets is included in tx counters in ixgbe_update_stats()
7869 hwstats->gptc -= xon_off_tot; in ixgbe_update_stats()
7870 hwstats->mptc -= xon_off_tot; in ixgbe_update_stats()
7871 hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN)); in ixgbe_update_stats()
7872 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); in ixgbe_update_stats()
7873 hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); in ixgbe_update_stats()
7874 hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); in ixgbe_update_stats()
7875 hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); in ixgbe_update_stats()
7876 hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); in ixgbe_update_stats()
7877 hwstats->ptc64 -= xon_off_tot; in ixgbe_update_stats()
7878 hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); in ixgbe_update_stats()
7879 hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); in ixgbe_update_stats()
7880 hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); in ixgbe_update_stats()
7881 hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); in ixgbe_update_stats()
7882 hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); in ixgbe_update_stats()
7883 hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); in ixgbe_update_stats()
7886 netdev->stats.multicast = hwstats->mprc; in ixgbe_update_stats()
7889 netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec; in ixgbe_update_stats()
7890 netdev->stats.rx_dropped = 0; in ixgbe_update_stats()
7891 netdev->stats.rx_length_errors = hwstats->rlec; in ixgbe_update_stats()
7892 netdev->stats.rx_crc_errors = hwstats->crcerrs; in ixgbe_update_stats()
7893 netdev->stats.rx_missed_errors = total_mpc; in ixgbe_update_stats()
7895 /* VF Stats Collection - skip while resetting because these in ixgbe_update_stats()
7899 if (!test_bit(__IXGBE_RESETTING, &adapter->state)) { in ixgbe_update_stats()
7900 for (i = 0; i < adapter->num_vfs; i++) { in ixgbe_update_stats()
7902 adapter->vfinfo[i].last_vfstats.gprc, in ixgbe_update_stats()
7903 adapter->vfinfo[i].vfstats.gprc); in ixgbe_update_stats()
7905 adapter->vfinfo[i].last_vfstats.gptc, in ixgbe_update_stats()
7906 adapter->vfinfo[i].vfstats.gptc); in ixgbe_update_stats()
7909 adapter->vfinfo[i].last_vfstats.gorc, in ixgbe_update_stats()
7910 adapter->vfinfo[i].vfstats.gorc); in ixgbe_update_stats()
7913 adapter->vfinfo[i].last_vfstats.gotc, in ixgbe_update_stats()
7914 adapter->vfinfo[i].vfstats.gotc); in ixgbe_update_stats()
7916 adapter->vfinfo[i].last_vfstats.mprc, in ixgbe_update_stats()
7917 adapter->vfinfo[i].vfstats.mprc); in ixgbe_update_stats()
7923 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
7928 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_fdir_reinit_subtask()
7931 if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) in ixgbe_fdir_reinit_subtask()
7934 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; in ixgbe_fdir_reinit_subtask()
7937 if (test_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_fdir_reinit_subtask()
7941 if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) in ixgbe_fdir_reinit_subtask()
7944 adapter->fdir_overflow++; in ixgbe_fdir_reinit_subtask()
7947 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbe_fdir_reinit_subtask()
7949 &(adapter->tx_ring[i]->state)); in ixgbe_fdir_reinit_subtask()
7950 for (i = 0; i < adapter->num_xdp_queues; i++) in ixgbe_fdir_reinit_subtask()
7952 &adapter->xdp_ring[i]->state); in ixgbe_fdir_reinit_subtask()
7953 /* re-enable flow director interrupts */ in ixgbe_fdir_reinit_subtask()
7956 e_err(probe, "failed to finish FDIR re-initialization, " in ixgbe_fdir_reinit_subtask()
7962 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
7972 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_check_hang_subtask()
7977 if (test_bit(__IXGBE_DOWN, &adapter->state) || in ixgbe_check_hang_subtask()
7978 test_bit(__IXGBE_REMOVING, &adapter->state) || in ixgbe_check_hang_subtask()
7979 test_bit(__IXGBE_RESETTING, &adapter->state)) in ixgbe_check_hang_subtask()
7983 if (netif_carrier_ok(adapter->netdev)) in ixgbe_check_hang_subtask()
7984 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbe_check_hang_subtask()
7985 set_check_for_tx_hang(adapter->tx_ring[i]); in ixgbe_check_hang_subtask()
7987 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { in ixgbe_check_hang_subtask()
7997 for (i = 0; i < adapter->num_q_vectors; i++) { in ixgbe_check_hang_subtask()
7998 struct ixgbe_q_vector *qv = adapter->q_vector[i]; in ixgbe_check_hang_subtask()
7999 if (qv->rx.ring || qv->tx.ring) in ixgbe_check_hang_subtask()
8009 * ixgbe_watchdog_update_link - update the link status
8014 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_watchdog_update_link()
8015 u32 link_speed = adapter->link_speed; in ixgbe_watchdog_update_link()
8016 bool link_up = adapter->link_up; in ixgbe_watchdog_update_link()
8017 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; in ixgbe_watchdog_update_link()
8019 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) in ixgbe_watchdog_update_link()
8022 if (hw->mac.ops.check_link) { in ixgbe_watchdog_update_link()
8023 hw->mac.ops.check_link(hw, &link_speed, &link_up, false); in ixgbe_watchdog_update_link()
8030 if (adapter->ixgbe_ieee_pfc) in ixgbe_watchdog_update_link()
8031 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); in ixgbe_watchdog_update_link()
8033 if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) { in ixgbe_watchdog_update_link()
8034 hw->mac.ops.fc_enable(hw); in ixgbe_watchdog_update_link()
8039 time_after(jiffies, (adapter->link_check_timeout + in ixgbe_watchdog_update_link()
8041 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; in ixgbe_watchdog_update_link()
8046 adapter->link_up = link_up; in ixgbe_watchdog_update_link()
8047 adapter->link_speed = link_speed; in ixgbe_watchdog_update_link()
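/* Sketch of the watchdog pattern above: a "need link update" flag is
 * raised with a timestamp, polled each service-task pass, and cleared
 * either when link comes up or when a timeout elapses. Userspace model
 * using a monotonic tick instead of jiffies; the timeout constant is a
 * placeholder. */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_LINK_TIMEOUT 40	/* ticks */

struct demo_link {
	bool need_update;
	unsigned long check_start;	/* tick when the flag was raised */
};

static bool demo_time_after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;	/* same wrap-safe idiom as time_after() */
}

static void demo_watchdog(struct demo_link *l, bool link_up,
			  unsigned long now)
{
	if (!l->need_update)
		return;
	if (link_up ||
	    demo_time_after(now, l->check_start + DEMO_LINK_TIMEOUT)) {
		l->need_update = false;
		printf("link check finished at tick %lu (up=%d)\n",
		       now, link_up);
	}
}

int main(void)
{
	struct demo_link l = { .need_update = true, .check_start = 0 };

	demo_watchdog(&l, false, 10);	/* still waiting */
	demo_watchdog(&l, true, 20);	/* link up: flag clears */
	return 0;
}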
8053 struct net_device *netdev = adapter->netdev; in ixgbe_update_default_up()
8060 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) in ixgbe_update_default_up()
8063 adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0; in ixgbe_update_default_up()
8068 * ixgbe_watchdog_link_is_up - update netif_carrier status and
8074 struct net_device *netdev = adapter->netdev; in ixgbe_watchdog_link_is_up()
8075 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_watchdog_link_is_up()
8076 u32 link_speed = adapter->link_speed; in ixgbe_watchdog_link_is_up()
8084 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; in ixgbe_watchdog_link_is_up()
8086 switch (hw->mac.type) { in ixgbe_watchdog_link_is_up()
8112 adapter->last_rx_ptp_check = jiffies; in ixgbe_watchdog_link_is_up()
8114 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) in ixgbe_watchdog_link_is_up()
8148 if (adapter->num_vfs && hw->mac.ops.enable_mdd) in ixgbe_watchdog_link_is_up()
8149 hw->mac.ops.enable_mdd(hw); in ixgbe_watchdog_link_is_up()
8152 netif_tx_wake_all_queues(adapter->netdev); in ixgbe_watchdog_link_is_up()
8162 * ixgbe_watchdog_link_is_down - update netif_carrier status and
8168 struct net_device *netdev = adapter->netdev; in ixgbe_watchdog_link_is_down()
8169 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_watchdog_link_is_down()
8171 adapter->link_up = false; in ixgbe_watchdog_link_is_down()
8172 adapter->link_speed = 0; in ixgbe_watchdog_link_is_down()
8178 adapter->link_down_events++; in ixgbe_watchdog_link_is_down()
8181 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB) in ixgbe_watchdog_link_is_down()
8182 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; in ixgbe_watchdog_link_is_down()
8184 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) in ixgbe_watchdog_link_is_down()
8198 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbe_ring_tx_pending()
8199 struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; in ixgbe_ring_tx_pending()
8201 if (tx_ring->next_to_use != tx_ring->next_to_clean) in ixgbe_ring_tx_pending()
8210 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_vf_tx_pending()
8211 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in ixgbe_vf_tx_pending()
8212 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); in ixgbe_vf_tx_pending()
8216 if (!adapter->num_vfs) in ixgbe_vf_tx_pending()
8220 if (hw->mac.type >= ixgbe_mac_X550) in ixgbe_vf_tx_pending()
8223 for (i = 0; i < adapter->num_vfs; i++) { in ixgbe_vf_tx_pending()
8239 * ixgbe_watchdog_flush_tx - flush queues on link down
8244 if (!netif_carrier_ok(adapter->netdev)) { in ixgbe_watchdog_flush_tx()
8253 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); in ixgbe_watchdog_flush_tx()
8261 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_bad_vf_abort()
8263 if (adapter->hw.mac.type == ixgbe_mac_82599EB && in ixgbe_bad_vf_abort()
8264 adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF) { in ixgbe_bad_vf_abort()
8265 adapter->vfinfo[vf].primary_abort_count++; in ixgbe_bad_vf_abort()
8266 if (adapter->vfinfo[vf].primary_abort_count == in ixgbe_bad_vf_abort()
8270 adapter->vfinfo[vf].primary_abort_count = 0; in ixgbe_bad_vf_abort()
8273 "Malicious Driver Detection event detected on PF %d VF %d MAC: %pM mdd-disable-vf=on", in ixgbe_bad_vf_abort()
8274 hw->bus.func, vf, in ixgbe_bad_vf_abort()
8275 adapter->vfinfo[vf].vf_mac_addresses); in ixgbe_bad_vf_abort()
8282 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_check_for_bad_vf()
8283 struct pci_dev *pdev = adapter->pdev; in ixgbe_check_for_bad_vf()
8287 if (!(netif_carrier_ok(adapter->netdev))) in ixgbe_check_for_bad_vf()
8303 for (vf = 0; vf < adapter->num_vfs; ++vf) { in ixgbe_check_for_bad_vf()
8304 struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev; in ixgbe_check_for_bad_vf()
8323 if (adapter->hw.mac.type == ixgbe_mac_82598EB || in ixgbe_spoof_check()
8324 adapter->num_vfs == 0) in ixgbe_spoof_check()
8327 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC); in ixgbe_spoof_check()
8351 * ixgbe_watchdog_subtask - check and bring link up
8357 if (test_bit(__IXGBE_DOWN, &adapter->state) || in ixgbe_watchdog_subtask()
8358 test_bit(__IXGBE_REMOVING, &adapter->state) || in ixgbe_watchdog_subtask()
8359 test_bit(__IXGBE_RESETTING, &adapter->state)) in ixgbe_watchdog_subtask()
8364 if (adapter->link_up) in ixgbe_watchdog_subtask()
8377 * ixgbe_sfp_detection_subtask - poll for SFP+ cable
8382 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_sfp_detection_subtask()
8386 if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) && in ixgbe_sfp_detection_subtask()
8387 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) in ixgbe_sfp_detection_subtask()
8390 if (adapter->sfp_poll_time && in ixgbe_sfp_detection_subtask()
8391 time_after(adapter->sfp_poll_time, jiffies)) in ixgbe_sfp_detection_subtask()
8395 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) in ixgbe_sfp_detection_subtask()
8398 adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1; in ixgbe_sfp_detection_subtask()
8400 err = hw->phy.ops.identify_sfp(hw); in ixgbe_sfp_detection_subtask()
8401 if (err == -EOPNOTSUPP) in ixgbe_sfp_detection_subtask()
8404 if (err == -ENOENT) { in ixgbe_sfp_detection_subtask()
8407 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; in ixgbe_sfp_detection_subtask()
8415 if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) in ixgbe_sfp_detection_subtask()
8418 adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET; in ixgbe_sfp_detection_subtask()
8425 if (hw->mac.type == ixgbe_mac_82598EB) in ixgbe_sfp_detection_subtask()
8426 err = hw->phy.ops.reset(hw); in ixgbe_sfp_detection_subtask()
8428 err = hw->mac.ops.setup_sfp(hw); in ixgbe_sfp_detection_subtask()
8430 if (err == -EOPNOTSUPP) in ixgbe_sfp_detection_subtask()
8433 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; in ixgbe_sfp_detection_subtask()
8434 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type); in ixgbe_sfp_detection_subtask()
8437 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); in ixgbe_sfp_detection_subtask()
8439 if (err == -EOPNOTSUPP && in ixgbe_sfp_detection_subtask()
8440 adapter->netdev->reg_state == NETREG_REGISTERED) { in ixgbe_sfp_detection_subtask()
8445 unregister_netdev(adapter->netdev); in ixgbe_sfp_detection_subtask()
8450 * ixgbe_sfp_link_config_subtask - set up SFP link after module install
8455 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_sfp_link_config_subtask()
8460 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG)) in ixgbe_sfp_link_config_subtask()
8464 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) in ixgbe_sfp_link_config_subtask()
8467 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; in ixgbe_sfp_link_config_subtask()
8469 hw->mac.ops.get_link_capabilities(hw, &cap_speed, &autoneg); in ixgbe_sfp_link_config_subtask()
8478 if (hw->mac.ops.setup_link) in ixgbe_sfp_link_config_subtask()
8479 hw->mac.ops.setup_link(hw, speed, true); in ixgbe_sfp_link_config_subtask()
8481 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; in ixgbe_sfp_link_config_subtask()
8482 adapter->link_check_timeout = jiffies; in ixgbe_sfp_link_config_subtask()
8483 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); in ixgbe_sfp_link_config_subtask()
8487 * ixgbe_service_timer - Timer callback
8497 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) in ixgbe_service_timer()
8503 mod_timer(&adapter->service_timer, next_event_offset + jiffies); in ixgbe_service_timer()
8510 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_phy_interrupt_subtask()
8513 if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT)) in ixgbe_phy_interrupt_subtask()
8516 adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT; in ixgbe_phy_interrupt_subtask()
8518 if (!hw->phy.ops.handle_lasi) in ixgbe_phy_interrupt_subtask()
8521 hw->phy.ops.handle_lasi(&adapter->hw, &overtemp); in ixgbe_phy_interrupt_subtask()
8528 if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state)) in ixgbe_reset_subtask()
8533 if (test_bit(__IXGBE_DOWN, &adapter->state) || in ixgbe_reset_subtask()
8534 test_bit(__IXGBE_REMOVING, &adapter->state) || in ixgbe_reset_subtask()
8535 test_bit(__IXGBE_RESETTING, &adapter->state)) { in ixgbe_reset_subtask()
8541 netdev_err(adapter->netdev, "Reset adapter\n"); in ixgbe_reset_subtask()
8542 adapter->tx_timeout_count++; in ixgbe_reset_subtask()
8550 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_check_fw_api_mismatch()
8552 if (hw->mac.type != ixgbe_mac_e610) in ixgbe_check_fw_api_mismatch()
8555 if (hw->mac.ops.get_fw_ver && hw->mac.ops.get_fw_ver(hw)) in ixgbe_check_fw_api_mismatch()
8558 if (hw->api_maj_ver > IXGBE_FW_API_VER_MAJOR) { in ixgbe_check_fw_api_mismatch()
8561 adapter->flags2 |= IXGBE_FLAG2_API_MISMATCH; in ixgbe_check_fw_api_mismatch()
8562 return -EOPNOTSUPP; in ixgbe_check_fw_api_mismatch()
8563 } else if (hw->api_maj_ver == IXGBE_FW_API_VER_MAJOR && in ixgbe_check_fw_api_mismatch()
8564 hw->api_min_ver > IXGBE_FW_API_VER_MINOR + IXGBE_FW_API_VER_DIFF_ALLOWED) { in ixgbe_check_fw_api_mismatch()
8566 adapter->flags2 |= IXGBE_FLAG2_API_MISMATCH; in ixgbe_check_fw_api_mismatch()
8567 } else if (hw->api_maj_ver < IXGBE_FW_API_VER_MAJOR || in ixgbe_check_fw_api_mismatch()
8568 hw->api_min_ver < IXGBE_FW_API_VER_MINOR - IXGBE_FW_API_VER_DIFF_ALLOWED) { in ixgbe_check_fw_api_mismatch()
8570 adapter->flags2 |= IXGBE_FLAG2_API_MISMATCH; in ixgbe_check_fw_api_mismatch()
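/* Sketch of the firmware API gate above: a major version ahead of the
 * driver is unusable, while minor drift beyond an allowed window in
 * either direction only flags a mismatch. Constants are placeholders
 * for the IXGBE_FW_API_VER_* values. */
#include <stdio.h>

#define DEMO_API_MAJOR 1
#define DEMO_API_MINOR 7
#define DEMO_API_DIFF_ALLOWED 2

/* returns: <0 unusable, >0 usable but mismatched, 0 fully compatible */
static int demo_check_api(int fw_major, int fw_minor)
{
	if (fw_major > DEMO_API_MAJOR)
		return -1;
	if (fw_major == DEMO_API_MAJOR &&
	    fw_minor > DEMO_API_MINOR + DEMO_API_DIFF_ALLOWED)
		return 1;
	if (fw_major < DEMO_API_MAJOR ||
	    fw_minor < DEMO_API_MINOR - DEMO_API_DIFF_ALLOWED)
		return 1;
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", demo_check_api(2, 0),
	       demo_check_api(1, 12), demo_check_api(1, 7));
	return 0;
}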
8577 * ixgbe_check_fw_error - Check firmware for errors
8584 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_check_fw_error()
8593 (!(fwsm & IXGBE_FWSM_FW_VAL_BIT) && !(hw->mac.type == ixgbe_mac_e610))) in ixgbe_check_fw_error()
8597 if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) { in ixgbe_check_fw_error()
8601 if (!(adapter->flags2 & IXGBE_FLAG2_API_MISMATCH)) { in ixgbe_check_fw_error()
8608 if (adapter->flags2 & IXGBE_FLAG2_FW_ROLLBACK) in ixgbe_check_fw_error()
8611 if (hw->mac.ops.fw_rollback_mode && hw->mac.ops.fw_rollback_mode(hw)) { in ixgbe_check_fw_error()
8612 struct ixgbe_nvm_info *nvm_info = &adapter->hw.flash.nvm; in ixgbe_check_fw_error()
8615 if (hw->mac.ops.get_fw_ver && hw->mac.ops.get_fw_ver(hw)) in ixgbe_check_fw_error()
8618 if (hw->mac.ops.get_nvm_ver && in ixgbe_check_fw_error()
8619 hw->mac.ops.get_nvm_ver(hw, nvm_info)) in ixgbe_check_fw_error()
8624 nvm_info->major, nvm_info->minor, nvm_info->eetrack, in ixgbe_check_fw_error()
8625 hw->fw_maj_ver, hw->fw_min_ver); in ixgbe_check_fw_error()
8630 adapter->flags2 |= IXGBE_FLAG2_FW_ROLLBACK; in ixgbe_check_fw_error()
8645 mod_timer(&adapter->service_timer, jiffies + msecs_to_jiffies(100)); in ixgbe_recovery_service_task()
8649 * ixgbe_service_task - manages and runs subtasks
8657 if (ixgbe_removed(adapter->hw.hw_addr)) { in ixgbe_service_task()
8658 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { in ixgbe_service_task()
8667 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { in ixgbe_service_task()
8668 if (adapter->mii_bus) { in ixgbe_service_task()
8669 mdiobus_unregister(adapter->mii_bus); in ixgbe_service_task()
8670 adapter->mii_bus = NULL; in ixgbe_service_task()
8672 unregister_netdev(adapter->netdev); in ixgbe_service_task()
8677 if (adapter->hw.mac.type == ixgbe_mac_e610) { in ixgbe_service_task()
8678 if (adapter->flags2 & IXGBE_FLAG2_FW_ASYNC_EVENT) in ixgbe_service_task()
8691 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) { in ixgbe_service_task()
8693 if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER) in ixgbe_service_task()
8707 struct sk_buff *skb = first->skb; in ixgbe_tso()
8722 if (skb->ip_summed != CHECKSUM_PARTIAL) in ixgbe_tso()
8732 if (eth_p_mpls(first->protocol)) in ixgbe_tso()
8739 type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ? in ixgbe_tso()
8743 if (ip.v4->version == 4) { in ixgbe_tso()
8745 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); in ixgbe_tso()
8746 int len = csum_start - trans_start; in ixgbe_tso()
8752 ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ? in ixgbe_tso()
8757 ip.v4->tot_len = 0; in ixgbe_tso()
8758 first->tx_flags |= IXGBE_TX_FLAGS_TSO | in ixgbe_tso()
8762 ip.v6->payload_len = 0; in ixgbe_tso()
8763 first->tx_flags |= IXGBE_TX_FLAGS_TSO | in ixgbe_tso()
8768 l4_offset = l4.hdr - skb->data; in ixgbe_tso()
8771 paylen = skb->len - l4_offset; in ixgbe_tso()
8775 *hdr_len = (l4.tcp->doff * 4) + l4_offset; in ixgbe_tso()
8776 csum_replace_by_diff(&l4.tcp->check, in ixgbe_tso()
8781 csum_replace_by_diff(&l4.udp->check, in ixgbe_tso()
8786 first->gso_segs = skb_shinfo(skb)->gso_segs; in ixgbe_tso()
8787 first->bytecount += (first->gso_segs - 1) * *hdr_len; in ixgbe_tso()
8790 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT; in ixgbe_tso()
8791 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; in ixgbe_tso()
8793 fceof_saidx |= itd->sa_idx; in ixgbe_tso()
8794 type_tucmd |= itd->flags | itd->trailer_len; in ixgbe_tso()
8797 vlan_macip_lens = l4.hdr - ip.hdr; in ixgbe_tso()
8798 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; in ixgbe_tso()
8799 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; in ixgbe_tso()
8811 struct sk_buff *skb = first->skb; in ixgbe_tx_csum()
8816 if (skb->ip_summed != CHECKSUM_PARTIAL) { in ixgbe_tx_csum()
8818 if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | in ixgbe_tx_csum()
8824 switch (skb->csum_offset) { in ixgbe_tx_csum()
8843 first->tx_flags |= IXGBE_TX_FLAGS_CSUM; in ixgbe_tx_csum()
8844 vlan_macip_lens = skb_checksum_start_offset(skb) - in ixgbe_tx_csum()
8849 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; in ixgbe_tx_csum()
8851 fceof_saidx |= itd->sa_idx; in ixgbe_tx_csum()
8852 type_tucmd |= itd->flags | itd->trailer_len; in ixgbe_tx_csum()
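/* The switch in ixgbe_tx_csum() keys off skb->csum_offset: the offset
 * of the L4 checksum field inside its header is enough to tell TCP and
 * UDP apart. Userspace sketch using offsetof() on minimal header
 * layouts (fields abbreviated, offsets matching the real headers): */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_tcphdr { uint16_t source, dest; uint32_t seq, ack_seq;
		     uint16_t flags, window, check, urg_ptr; };
struct demo_udphdr { uint16_t source, dest, len, check; };

static const char *demo_l4_from_csum_offset(size_t csum_offset)
{
	switch (csum_offset) {
	case offsetof(struct demo_tcphdr, check):	/* 16 */
		return "tcp";
	case offsetof(struct demo_udphdr, check):	/* 6 */
		return "udp";
	default:
		return "unknown";	/* driver falls back to software csum */
	}
}

int main(void)
{
	printf("%s %s\n", demo_l4_from_csum_offset(16),
	       demo_l4_from_csum_offset(6));
	return 0;
}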
8882 cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS); in ixgbe_tx_cmd_type()
8915 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); in ixgbe_tx_olinfo_status()
8920 if (!netif_subqueue_try_stop(tx_ring->netdev, tx_ring->queue_index, in __ixgbe_maybe_stop_tx()
8922 return -EBUSY; in __ixgbe_maybe_stop_tx()
8924 ++tx_ring->tx_stats.restart_queue; in __ixgbe_maybe_stop_tx()
8940 struct sk_buff *skb = first->skb; in ixgbe_tx_map()
8946 u32 tx_flags = first->tx_flags; in ixgbe_tx_map()
8948 u16 i = tx_ring->next_to_use; in ixgbe_tx_map()
8952 ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); in ixgbe_tx_map()
8955 data_len = skb->data_len; in ixgbe_tx_map()
8960 size -= sizeof(struct fcoe_crc_eof) - data_len; in ixgbe_tx_map()
8963 data_len -= sizeof(struct fcoe_crc_eof); in ixgbe_tx_map()
8968 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in ixgbe_tx_map()
8972 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { in ixgbe_tx_map()
8973 if (dma_mapping_error(tx_ring->dev, dma)) in ixgbe_tx_map()
8980 tx_desc->read.buffer_addr = cpu_to_le64(dma); in ixgbe_tx_map()
8983 tx_desc->read.cmd_type_len = in ixgbe_tx_map()
8988 if (i == tx_ring->count) { in ixgbe_tx_map()
8992 tx_desc->read.olinfo_status = 0; in ixgbe_tx_map()
8995 size -= IXGBE_MAX_DATA_PER_TXD; in ixgbe_tx_map()
8997 tx_desc->read.buffer_addr = cpu_to_le64(dma); in ixgbe_tx_map()
9003 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); in ixgbe_tx_map()
9007 if (i == tx_ring->count) { in ixgbe_tx_map()
9011 tx_desc->read.olinfo_status = 0; in ixgbe_tx_map()
9018 data_len -= size; in ixgbe_tx_map()
9020 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in ixgbe_tx_map()
9023 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbe_tx_map()
9028 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); in ixgbe_tx_map()
9030 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in ixgbe_tx_map()
9033 first->time_stamp = jiffies; in ixgbe_tx_map()
9039 * are new descriptors to fetch. (Only applicable for weak-ordered in ixgbe_tx_map()
9040 * memory model archs, such as IA-64). in ixgbe_tx_map()
9048 first->next_to_watch = tx_desc; in ixgbe_tx_map()
9051 if (i == tx_ring->count) in ixgbe_tx_map()
9054 tx_ring->next_to_use = i; in ixgbe_tx_map()
9059 writel(i, tx_ring->tail); in ixgbe_tx_map()
9064 dev_err(tx_ring->dev, "TX DMA map failed\n"); in ixgbe_tx_map()
9068 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbe_tx_map()
9070 dma_unmap_page(tx_ring->dev, in ixgbe_tx_map()
9078 i += tx_ring->count; in ixgbe_tx_map()
9079 i--; in ixgbe_tx_map()
9082 dev_kfree_skb_any(first->skb); in ixgbe_tx_map()
9083 first->skb = NULL; in ixgbe_tx_map()
9085 tx_ring->next_to_use = i; in ixgbe_tx_map()
9087 return -1; in ixgbe_tx_map()
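/* Sketch of the descriptor-ring bookkeeping in ixgbe_tx_map(): the
 * producer index wraps with a compare against the ring size rather
 * than a modulo, and on a DMA mapping failure the same walk runs
 * backwards (the "i += count; i--" step above) to unmap what was
 * already set up. Userspace model: */
#include <stdio.h>

#define DEMO_RING_SIZE 8

static unsigned int demo_next(unsigned int i)
{
	if (++i == DEMO_RING_SIZE)	/* wrap without '%' */
		i = 0;
	return i;
}

static unsigned int demo_prev(unsigned int i)
{
	if (i == 0)			/* unwind step walks backwards */
		i = DEMO_RING_SIZE;
	return i - 1;
}

int main(void)
{
	unsigned int i = DEMO_RING_SIZE - 1;

	i = demo_next(i);		/* wraps to 0 */
	printf("next=%u prev=%u\n", i, demo_prev(i));
	return 0;
}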
9093 struct ixgbe_q_vector *q_vector = ring->q_vector; in ixgbe_atr()
9112 if (!ring->atr_sample_rate) in ixgbe_atr()
9115 ring->atr_count++; in ixgbe_atr()
9118 if ((first->protocol != htons(ETH_P_IP)) && in ixgbe_atr()
9119 (first->protocol != htons(ETH_P_IPV6))) in ixgbe_atr()
9123 skb = first->skb; in ixgbe_atr()
9125 if (unlikely(hdr.network <= skb->data)) in ixgbe_atr()
9127 if (skb->encapsulation && in ixgbe_atr()
9128 first->protocol == htons(ETH_P_IP) && in ixgbe_atr()
9129 hdr.ipv4->protocol == IPPROTO_UDP) { in ixgbe_atr()
9130 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_atr()
9137 if (adapter->vxlan_port && in ixgbe_atr()
9138 udp_hdr(skb)->dest == adapter->vxlan_port) in ixgbe_atr()
9141 if (adapter->geneve_port && in ixgbe_atr()
9142 udp_hdr(skb)->dest == adapter->geneve_port) in ixgbe_atr()
9153 switch (hdr.ipv4->version) { in ixgbe_atr()
9157 l4_proto = hdr.ipv4->protocol; in ixgbe_atr()
9160 hlen = hdr.network - skb->data; in ixgbe_atr()
9162 hlen -= hdr.network - skb->data; in ixgbe_atr()
9178 if (th->fin) in ixgbe_atr()
9182 if (!th->syn && (ring->atr_count < ring->atr_sample_rate)) in ixgbe_atr()
9186 ring->atr_count = 0; in ixgbe_atr()
9188 vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT); in ixgbe_atr()
9193 * The input is broken into two sections, a non-compressed section in ixgbe_atr()
9203 if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN)) in ixgbe_atr()
9204 common.port.src ^= th->dest ^ htons(ETH_P_8021Q); in ixgbe_atr()
9206 common.port.src ^= th->dest ^ first->protocol; in ixgbe_atr()
9207 common.port.dst ^= th->source; in ixgbe_atr()
9209 switch (hdr.ipv4->version) { in ixgbe_atr()
9212 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; in ixgbe_atr()
9216 common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ in ixgbe_atr()
9217 hdr.ipv6->saddr.s6_addr32[1] ^ in ixgbe_atr()
9218 hdr.ipv6->saddr.s6_addr32[2] ^ in ixgbe_atr()
9219 hdr.ipv6->saddr.s6_addr32[3] ^ in ixgbe_atr()
9220 hdr.ipv6->daddr.s6_addr32[0] ^ in ixgbe_atr()
9221 hdr.ipv6->daddr.s6_addr32[1] ^ in ixgbe_atr()
9222 hdr.ipv6->daddr.s6_addr32[2] ^ in ixgbe_atr()
9223 hdr.ipv6->daddr.s6_addr32[3]; in ixgbe_atr()
9233 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw, in ixgbe_atr()
9234 input, common, ring->queue_index); in ixgbe_atr()
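/* Sketch of the ATR sampling policy above: a flow's SYN installs a
 * filter immediately, a FIN is ignored, and otherwise only every
 * atr_sample_rate-th packet is sampled; the "signature" folds ports
 * and addresses together with XOR. This models the gating and folding
 * only, not the 82599 signature hash itself. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_atr {
	unsigned int count, sample_rate;
};

static bool demo_should_sample(struct demo_atr *a, bool syn, bool fin)
{
	if (fin)			/* flow is ending: nothing to learn */
		return false;
	a->count++;
	if (!syn && a->count < a->sample_rate)
		return false;
	a->count = 0;
	return true;
}

static uint32_t demo_signature(uint32_t saddr, uint32_t daddr,
			       uint16_t sport, uint16_t dport)
{
	return (saddr ^ daddr) ^ ((uint32_t)sport << 16 | dport);
}

int main(void)
{
	struct demo_atr a = { .count = 0, .sample_rate = 20 };

	printf("syn sampled: %d\n", demo_should_sample(&a, true, false));
	printf("sig: 0x%08x\n",
	       demo_signature(0x0a000001, 0x0a000002, 12345, 80));
	return 0;
}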
9246 u8 tc = netdev_get_prio_tc_map(dev, skb->priority); in ixgbe_select_queue()
9249 txq = vdev->tc_to_txq[tc].offset; in ixgbe_select_queue()
9251 vdev->tc_to_txq[tc].count); in ixgbe_select_queue()
9265 if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) in ixgbe_select_queue()
9272 f = &adapter->ring_feature[RING_F_FCOE]; in ixgbe_select_queue()
9277 while (txq >= f->indices) in ixgbe_select_queue()
9278 txq -= f->indices; in ixgbe_select_queue()
9280 return txq + f->offset; in ixgbe_select_queue()
9288 u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0; in ixgbe_xmit_xdp_ring()
9289 u16 i = 0, index = ring->next_to_use; in ixgbe_xmit_xdp_ring()
9290 struct ixgbe_tx_buffer *tx_head = &ring->tx_buffer_info[index]; in ixgbe_xmit_xdp_ring()
9293 u32 cmd_type, len = xdpf->len; in ixgbe_xmit_xdp_ring()
9294 void *data = xdpf->data; in ixgbe_xmit_xdp_ring()
9299 tx_head->bytecount = xdp_get_frame_len(xdpf); in ixgbe_xmit_xdp_ring()
9300 tx_head->gso_segs = 1; in ixgbe_xmit_xdp_ring()
9301 tx_head->xdpf = xdpf; in ixgbe_xmit_xdp_ring()
9303 tx_desc->read.olinfo_status = in ixgbe_xmit_xdp_ring()
9304 cpu_to_le32(tx_head->bytecount << IXGBE_ADVTXD_PAYLEN_SHIFT); in ixgbe_xmit_xdp_ring()
9309 dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE); in ixgbe_xmit_xdp_ring()
9310 if (dma_mapping_error(ring->dev, dma)) in ixgbe_xmit_xdp_ring()
9318 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); in ixgbe_xmit_xdp_ring()
9319 tx_desc->read.buffer_addr = cpu_to_le64(dma); in ixgbe_xmit_xdp_ring()
9320 tx_buff->protocol = 0; in ixgbe_xmit_xdp_ring()
9322 if (++index == ring->count) in ixgbe_xmit_xdp_ring()
9328 tx_buff = &ring->tx_buffer_info[index]; in ixgbe_xmit_xdp_ring()
9330 tx_desc->read.olinfo_status = 0; in ixgbe_xmit_xdp_ring()
9332 data = skb_frag_address(&sinfo->frags[i]); in ixgbe_xmit_xdp_ring()
9333 len = skb_frag_size(&sinfo->frags[i]); in ixgbe_xmit_xdp_ring()
9337 tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD); in ixgbe_xmit_xdp_ring()
9342 tx_head->next_to_watch = tx_desc; in ixgbe_xmit_xdp_ring()
9343 ring->next_to_use = index; in ixgbe_xmit_xdp_ring()
9349 tx_buff = &ring->tx_buffer_info[index]; in ixgbe_xmit_xdp_ring()
9351 dma_unmap_page(ring->dev, dma_unmap_addr(tx_buff, dma), in ixgbe_xmit_xdp_ring()
9359 index += ring->count; in ixgbe_xmit_xdp_ring()
9360 index--; in ixgbe_xmit_xdp_ring()
9376 __be16 protocol = skb->protocol; in ixgbe_xmit_frame_ring()
9386 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) in ixgbe_xmit_frame_ring()
9388 &skb_shinfo(skb)->frags[f])); in ixgbe_xmit_frame_ring()
9391 tx_ring->tx_stats.tx_busy++; in ixgbe_xmit_frame_ring()
9396 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in ixgbe_xmit_frame_ring()
9397 first->skb = skb; in ixgbe_xmit_frame_ring()
9398 first->bytecount = skb->len; in ixgbe_xmit_frame_ring()
9399 first->gso_segs = 1; in ixgbe_xmit_frame_ring()
9412 tx_flags |= ntohs(vhdr->h_vlan_TCI) << in ixgbe_xmit_frame_ring()
9418 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in ixgbe_xmit_frame_ring()
9419 adapter->ptp_clock) { in ixgbe_xmit_frame_ring()
9420 if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && in ixgbe_xmit_frame_ring()
9422 &adapter->state)) { in ixgbe_xmit_frame_ring()
9423 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in ixgbe_xmit_frame_ring()
9427 adapter->ptp_tx_skb = skb_get(skb); in ixgbe_xmit_frame_ring()
9428 adapter->ptp_tx_start = jiffies; in ixgbe_xmit_frame_ring()
9429 schedule_work(&adapter->ptp_tx_work); in ixgbe_xmit_frame_ring()
9431 adapter->tx_hwtstamp_skipped++; in ixgbe_xmit_frame_ring()
9437 * Use the l2switch_enable flag - would be false if the DMA in ixgbe_xmit_frame_ring()
9440 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) in ixgbe_xmit_frame_ring()
9444 /* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */ in ixgbe_xmit_frame_ring()
9445 if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && in ixgbe_xmit_frame_ring()
9447 (skb->priority != TC_PRIO_CONTROL))) { in ixgbe_xmit_frame_ring()
9449 tx_flags |= (skb->priority & 0x7) << in ixgbe_xmit_frame_ring()
9457 vhdr->h_vlan_TCI = htons(tx_flags >> in ixgbe_xmit_frame_ring()
9465 first->tx_flags = tx_flags; in ixgbe_xmit_frame_ring()
9466 first->protocol = protocol; in ixgbe_xmit_frame_ring()
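/* The DCB path above rewrites the VLAN tag's 3-bit PCP field with the
 * skb priority. Sketch of the 802.1Q TCI layout that makes that work:
 * PCP in bits 15:13, DEI in bit 12, VID in bits 11:0. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_VLAN_PRIO_SHIFT 13
#define DEMO_VLAN_VID_MASK   0x0fff

static uint16_t demo_set_pcp(uint16_t tci, uint8_t prio)
{
	tci &= ~(uint16_t)(0x7 << DEMO_VLAN_PRIO_SHIFT);   /* clear old PCP */
	return tci | ((uint16_t)(prio & 0x7) << DEMO_VLAN_PRIO_SHIFT);
}

int main(void)
{
	uint16_t tci = 100;	/* VID 100, PCP 0 */

	tci = demo_set_pcp(tci, 5);
	printf("tci=0x%04x vid=%u pcp=%u\n", tci,
	       tci & DEMO_VLAN_VID_MASK, tci >> DEMO_VLAN_PRIO_SHIFT);
	return 0;
}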
9471 (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) { in ixgbe_xmit_frame_ring()
9493 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) in ixgbe_xmit_frame_ring()
9505 dev_kfree_skb_any(first->skb); in ixgbe_xmit_frame_ring()
9506 first->skb = NULL; in ixgbe_xmit_frame_ring()
9509 dev_kfree_skb_any(adapter->ptp_tx_skb); in ixgbe_xmit_frame_ring()
9510 adapter->ptp_tx_skb = NULL; in ixgbe_xmit_frame_ring()
9511 cancel_work_sync(&adapter->ptp_tx_work); in ixgbe_xmit_frame_ring()
9512 clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); in ixgbe_xmit_frame_ring()
9532 tx_ring = ring ? ring : adapter->tx_ring[skb_get_queue_mapping(skb)]; in __ixgbe_xmit_frame()
9533 if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state))) in __ixgbe_xmit_frame()
9546 * ixgbe_set_mac - Change the Ethernet Address of the NIC
9555 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_set_mac()
9558 if (!is_valid_ether_addr(addr->sa_data)) in ixgbe_set_mac()
9559 return -EADDRNOTAVAIL; in ixgbe_set_mac()
9561 eth_hw_addr_set(netdev, addr->sa_data); in ixgbe_set_mac()
9562 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); in ixgbe_set_mac()
9573 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_mdio_read()
9577 if (adapter->mii_bus) { in ixgbe_mdio_read()
9581 return mdiobus_c45_read(adapter->mii_bus, prtad, in ixgbe_mdio_read()
9584 return mdiobus_read(adapter->mii_bus, prtad, regnum); in ixgbe_mdio_read()
9587 if (prtad != hw->phy.mdio.prtad) in ixgbe_mdio_read()
9588 return -EINVAL; in ixgbe_mdio_read()
9589 rc = hw->phy.ops.read_reg(hw, addr, devad, &value); in ixgbe_mdio_read()
9599 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_mdio_write()
9601 if (adapter->mii_bus) { in ixgbe_mdio_write()
9605 return mdiobus_c45_write(adapter->mii_bus, prtad, devad, in ixgbe_mdio_write()
9608 return mdiobus_write(adapter->mii_bus, prtad, regnum, value); in ixgbe_mdio_write()
9611 if (prtad != hw->phy.mdio.prtad) in ixgbe_mdio_write()
9612 return -EINVAL; in ixgbe_mdio_write()
9613 return hw->phy.ops.write_reg(hw, addr, devad, value); in ixgbe_mdio_write()
9622 if (!adapter->hw.phy.ops.read_reg) in ixgbe_ioctl()
9623 return -EOPNOTSUPP; in ixgbe_ioctl()
9626 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); in ixgbe_ioctl()
9631 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
9632 * netdev->dev_addrs
9635 * Returns non-zero on failure
9641 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_add_sanmac_netdev()
9643 if (is_valid_ether_addr(hw->mac.san_addr)) { in ixgbe_add_sanmac_netdev()
9645 err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN); in ixgbe_add_sanmac_netdev()
9649 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); in ixgbe_add_sanmac_netdev()
9655 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
9656 * netdev->dev_addrs
9659 * Returns non-zero on failure
9665 struct ixgbe_mac_info *mac = &adapter->hw.mac; in ixgbe_del_sanmac_netdev()
9667 if (is_valid_ether_addr(mac->san_addr)) { in ixgbe_del_sanmac_netdev()
9669 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); in ixgbe_del_sanmac_netdev()
9683 start = u64_stats_fetch_begin(&ring->syncp); in ixgbe_get_ring_stats64()
9684 packets = ring->stats.packets; in ixgbe_get_ring_stats64()
9685 bytes = ring->stats.bytes; in ixgbe_get_ring_stats64()
9686 } while (u64_stats_fetch_retry(&ring->syncp, start)); in ixgbe_get_ring_stats64()
9687 stats->tx_packets += packets; in ixgbe_get_ring_stats64()
9688 stats->tx_bytes += bytes; in ixgbe_get_ring_stats64()
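/* The do/while above is the seqcount read side: grab a sequence
 * number, copy the counters, and retry if a writer bumped the sequence
 * in between. Minimal single-threaded userspace model with C11 atomics
 * (the kernel's u64_stats_* helpers compile away on 64-bit, and the
 * real seqcount adds memory barriers omitted here): */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct demo_synced_stats {
	atomic_uint seq;		/* odd while a write is in flight */
	uint64_t packets, bytes;
};

static void demo_snapshot(struct demo_synced_stats *s,
			  uint64_t *packets, uint64_t *bytes)
{
	unsigned int start;

	do {
		while ((start = atomic_load(&s->seq)) & 1)
			;		/* writer active: spin */
		*packets = s->packets;
		*bytes = s->bytes;
	} while (atomic_load(&s->seq) != start);	/* retry on change */
}

int main(void)
{
	struct demo_synced_stats s = { .packets = 42, .bytes = 4200 };
	uint64_t p, b;

	demo_snapshot(&s, &p, &b);
	printf("%llu %llu\n", (unsigned long long)p, (unsigned long long)b);
	return 0;
}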
9699 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_get_stats64()
9700 struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]); in ixgbe_get_stats64()
9706 start = u64_stats_fetch_begin(&ring->syncp); in ixgbe_get_stats64()
9707 packets = ring->stats.packets; in ixgbe_get_stats64()
9708 bytes = ring->stats.bytes; in ixgbe_get_stats64()
9709 } while (u64_stats_fetch_retry(&ring->syncp, start)); in ixgbe_get_stats64()
9710 stats->rx_packets += packets; in ixgbe_get_stats64()
9711 stats->rx_bytes += bytes; in ixgbe_get_stats64()
9715 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbe_get_stats64()
9716 struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]); in ixgbe_get_stats64()
9720 for (i = 0; i < adapter->num_xdp_queues; i++) { in ixgbe_get_stats64()
9721 struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]); in ixgbe_get_stats64()
9728 stats->multicast = netdev->stats.multicast; in ixgbe_get_stats64()
9729 stats->rx_errors = netdev->stats.rx_errors; in ixgbe_get_stats64()
9730 stats->rx_length_errors = netdev->stats.rx_length_errors; in ixgbe_get_stats64()
9731 stats->rx_crc_errors = netdev->stats.rx_crc_errors; in ixgbe_get_stats64()
9732 stats->rx_missed_errors = netdev->stats.rx_missed_errors; in ixgbe_get_stats64()
9740 if (vf < 0 || vf >= adapter->num_vfs) in ixgbe_ndo_get_vf_stats()
9741 return -EINVAL; in ixgbe_ndo_get_vf_stats()
9743 vf_stats->rx_packets = adapter->vfinfo[vf].vfstats.gprc; in ixgbe_ndo_get_vf_stats()
9744 vf_stats->rx_bytes = adapter->vfinfo[vf].vfstats.gorc; in ixgbe_ndo_get_vf_stats()
9745 vf_stats->tx_packets = adapter->vfinfo[vf].vfstats.gptc; in ixgbe_ndo_get_vf_stats()
9746 vf_stats->tx_bytes = adapter->vfinfo[vf].vfstats.gotc; in ixgbe_ndo_get_vf_stats()
9747 vf_stats->multicast = adapter->vfinfo[vf].vfstats.mprc; in ixgbe_ndo_get_vf_stats()
9754 * ixgbe_validate_rtr - verify the 802.1p priority to Rx packet buffer mapping is valid.
9763 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_validate_rtr()
9770 if (hw->mac.type == ixgbe_mac_82598EB) in ixgbe_validate_rtr()
9791 * ixgbe_set_prio_tc_map - Configure netdev prio tc map
9798 struct net_device *dev = adapter->netdev; in ixgbe_set_prio_tc_map()
9799 struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; in ixgbe_set_prio_tc_map()
9800 struct ieee_ets *ets = adapter->ixgbe_ieee_ets; in ixgbe_set_prio_tc_map()
9806 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) in ixgbe_set_prio_tc_map()
9809 tc = ets->prio_tc[prio]; in ixgbe_set_prio_tc_map()
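/* Sketch of the mapping above: each of the eight 802.1p priorities is
 * resolved to a traffic class, taken from the CEE configuration when
 * DCB_CAP_DCBX_VER_CEE is negotiated and from the IEEE ETS table
 * otherwise, and the result is pushed into the netdev's prio-tc map.
 * Userspace model; the policy tables here are illustrative. */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_NUM_PRIO 8

static void demo_set_prio_tc_map(bool use_cee,
				 const unsigned char cee_tc[DEMO_NUM_PRIO],
				 const unsigned char ets_tc[DEMO_NUM_PRIO],
				 unsigned char netdev_map[DEMO_NUM_PRIO])
{
	for (int prio = 0; prio < DEMO_NUM_PRIO; prio++)
		netdev_map[prio] = use_cee ? cee_tc[prio] : ets_tc[prio];
}

int main(void)
{
	const unsigned char cee[DEMO_NUM_PRIO] = { 0, 0, 1, 1, 2, 2, 3, 3 };
	const unsigned char ets[DEMO_NUM_PRIO] = { 0, 0, 0, 0, 1, 1, 1, 1 };
	unsigned char map[DEMO_NUM_PRIO];

	demo_set_prio_tc_map(false, cee, ets, map);
	printf("prio 5 -> tc %u\n", map[5]);
	return 0;
}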
9819 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data; in ixgbe_reassign_macvlan_pool()
9833 pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools); in ixgbe_reassign_macvlan_pool()
9834 if (pool < adapter->num_rx_pools) { in ixgbe_reassign_macvlan_pool()
9835 set_bit(pool, adapter->fwd_bitmask); in ixgbe_reassign_macvlan_pool()
9836 accel->pool = pool; in ixgbe_reassign_macvlan_pool()
9845 netdev_unbind_sb_channel(adapter->netdev, vdev); in ixgbe_reassign_macvlan_pool()
9861 bitmap_clear(adapter->fwd_bitmask, 1, 63); in ixgbe_defrag_macvlan_pools()
9869 * ixgbe_setup_tc - configure net_device for multiple traffic classes
9877 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_setup_tc()
9880 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs) in ixgbe_setup_tc()
9881 return -EINVAL; in ixgbe_setup_tc()
9883 if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS) in ixgbe_setup_tc()
9884 return -EINVAL; in ixgbe_setup_tc()
9899 if (adapter->xdp_prog) { in ixgbe_setup_tc()
9900 e_warn(probe, "DCB is not supported with XDP\n"); in ixgbe_setup_tc()
9905 return -EINVAL; in ixgbe_setup_tc()
9911 adapter->hw_tcs = tc; in ixgbe_setup_tc()
9912 adapter->flags |= IXGBE_FLAG_DCB_ENABLED; in ixgbe_setup_tc()
9914 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { in ixgbe_setup_tc()
9915 adapter->last_lfc_mode = adapter->hw.fc.requested_mode; in ixgbe_setup_tc()
9916 adapter->hw.fc.requested_mode = ixgbe_fc_none; in ixgbe_setup_tc()
9921 if (adapter->hw.mac.type == ixgbe_mac_82598EB) in ixgbe_setup_tc()
9922 adapter->hw.fc.requested_mode = adapter->last_lfc_mode; in ixgbe_setup_tc()
9924 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; in ixgbe_setup_tc()
9925 adapter->hw_tcs = tc; in ixgbe_setup_tc()
9927 adapter->temp_dcb_cfg.pfc_mode_enable = false; in ixgbe_setup_tc()
9928 adapter->dcb_cfg.pfc_mode_enable = false; in ixgbe_setup_tc()
9947 u32 hdl = cls->knode.handle; in ixgbe_delete_clsu32()
9948 u32 uhtid = TC_U32_USERHTID(cls->knode.handle); in ixgbe_delete_clsu32()
9949 u32 loc = cls->knode.handle & 0xfffff; in ixgbe_delete_clsu32()
9954 return -EINVAL; in ixgbe_delete_clsu32()
9957 return -EINVAL; in ixgbe_delete_clsu32()
9961 jump = adapter->jump_tables[uhtid]; in ixgbe_delete_clsu32()
9963 return -EINVAL; in ixgbe_delete_clsu32()
9964 if (!test_bit(loc - 1, jump->child_loc_map)) in ixgbe_delete_clsu32()
9965 return -EINVAL; in ixgbe_delete_clsu32()
9966 clear_bit(loc - 1, jump->child_loc_map); in ixgbe_delete_clsu32()
9971 jump = adapter->jump_tables[i]; in ixgbe_delete_clsu32()
9972 if (jump && jump->link_hdl == hdl) { in ixgbe_delete_clsu32()
9977 if (!test_bit(j, jump->child_loc_map)) in ixgbe_delete_clsu32()
9979 spin_lock(&adapter->fdir_perfect_lock); in ixgbe_delete_clsu32()
9983 spin_unlock(&adapter->fdir_perfect_lock); in ixgbe_delete_clsu32()
9984 clear_bit(j, jump->child_loc_map); in ixgbe_delete_clsu32()
9987 kfree(jump->input); in ixgbe_delete_clsu32()
9988 kfree(jump->mask); in ixgbe_delete_clsu32()
9990 adapter->jump_tables[i] = NULL; in ixgbe_delete_clsu32()
9995 spin_lock(&adapter->fdir_perfect_lock); in ixgbe_delete_clsu32()
9997 spin_unlock(&adapter->fdir_perfect_lock); in ixgbe_delete_clsu32()
10004 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle); in ixgbe_configure_clsu32_add_hnode()
10007 return -EINVAL; in ixgbe_configure_clsu32_add_hnode()
10012 if (cls->hnode.divisor > 0) in ixgbe_configure_clsu32_add_hnode()
10013 return -EINVAL; in ixgbe_configure_clsu32_add_hnode()
10015 set_bit(uhtid - 1, &adapter->tables); in ixgbe_configure_clsu32_add_hnode()
10022 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle); in ixgbe_configure_clsu32_del_hnode()
10025 return -EINVAL; in ixgbe_configure_clsu32_del_hnode()
10027 clear_bit(uhtid - 1, &adapter->tables); in ixgbe_configure_clsu32_del_hnode()
10048 data = (struct upper_walk_data *)priv->data; in get_macvlan_queue()
10049 ifindex = data->ifindex; in get_macvlan_queue()
10050 adapter = data->adapter; in get_macvlan_queue()
10051 if (vadapter && upper->ifindex == ifindex) { in get_macvlan_queue()
10052 data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx; in get_macvlan_queue()
10053 data->action = data->queue; in get_macvlan_queue()
10064 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in handle_redirect_action()
10065 unsigned int num_vfs = adapter->num_vfs, vf; in handle_redirect_action()
10072 upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev); in handle_redirect_action()
10073 if (upper->ifindex == ifindex) { in handle_redirect_action()
10074 *queue = vf * __ALIGN_MASK(1, ~vmdq->mask); in handle_redirect_action()
10087 if (netdev_walk_all_upper_dev_rcu(adapter->netdev, in handle_redirect_action()
10095 return -EINVAL; in handle_redirect_action()
10105 return -EINVAL; in parse_tc_actions()
10120 return -EINVAL; in parse_tc_actions()
10121 return handle_redirect_action(adapter, dev->ifindex, in parse_tc_actions()
10125 return -EINVAL; in parse_tc_actions()
10128 return -EINVAL; in parse_tc_actions()
10134 return -EINVAL; in parse_tc_actions()
10148 for (i = 0; i < cls->knode.sel->nkeys; i++) { in ixgbe_clsu32_build_input()
10149 off = cls->knode.sel->keys[i].off; in ixgbe_clsu32_build_input()
10150 val = cls->knode.sel->keys[i].val; in ixgbe_clsu32_build_input()
10151 m = cls->knode.sel->keys[i].mask; in ixgbe_clsu32_build_input()
10157 input->filter.formatted.flow_type |= in ixgbe_clsu32_build_input()
10164 if (nexthdr->off == cls->knode.sel->keys[i].off && in ixgbe_clsu32_build_input()
10165 nexthdr->val == in ixgbe_clsu32_build_input()
10166 (__force u32)cls->knode.sel->keys[i].val && in ixgbe_clsu32_build_input()
10167 nexthdr->mask == in ixgbe_clsu32_build_input()
10168 (__force u32)cls->knode.sel->keys[i].mask) in ixgbe_clsu32_build_input()
10176 return -EINVAL; in ixgbe_clsu32_build_input()
10181 mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | in ixgbe_clsu32_build_input()
10184 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) in ixgbe_clsu32_build_input()
10185 mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; in ixgbe_clsu32_build_input()
10193 __be16 protocol = cls->common.protocol; in ixgbe_configure_clsu32()
10194 u32 loc = cls->knode.handle & 0xfffff; in ixgbe_configure_clsu32()
10195 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_clsu32()
10200 int i, err = -EINVAL; in ixgbe_configure_clsu32()
10204 uhtid = TC_U32_USERHTID(cls->knode.handle); in ixgbe_configure_clsu32()
10205 link_uhtid = TC_U32_USERHTID(cls->knode.link_handle); in ixgbe_configure_clsu32()
10217 if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) { in ixgbe_configure_clsu32()
10230 field_ptr = (adapter->jump_tables[0])->mat; in ixgbe_configure_clsu32()
10234 if (!adapter->jump_tables[uhtid]) in ixgbe_configure_clsu32()
10236 field_ptr = (adapter->jump_tables[uhtid])->mat; in ixgbe_configure_clsu32()
10254 if (!test_bit(link_uhtid - 1, &adapter->tables)) in ixgbe_configure_clsu32()
10262 if (adapter->jump_tables[link_uhtid] && in ixgbe_configure_clsu32()
10263 (adapter->jump_tables[link_uhtid])->link_hdl) { in ixgbe_configure_clsu32()
10270 if (nexthdr[i].o != cls->knode.sel->offoff || in ixgbe_configure_clsu32()
10271 nexthdr[i].s != cls->knode.sel->offshift || in ixgbe_configure_clsu32()
10273 (__force u32)cls->knode.sel->offmask) in ixgbe_configure_clsu32()
10278 return -ENOMEM; in ixgbe_configure_clsu32()
10281 err = -ENOMEM; in ixgbe_configure_clsu32()
10286 err = -ENOMEM; in ixgbe_configure_clsu32()
10289 jump->input = input; in ixgbe_configure_clsu32()
10290 jump->mask = mask; in ixgbe_configure_clsu32()
10291 jump->link_hdl = cls->knode.handle; in ixgbe_configure_clsu32()
10296 jump->mat = nexthdr[i].jump; in ixgbe_configure_clsu32()
10297 adapter->jump_tables[link_uhtid] = jump; in ixgbe_configure_clsu32()
10310 return -ENOMEM; in ixgbe_configure_clsu32()
10313 err = -ENOMEM; in ixgbe_configure_clsu32()
10317 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) { in ixgbe_configure_clsu32()
10318 if ((adapter->jump_tables[uhtid])->input) in ixgbe_configure_clsu32()
10319 memcpy(input, (adapter->jump_tables[uhtid])->input, in ixgbe_configure_clsu32()
10321 if ((adapter->jump_tables[uhtid])->mask) in ixgbe_configure_clsu32()
10322 memcpy(mask, (adapter->jump_tables[uhtid])->mask, in ixgbe_configure_clsu32()
10329 struct ixgbe_jump_table *link = adapter->jump_tables[i]; in ixgbe_configure_clsu32()
10331 if (link && (test_bit(loc - 1, link->child_loc_map))) { in ixgbe_configure_clsu32()
10334 err = -EINVAL; in ixgbe_configure_clsu32()
10343 err = parse_tc_actions(adapter, cls->knode.exts, &input->action, in ixgbe_configure_clsu32()
10348 input->sw_idx = loc; in ixgbe_configure_clsu32()
10350 spin_lock(&adapter->fdir_perfect_lock); in ixgbe_configure_clsu32()
10352 if (hlist_empty(&adapter->fdir_filter_list)) { in ixgbe_configure_clsu32()
10353 memcpy(&adapter->fdir_mask, mask, sizeof(*mask)); in ixgbe_configure_clsu32()
10357 } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) { in ixgbe_configure_clsu32()
10358 err = -EINVAL; in ixgbe_configure_clsu32()
10362 ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask); in ixgbe_configure_clsu32()
10363 err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter, in ixgbe_configure_clsu32()
10364 input->sw_idx, queue); in ixgbe_configure_clsu32()
10368 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); in ixgbe_configure_clsu32()
10369 spin_unlock(&adapter->fdir_perfect_lock); in ixgbe_configure_clsu32()
10371 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) in ixgbe_configure_clsu32()
10372 set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map); in ixgbe_configure_clsu32()
10377 spin_unlock(&adapter->fdir_perfect_lock); in ixgbe_configure_clsu32()
10390 switch (cls_u32->command) { in ixgbe_setup_tc_cls_u32()
10402 return -EOPNOTSUPP; in ixgbe_setup_tc_cls_u32()
10411 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) in ixgbe_setup_tc_block_cb()
10412 return -EOPNOTSUPP; in ixgbe_setup_tc_block_cb()
10418 return -EOPNOTSUPP; in ixgbe_setup_tc_block_cb()
10425 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; in ixgbe_setup_tc_mqprio()
10426 return ixgbe_setup_tc(dev, mqprio->num_tc); in ixgbe_setup_tc_mqprio()
10445 return -EOPNOTSUPP; in __ixgbe_setup_tc()
10452 struct net_device *netdev = adapter->netdev; in ixgbe_sriov_reinit()
10455 ixgbe_setup_tc(netdev, adapter->hw_tcs); in ixgbe_sriov_reinit()
10480 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) in ixgbe_fix_features()
10483 if (adapter->xdp_prog && (features & NETIF_F_LRO)) { in ixgbe_fix_features()
10496 /* go back to full RSS if we're not running SR-IOV */ in ixgbe_reset_l2fw_offload()
10497 if (!adapter->ring_feature[RING_F_VMDQ].offset) in ixgbe_reset_l2fw_offload()
10498 adapter->flags &= ~(IXGBE_FLAG_VMDQ_ENABLED | in ixgbe_reset_l2fw_offload()
10501 adapter->ring_feature[RING_F_RSS].limit = rss; in ixgbe_reset_l2fw_offload()
10502 adapter->ring_feature[RING_F_VMDQ].limit = 1; in ixgbe_reset_l2fw_offload()
10504 ixgbe_setup_tc(adapter->netdev, adapter->hw_tcs); in ixgbe_reset_l2fw_offload()
/* in ixgbe_set_features(): */
        netdev_features_t changed = netdev->features ^ features;
        /* ... */
                if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
                /* ... */
                adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
        } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
                   !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
                if (adapter->rx_itr_setting == 1 ||
                    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
                        adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
                /* ... */
                        e_info(probe, "rx-usecs set too low, "
        /* ... */

        /* Check if Flow Director n-tuple support or hw_tc support was
         * ...
         */
                if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
                /* ... */
                adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
                adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
        /* ... */
                if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
                /* ... */
                adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;

                /* We cannot enable ATR if SR-IOV is enabled */
                if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED ||
                    /* ... */
                    (adapter->hw_tcs > 1) ||
                    /* ... */
                    (adapter->ring_feature[RING_F_RSS].limit <= 1) ||
                    /* ... */
                    (!adapter->atr_sample_rate))
                /* ... */
                        adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
        /* ... */
        netdev->features = features;

        if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1)
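/*
 * Editor's sketch: "changed = old ^ new" isolates exactly the toggled
 * feature bits, which is why ixgbe_set_features() above keys its work
 * off that XOR.  Standalone C; the flag values below are illustrative
 * stand-ins, not the kernel's NETIF_F_* values.
 */
#include <stdint.h>
#include <stdio.h>

#define F_LRO    (1u << 0)
#define F_NTUPLE (1u << 1)
#define F_RXHASH (1u << 2)

int main(void)
{
        uint32_t old_features = F_LRO | F_RXHASH;
        uint32_t new_features = F_NTUPLE | F_RXHASH;
        uint32_t changed = old_features ^ new_features;

        if (changed & F_LRO)
                printf("LRO toggled %s\n",
                       (new_features & F_LRO) ? "on" : "off");
        if (changed & F_NTUPLE)
                printf("NTUPLE toggled %s\n",
                       (new_features & F_NTUPLE) ? "on" : "off");
        return 0;       /* prints: LRO toggled off, NTUPLE toggled on */
}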
/* in ixgbe_ndo_fdb_add(): */
                return -ENOMEM;

/**
 * ixgbe_configure_bridge_mode - set various bridge modes
 * ...
 */
        struct ixgbe_hw *hw = &adapter->hw;
        /* ... */
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0);
        /* ... */
                num_pools = adapter->num_vfs + adapter->num_rx_pools;

                if (hw->mac.ops.set_source_address_pruning)
                        hw->mac.ops.set_source_address_pruning(hw,
        /* ... */
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC,
        /* ... */
                /* disable Rx switching replication unless we have SR-IOV
                 * ...
                 */
                if (!adapter->num_vfs)
        /* ... */
                num_pools = adapter->num_vfs + adapter->num_rx_pools;

                if (hw->mac.ops.set_source_address_pruning)
                        hw->mac.ops.set_source_address_pruning(hw,
        /* ... */
                return -EINVAL;
        /* ... */
        adapter->bridge_mode = mode;
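/*
 * Editor's sketch: a condensed view of the mode selection above.
 * VEPA turns the internal Tx->Rx switch loopback off (traffic hairpins
 * through the external bridge); VEB turns it on so VF-to-VF frames
 * stay on chip.  IXGBE_PFDTXGSWC_VT_LBEN is the loopback-enable bit;
 * the helper itself is illustrative, not driver code.
 */
static int example_set_bridge_mode(struct ixgbe_adapter *adapter, __u16 mode)
{
        struct ixgbe_hw *hw = &adapter->hw;

        switch (mode) {
        case BRIDGE_MODE_VEPA:
                IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, 0);
                break;
        case BRIDGE_MODE_VEB:
                IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
                break;
        default:
                return -EINVAL;
        }
        adapter->bridge_mode = mode;
        return 0;
}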
/* in ixgbe_ndo_bridge_setlink(): */
        if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
                return -EOPNOTSUPP;
        /* ... */
                return -EINVAL;

/* in ixgbe_ndo_bridge_getlink(): */
        if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
        /* ... */
                                       adapter->bridge_mode, 0, 0, nlflags,
/* in ixgbe_fwd_add(): */
        int tcs = adapter->hw_tcs ? : 1;
        /* ... */
        if (adapter->xdp_prog) {
        /* ... */
                return ERR_PTR(-EINVAL);
        /* ... */
                return ERR_PTR(-EMEDIUMTYPE);
        /* ... */
                return ERR_PTR(-ERANGE);

        pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
        if (pool == adapter->num_rx_pools) {
                u16 used_pools = adapter->num_vfs + adapter->num_rx_pools;
        /* ... */
                if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
                     adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
                    adapter->num_rx_pools > IXGBE_MAX_MACVLANS)
                        return ERR_PTR(-EBUSY);
        /* ... */
                        return ERR_PTR(-EBUSY);

                adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED |
        /* ... */
                if (used_pools < 32 && adapter->num_rx_pools < 16)
                        reserved_pools = min_t(int,
                                               32 - used_pools,
                                               16 - adapter->num_rx_pools);
                else if (adapter->num_rx_pools < 32)
                        reserved_pools = min_t(int,
                                               64 - used_pools,
                                               32 - adapter->num_rx_pools);
                else
                        reserved_pools = 64 - used_pools;
        /* ... */
                        return ERR_PTR(-EBUSY);

                adapter->ring_feature[RING_F_VMDQ].limit += reserved_pools;
        /* ... */
                err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
        /* ... */
                if (pool >= adapter->num_rx_pools)
                        return ERR_PTR(-ENOMEM);
        /* ... */
                return ERR_PTR(-ENOMEM);

        set_bit(pool, adapter->fwd_bitmask);
        /* ... */
        accel->pool = pool;
        accel->netdev = vdev;
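/*
 * Editor's sketch: the tiered pool-reservation arithmetic from
 * ixgbe_fwd_add() above, lifted into standalone C.  The 16/32/64
 * limits follow the fragment; min() picks the smaller of "pools the
 * hardware still has free" and "pools left in the current tier".
 */
#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

static unsigned int reserve_pools(unsigned int used_pools,
                                  unsigned int num_rx_pools)
{
        if (used_pools < 32 && num_rx_pools < 16)
                return min_u(32 - used_pools, 16 - num_rx_pools);
        else if (num_rx_pools < 32)
                return min_u(64 - used_pools, 32 - num_rx_pools);
        else
                return 64 - used_pools;
}

int main(void)
{
        printf("%u\n", reserve_pools(4, 4));    /* tier 1: min(28, 12) = 12 */
        printf("%u\n", reserve_pools(40, 20));  /* tier 2: min(24, 12) = 12 */
        printf("%u\n", reserve_pools(60, 40));  /* tier 3: 64 - 60 = 4 */
        return 0;
}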
/* in ixgbe_fwd_del(): */
        unsigned int rxbase = accel->rx_base_queue;
        /* ... */
        ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr,
                             VMDQ_P(accel->pool));
        /* ... */
        for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
                struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i];
                struct ixgbe_q_vector *qv = ring->q_vector;
                /* ... */
                if (netif_running(adapter->netdev))
                        napi_synchronize(&qv->napi);
                ring->netdev = NULL;
        }
        /* ... */
        netdev_unbind_sb_channel(pdev, accel->netdev);
        netdev_set_sb_channel(accel->netdev, 0);

        clear_bit(accel->pool, adapter->fwd_bitmask);
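/*
 * Editor's sketch: the fwd_bitmask bookkeeping shared by
 * ixgbe_fwd_add()/ixgbe_fwd_del() above -- find-first-zero allocates a
 * pool, clearing the bit frees it.  Standalone C stand-ins for the
 * kernel's find_first_zero_bit()/set_bit()/clear_bit().
 */
#include <stdio.h>

#define MAX_POOLS 64

static int find_first_zero(unsigned long long map, int limit)
{
        for (int i = 0; i < limit; i++)
                if (!(map & (1ULL << i)))
                        return i;
        return limit;   /* "none free", like pool == num_rx_pools above */
}

int main(void)
{
        unsigned long long fwd_bitmask = 0x7;   /* pools 0..2 in use */
        int pool = find_first_zero(fwd_bitmask, MAX_POOLS);

        fwd_bitmask |= 1ULL << pool;            /* set_bit(pool, ...)   */
        printf("allocated pool %d\n", pool);    /* -> 3 */
        fwd_bitmask &= ~(1ULL << pool);         /* clear_bit(pool, ...) */
        return 0;
}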
/* in ixgbe_features_check(): */
        network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
        /* ... */
        /* IPsec offload sets skb->encapsulation but still can handle
         * ...
         */
        if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) {
/* in ixgbe_xdp_setup(): */
        int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
        /* ... */
        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
                return -EINVAL;

        if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
                return -EINVAL;
        /* ... */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct ixgbe_ring *ring = adapter->rx_ring[i];
                /* ... */
                        return -EINVAL;
                /* ... */
                        return -EINVAL;
        /* ... */
                return -ENOMEM;

        old_prog = xchg(&adapter->xdp_prog, prog);
        /* ... */
                err = ixgbe_setup_tc(dev, adapter->hw_tcs);
        /* ... */
                        return -EINVAL;
        /* ... */
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        WRITE_ONCE(adapter->rx_ring[i]->xdp_prog,
                                   adapter->xdp_prog);
        /* ... */
                num_queues = min_t(int, adapter->num_rx_queues,
                                   adapter->num_xdp_queues);
        /* ... */
                        if (adapter->xdp_ring[i]->xsk_pool)
                                (void)ixgbe_xsk_wakeup(adapter->netdev, i,
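/*
 * Editor's sketch (kernel-style, illustrative): the pre-flight checks
 * ixgbe_xdp_setup() runs before accepting a program -- no SR-IOV or
 * DCB, and every Rx ring must take the frame in one buffer, since the
 * XDP path does not handle scattered frames.  ring_is_rsc_enabled()
 * and ixgbe_rx_bufsz() are assumed to be the driver helpers of those
 * names.
 */
static int example_xdp_prog_ok(struct ixgbe_adapter *adapter, int frame_size)
{
        int i;

        if (adapter->flags &
            (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED))
                return -EINVAL;

        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct ixgbe_ring *ring = adapter->rx_ring[i];

                if (ring_is_rsc_enabled(ring))
                        return -EINVAL;         /* RSC and XDP don't mix */
                if (frame_size > ixgbe_rx_bufsz(ring))
                        return -EINVAL;         /* frame must fit one buffer */
        }
        return 0;
}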
/* in ixgbe_xdp(): */
        switch (xdp->command) {
        /* ... */
                return ixgbe_xdp_setup(dev, xdp->prog);
        /* ... */
                return ixgbe_xsk_pool_setup(adapter, xdp->xsk.pool,
                                            xdp->xsk.queue_id);
        /* ... */
                return -EINVAL;

/* in ixgbe_xdp_ring_update_tail(): */
        writel(ring->next_to_use, ring->tail);

/* in ixgbe_xdp_ring_update_tail_locked(): */
        spin_lock(&ring->tx_lock);
        /* ... */
        spin_unlock(&ring->tx_lock);
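/*
 * Editor's sketch: the unlocked tail bump that the _locked variant
 * above wraps.  The memory barrier before the doorbell is the usual
 * descriptor-ring contract: all descriptor writes must be visible to
 * the device before it is told to fetch them.  Illustrative only.
 */
static void example_xdp_ring_update_tail(struct ixgbe_ring *ring)
{
        /* order descriptor writes before the tail (doorbell) write */
        wmb();
        writel(ring->next_to_use, ring->tail);
}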
/* in ixgbe_xdp_xmit(): */
        if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
                return -ENETDOWN;

        if (!netif_carrier_ok(adapter->netdev) ||
            !netif_running(adapter->netdev))
                return -ENETDOWN;
        /* ... */
                return -EINVAL;

        /* During program transitions it's possible adapter->xdp_prog is assigned
         * ...
         */
        ring = adapter->xdp_prog ? ixgbe_determine_xdp_ring(adapter) : NULL;
        /* ... */
                return -ENXIO;

        if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state)))
                return -ENXIO;
        /* ... */
                spin_lock(&ring->tx_lock);
        /* ... */
                spin_unlock(&ring->tx_lock);
/* in ixgbe_disable_txr_hw(): */
        struct ixgbe_hw *hw = &adapter->hw;
        u8 reg_idx = tx_ring->reg_idx;
        /* ... */
        while (wait_loop--) {

/* in ixgbe_disable_txr(): */
        set_bit(__IXGBE_TX_DISABLED, &tx_ring->state);

/* in ixgbe_disable_rxr_hw(): */
        struct ixgbe_hw *hw = &adapter->hw;
        u8 reg_idx = rx_ring->reg_idx;
        /* ... */
        if (hw->mac.type == ixgbe_mac_82598EB &&
        /* ... */
        while (wait_loop--) {

/* in ixgbe_reset_txr_stats(): */
        memset(&tx_ring->stats, 0, sizeof(tx_ring->stats));
        memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats));

/* in ixgbe_reset_rxr_stats(): */
        memset(&rx_ring->stats, 0, sizeof(rx_ring->stats));
        memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
/**
 * ixgbe_irq_disable_single - Disable single IRQ vector
 * ...
 */
        struct ixgbe_hw *hw = &adapter->hw;
        /* ... */
        switch (adapter->hw.mac.type) {
        /* ... */
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
        /* ... */
        IXGBE_WRITE_FLUSH(&adapter->hw);
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
                synchronize_irq(adapter->msix_entries[ring].vector);
        else
                synchronize_irq(adapter->pdev->irq);
/**
 * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings
 * ...
 */
        rx_ring = adapter->rx_ring[ring];
        tx_ring = adapter->tx_ring[ring];
        xdp_ring = adapter->xdp_ring[ring];
        /* ... */
        napi_disable(&rx_ring->q_vector->napi);

/**
 * ixgbe_txrx_ring_enable - Enable Rx/Tx/XDP Tx rings
 * ...
 */
        rx_ring = adapter->rx_ring[ring];
        tx_ring = adapter->tx_ring[ring];
        xdp_ring = adapter->xdp_ring[ring];
        /* ... */
        clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
        /* ... */
                clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
        /* ... */
        napi_enable(&rx_ring->q_vector->napi);
        /* ... */
        IXGBE_WRITE_FLUSH(&adapter->hw);
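/*
 * Editor's sketch: how a disable/enable pair like the above is
 * typically used -- quiesce one ring triplet, swap its resources (for
 * instance an AF_XDP pool), then re-arm it.  Illustrative only; the
 * actual caller and its locking are not visible in the fragments.
 */
static void example_ring_reconfigure(struct ixgbe_adapter *adapter, int ring)
{
        ixgbe_txrx_ring_disable(adapter, ring); /* NAPI off, queues drained */
        /* ... swap ring resources here ... */
        ixgbe_txrx_ring_enable(adapter, ring);  /* queues re-armed */
}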
/**
 * ixgbe_enumerate_functions - Get the number of ports this device has
 * ...
 * This function enumerates the physical functions co-located on a single slot,
 * ...
 */
        struct pci_dev *entry, *pdev = adapter->pdev;
        /* ... */
        if (ixgbe_pcie_from_parent(&adapter->hw))
        /* ... */
        list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) {
                /* ... */
                if (entry->is_virtfn)
                /* ... */
                /* ...
                 * attached to a virtual machine using VT-d, for example. In
                 * this case, simply return -1 to indicate this.
                 */
                if ((entry->vendor != pdev->vendor) ||
                    (entry->device != pdev->device))
                        return -1;
/**
 * ixgbe_wol_supported - Check whether device supports WoL
 * ...
 */
        struct ixgbe_hw *hw = &adapter->hw;
        u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
        /* ... */
        if (hw->mac.type == ixgbe_mac_82598EB)
        /* ... */
        if (hw->mac.type >= ixgbe_mac_X540) {
        /* ... */
                     (hw->bus.func == 0)))
        /* ... */
                if (hw->bus.func != 0)
/**
 * ixgbe_set_fw_version_e610 - Set FW version specifically on E610 adapters
 * ...
 */
        struct ixgbe_orom_info *orom = &adapter->hw.flash.orom;
        struct ixgbe_nvm_info *nvm = &adapter->hw.flash.nvm;

        snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
                 "%x.%02x 0x%x %d.%d.%d", nvm->major, nvm->minor,
                 nvm->eetrack, orom->major, orom->build, orom->patch);
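/*
 * Editor's sketch: the E610 eeprom_id formatting above, standalone.
 * The format string matches the fragment; the field values below are
 * made up for illustration.
 */
#include <stdio.h>

int main(void)
{
        char eeprom_id[64];
        unsigned int nvm_major = 0x2, nvm_minor = 0x0b;
        unsigned int eetrack = 0x8001a4b3;
        int orom_major = 1, orom_build = 2, orom_patch = 3;

        snprintf(eeprom_id, sizeof(eeprom_id), "%x.%02x 0x%x %d.%d.%d",
                 nvm_major, nvm_minor, eetrack, orom_major, orom_build,
                 orom_patch);
        puts(eeprom_id);        /* -> "2.0b 0x8001a4b3 1.2.3" */
        return 0;
}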
/**
 * ixgbe_set_fw_version - Set FW version
 * ...
 */
        struct ixgbe_hw *hw = &adapter->hw;
        /* ... */
        if (adapter->hw.mac.type == ixgbe_mac_e610) {
        /* ... */
                snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
        /* ... */
                snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
        /* ... */
        snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
/**
 * ixgbe_recovery_probe - Handle FW recovery mode during probe
 * ...
 * Return: 0 on successful probe for E610, -EIO if recovery mode is detected
 * on a non-E610 adapter, or another error status code in any other case.
 */
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        struct ixgbe_hw *hw = &adapter->hw;
        /* ... */
        int err = -EIO;

        if (hw->mac.type != ixgbe_mac_e610)
        /* ... */
        mutex_init(&hw->aci.lock);
        err = ixgbe_get_flash_data(&adapter->hw);
        /* ... */
        timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
        INIT_WORK(&adapter->service_task, ixgbe_recovery_service_task);
        set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
        clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
        /* ... */
        if (hw->mac.ops.get_bus_info)
                hw->mac.ops.get_bus_info(hw);
        /* ... */
        devl_lock(adapter->devlink);
        /* ... */
        SET_NETDEV_DEVLINK_PORT(adapter->netdev,
                                &adapter->devlink_port);
        /* ... */
        devl_register(adapter->devlink);
        devl_unlock(adapter->devlink);
        /* ... */
        mutex_destroy(&adapter->hw.aci.lock);
        /* ... */
        devlink_free(adapter->devlink);
        /* ... */
        disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
/**
 * ixgbe_probe - Device Initialization Routine
 * ...
 */
        const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
        /* ... */

        /* ...
         * the PCIe SR-IOV capability.
         */
        if (pdev->is_virtfn) {
                /* ... */
                     pci_name(pdev), pdev->vendor, pdev->device);
                return -EINVAL;
        }
        /* ... */
        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        /* ... */
                dev_err(&pdev->dev,
        /* ... */
                dev_err(&pdev->dev,
        /* ... */
        if (ii->mac == ixgbe_mac_82598EB) {
        /* ... */
        } else if (ii->mac == ixgbe_mac_e610) {
        /* ... */
        adapter = ixgbe_allocate_devlink(&pdev->dev);
        /* ... */
                err = -ENOMEM;
        /* ... */
        SET_NETDEV_DEV(netdev, &pdev->dev);
        /* ... */
        netdev_priv_wrapper->adapter = adapter;

        adapter->netdev = netdev;
        adapter->pdev = pdev;
        hw = &adapter->hw;
        hw->back = adapter;
        adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
        /* ... */
        hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
        /* ... */
        adapter->io_addr = hw->hw_addr;
        if (!hw->hw_addr) {
                err = -EIO;
        /* ... */
        hw->mac.ops = *ii->mac_ops;
        hw->mac.type = ii->mac;
        hw->mvals = ii->mvals;
        if (ii->link_ops)
                hw->link.ops = *ii->link_ops;
        /* ... */
        hw->eeprom.ops = *ii->eeprom_ops;
        /* ... */
        if (ixgbe_removed(hw->hw_addr)) {
                err = -EIO;
        /* ... */
                hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
        /* ... */
        hw->phy.ops = *ii->phy_ops;
        hw->phy.sfp_type = ixgbe_sfp_type_unknown;
        /* ... */
        hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
        hw->phy.mdio.mmds = 0;
        hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
        hw->phy.mdio.dev = netdev;
        hw->phy.mdio.mdio_read = ixgbe_mdio_read;
        hw->phy.mdio.mdio_write = ixgbe_mdio_write;

        netdev->netdev_ops = &ixgbe_netdev_ops;
        /* ... */
        netdev->watchdog_timeo = 5 * HZ;
        strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
        if (adapter->hw.mac.type == ixgbe_mac_e610) {
                err = ixgbe_get_caps(&adapter->hw);
                /* ... */
                        dev_err(&pdev->dev, "ixgbe_get_caps failed %d\n", err);
                /* ... */
                err = ixgbe_get_flash_data(&adapter->hw);
        /* ... */
        if (adapter->hw.mac.type == ixgbe_mac_82599EB)
                adapter->flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF;

        switch (adapter->hw.mac.type) {
        /* ... */
                netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550;
        /* ... */
                netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550em_a;
        /* ... */

        switch (adapter->hw.mac.type) {
        /* ... */
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
        /* ... */
        if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
        /* ... */
        hw->allow_unsupported_sfp = allow_unsupported_sfp;
        /* ... */
        hw->phy.reset_if_overtemp = true;
        err = hw->mac.ops.reset_hw(hw);
        hw->phy.reset_if_overtemp = false;
        /* ... */
        if (err == -ENOENT) {
        /* ... */
        } else if (err == -EOPNOTSUPP) {
        /* ... */

        /* SR-IOV not supported on the 82598 */
        if (adapter->hw.mac.type == ixgbe_mac_82598EB)
        /* ... */
        hw->mbx.ops = ii->mbx_ops;
        /* ... */
        netdev->features = NETIF_F_SG |
        /* ... */
        netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES;
        netdev->features |= NETIF_F_GSO_PARTIAL |
        /* ... */
        if (hw->mac.type >= ixgbe_mac_82599EB)
                netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;
        /* ... */
        if (adapter->ipsec)
                netdev->features |= IXGBE_ESP_FEATURES;
        /* ... */
        netdev->hw_features |= netdev->features |
        /* ... */
        if (hw->mac.type >= ixgbe_mac_82599EB)
                netdev->hw_features |= NETIF_F_NTUPLE |
        /* ... */
        netdev->features |= NETIF_F_HIGHDMA;

        netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
        netdev->hw_enc_features |= netdev->vlan_features;
        netdev->mpls_features |= NETIF_F_SG |
        /* ... */
        netdev->mpls_features |= IXGBE_GSO_PARTIAL_FEATURES;
        /* ... */
        netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
        /* ... */
        netdev->priv_flags |= IFF_UNICAST_FLT;
        netdev->priv_flags |= IFF_SUPP_NOFCS;

        netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
        /* ... */

        /* MTU range: 68 - 9710 */
        netdev->min_mtu = ETH_MIN_MTU;
        netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
        /* ... */
        if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
                netdev->dcbnl_ops = &ixgbe_dcbnl_ops;
        /* ... */
        if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
        /* ... */
                if (hw->mac.ops.get_device_caps) {
                        hw->mac.ops.get_device_caps(hw, &device_caps);
                        /* ... */
                                adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
        /* ... */
                adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;

                netdev->features |= NETIF_F_FSO |
                /* ... */
                netdev->vlan_features |= NETIF_F_FSO |
        /* ... */
        if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
                netdev->hw_features |= NETIF_F_LRO;
        if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
                netdev->features |= NETIF_F_LRO;
        /* ... */
        if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
        /* ... */
                err = -EIO;
        /* ... */
        eth_platform_get_mac_address(&adapter->pdev->dev,
                                     adapter->hw.mac.perm_addr);
        /* ... */
        eth_hw_addr_set(netdev, hw->mac.perm_addr);
        /* ... */
        if (!is_valid_ether_addr(netdev->dev_addr)) {
        /* ... */
                err = -EIO;
        /* ... */
        /* Set hw->mac.addr to permanent MAC address */
        ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
        /* ... */
        timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
        /* ... */
        if (ixgbe_removed(hw->hw_addr)) {
                err = -EIO;
        /* ... */
        INIT_WORK(&adapter->service_task, ixgbe_service_task);
        set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
        clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
        for (i = 0; i < adapter->num_rx_queues; i++)
                u64_stats_init(&adapter->rx_ring[i]->syncp);
        for (i = 0; i < adapter->num_tx_queues; i++)
                u64_stats_init(&adapter->tx_ring[i]->syncp);
        for (i = 0; i < adapter->num_xdp_queues; i++)
                u64_stats_init(&adapter->xdp_ring[i]->syncp);
        /* ... */
        adapter->wol = 0;
        hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
        hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
                                              pdev->subsystem_device);
        if (hw->wol_enabled)
                adapter->wol = IXGBE_WUFC_MAG;

        device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
        /* ... */
        hw->mac.ops.get_bus_info(hw);
        /* ... */
        switch (hw->mac.type) {
        /* ... */
        err = hw->eeprom.ops.read_pba_string(hw, part_str, sizeof(part_str));
        /* ... */
        if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
        /* ... */
                           hw->mac.type, hw->phy.type, hw->phy.sfp_type,
        /* ... */
                           hw->mac.type, hw->phy.type, part_str);
        /* ... */
        e_dev_info("%pM\n", netdev->dev_addr);
        /* ... */
        err = hw->mac.ops.start_hw(hw);
        if (err == -EACCES) {
                /* We are running on a pre-production device, log a warning */
                e_dev_warn("This device is a pre-production adapter/LOM. "
        /* ... */
        strcpy(netdev->name, "eth%d");
        /* ... */
        devl_lock(adapter->devlink);
        /* ... */
        SET_NETDEV_DEVLINK_PORT(adapter->netdev, &adapter->devlink_port);
        /* ... */
        if (hw->mac.ops.disable_tx_laser)
                hw->mac.ops.disable_tx_laser(hw);
        /* ... */
        if (dca_add_requester(&pdev->dev) == 0) {
                adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
        /* ... */
        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
                e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
                for (i = 0; i < adapter->num_vfs; i++)
        /* ... */
        if (hw->mac.ops.set_fw_drv_ver)
                hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF,
                                           sizeof(UTS_RELEASE) - 1,
        /* ... */
        if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
                hw->mac.ops.setup_link(hw,
        /* ... */
        devl_register(adapter->devlink);
        devl_unlock(adapter->devlink);
        /* ... */
        devl_port_unregister(&adapter->devlink_port);
        devl_unlock(adapter->devlink);
        /* ... */
        if (hw->mac.type == ixgbe_mac_e610)
                mutex_destroy(&adapter->hw.aci.lock);
        /* ... */
        adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
        iounmap(adapter->io_addr);
        kfree(adapter->jump_tables[0]);
        kfree(adapter->mac_table);
        kfree(adapter->rss_key);
        bitmap_free(adapter->af_xdp_zc_qps);
        /* ... */
        disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
        /* ... */
        devlink_free(adapter->devlink);
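/*
 * Editor's sketch: a common probe-time DMA mask negotiation, shown
 * because the fragment near the top of ixgbe_probe() only shows the
 * 64-bit request and its dev_err() on failure.  Whether this driver
 * still falls back to a 32-bit mask is not visible in the fragments;
 * the fallback below is the generic PCI driver pattern.
 */
static int example_set_dma_mask(struct pci_dev *pdev)
{
        int err;

        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err) {
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err)
                        dev_err(&pdev->dev,
                                "No usable DMA configuration, aborting\n");
        }
        return err;
}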
/**
 * ixgbe_remove - Device Removal Routine
 * ...
 * Hot-Plug event, or because the driver is going to be removed from
 * ...
 */
        netdev = adapter->netdev;
        devl_lock(adapter->devlink);
        devl_unregister(adapter->devlink);
        /* ... */
        ixgbe_fwlog_deinit(&adapter->hw);
        /* ... */
        set_bit(__IXGBE_REMOVING, &adapter->state);
        cancel_work_sync(&adapter->service_task);
        /* ... */
        if (adapter->hw.mac.type == ixgbe_mac_e610)
        /* ... */
        if (adapter->mii_bus)
                mdiobus_unregister(adapter->mii_bus);
        /* ... */
        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
                adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
                dca_remove_requester(&pdev->dev);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
        /* ... */
        if (netdev->reg_state == NETREG_REGISTERED)
        /* ... */
        devl_port_unregister(&adapter->devlink_port);
        devl_unlock(adapter->devlink);
        /* ... */
        kfree(adapter->ixgbe_ieee_pfc);
        kfree(adapter->ixgbe_ieee_ets);
        /* ... */
        iounmap(adapter->io_addr);
        /* ... */
                if (adapter->jump_tables[i]) {
                        kfree(adapter->jump_tables[i]->input);
                        kfree(adapter->jump_tables[i]->mask);
                }
                kfree(adapter->jump_tables[i]);
        /* ... */
        kfree(adapter->mac_table);
        kfree(adapter->rss_key);
        bitmap_free(adapter->af_xdp_zc_qps);
        disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
        /* ... */
        if (adapter->hw.mac.type == ixgbe_mac_e610)
                mutex_destroy(&adapter->hw.aci.lock);
        /* ... */
        devlink_free(adapter->devlink);
/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * ...
 */
        struct net_device *netdev = adapter->netdev;
        /* ... */
        struct ixgbe_hw *hw = &adapter->hw;
        /* ... */
        if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
            adapter->num_vfs == 0)
        /* ... */
        bdev = pdev->bus->self;
        /* ... */
                bdev = bdev->bus->self;
        /* ... */
        if (ixgbe_removed(hw->hw_addr))
        /* ... */
        if ((pf_func & 1) == (pdev->devfn & 1)) {
        /* ... */
                switch (adapter->hw.mac.type) {
        /* ... */
                        if (vfdev->devfn == (req_id & 0xFF))
        /* ... */
                adapter->vferr_refcount++;
        /* ... */
        if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
        /* ... */
        if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * ...
 * Restart the card from scratch, as if from a cold-boot.
 */
                e_err(probe, "Cannot re-enable PCI device after reset.\n");
        /* ... */
        clear_bit(__IXGBE_DISABLED, &adapter->state);
        adapter->hw.hw_addr = adapter->io_addr;
        /* ... */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * ...
 */
        struct net_device *netdev = adapter->netdev;
        /* ... */
        if (adapter->vferr_refcount) {
        /* ... */
                adapter->vferr_refcount--;
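/*
 * Editor's sketch: the three AER callbacks above are the standard PCI
 * error-recovery hooks, normally published to the PCI core through a
 * struct pci_error_handlers.  The struct name below is illustrative;
 * the member names are the standard ones.
 */
static const struct pci_error_handlers example_err_handler = {
        .error_detected = ixgbe_io_error_detected,
        .slot_reset     = ixgbe_io_slot_reset,
        .resume         = ixgbe_io_resume,
};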
/**
 * ixgbe_init_module - Driver Registration Routine
 * ...
 */
        /* ... */
                return -ENOMEM;

/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 * ...
 */