Lines Matching +full:umac +full:- +full:reset

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018-2023, Intel Corporation. */
46 static int debug = -1;
58 * ice_hw_to_dev - Get device pointer from the hardware structure
69 return &pf->pdev->dev; in ice_hw_to_dev()
92 return dev && (dev->netdev_ops == &ice_netdev_ops || in netif_is_ice()
93 dev->netdev_ops == &ice_netdev_safe_mode_ops); in netif_is_ice()
97 * ice_get_tx_pending - returns number of Tx descriptors not processed
104 head = ring->next_to_clean; in ice_get_tx_pending()
105 tail = ring->next_to_use; in ice_get_tx_pending()
109 tail - head : (tail + ring->count - head); in ice_get_tx_pending()
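
A minimal user-space sketch of the pending-count arithmetic matched above (names are illustrative, not driver code): the descriptor ring is circular, so when the producer index has wrapped past the consumer index the ring size must be added back in.

    /* pending entries in a circular ring of `count` descriptors,
     * where head is next_to_clean and tail is next_to_use
     */
    static unsigned int ring_pending(unsigned int head, unsigned int tail,
                                     unsigned int count)
    {
        if (head == tail)
            return 0;
        return (head < tail) ? tail - head : tail + count - head;
    }

    /* e.g. count = 512, head = 500, tail = 10 -> 22 pending */
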
114 * ice_check_for_hang_subtask - check for and recover hung queues
126 if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) { in ice_check_for_hang_subtask()
127 vsi = pf->vsi[v]; in ice_check_for_hang_subtask()
131 if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state)) in ice_check_for_hang_subtask()
134 if (!(vsi->netdev && netif_carrier_ok(vsi->netdev))) in ice_check_for_hang_subtask()
137 hw = &vsi->back->hw; in ice_check_for_hang_subtask()
140 struct ice_tx_ring *tx_ring = vsi->tx_rings[i]; in ice_check_for_hang_subtask()
148 ring_stats = tx_ring->ring_stats; in ice_check_for_hang_subtask()
152 if (tx_ring->desc) { in ice_check_for_hang_subtask()
160 packets = ring_stats->stats.pkts & INT_MAX; in ice_check_for_hang_subtask()
161 if (ring_stats->tx_stats.prev_pkt == packets) { in ice_check_for_hang_subtask()
163 ice_trigger_sw_intr(hw, tx_ring->q_vector); in ice_check_for_hang_subtask()
171 ring_stats->tx_stats.prev_pkt = in ice_check_for_hang_subtask()
172 ice_get_tx_pending(tx_ring) ? packets : -1; in ice_check_for_hang_subtask()
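
A hedged sketch of the hang heuristic those fragments imply: remember the completed-packet counter from the previous pass; if it has not moved, assume the queue is stuck and fire a software interrupt so the cleanup path runs. Storing -1 for an idle ring keeps it from ever matching. Helper names here are illustrative.

    struct hang_state { int prev_pkt; };

    /* returns true when the queue made no progress since the last pass */
    static bool queue_looks_hung(struct hang_state *st, int packets,
                                 unsigned int pending)
    {
        /* prev_pkt is -1 when the ring was idle last pass, and packets
         * is masked non-negative, so an idle ring can never match here
         */
        bool stalled = (st->prev_pkt == packets);

        /* re-arm: remember the count only while work is outstanding */
        st->prev_pkt = pending ? packets : -1;
        return stalled;
    }
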
178 * ice_init_mac_fltr - Set initial MAC filters
192 return -EINVAL; in ice_init_mac_fltr()
194 perm_addr = vsi->port_info->mac.perm_addr; in ice_init_mac_fltr()
199 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
211 struct ice_vsi *vsi = np->vsi; in ice_add_mac_to_sync_list()
213 if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr, in ice_add_mac_to_sync_list()
215 return -EINVAL; in ice_add_mac_to_sync_list()
221 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
233 struct ice_vsi *vsi = np->vsi; in ice_add_mac_to_unsync_list()
240 if (ether_addr_equal(addr, netdev->dev_addr)) in ice_add_mac_to_unsync_list()
243 if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr, in ice_add_mac_to_unsync_list()
245 return -EINVAL; in ice_add_mac_to_unsync_list()
251 * ice_vsi_fltr_changed - check if filter state changed
258 return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) || in ice_vsi_fltr_changed()
259 test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); in ice_vsi_fltr_changed()
263 * ice_set_promisc - Enable promiscuous mode for a given PF
272 if (vsi->type != ICE_VSI_PF) in ice_set_promisc()
277 status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, in ice_set_promisc()
280 status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_set_promisc()
283 if (status && status != -EEXIST) in ice_set_promisc()
286 netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n", in ice_set_promisc()
287 vsi->vsi_num, promisc_m); in ice_set_promisc()
292 * ice_clear_promisc - Disable promiscuous mode for a given PF
301 if (vsi->type != ICE_VSI_PF) in ice_clear_promisc()
306 status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, in ice_clear_promisc()
309 status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_clear_promisc()
313 netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n", in ice_clear_promisc()
314 vsi->vsi_num, promisc_m); in ice_clear_promisc()
319 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
327 struct device *dev = ice_pf_to_dev(vsi->back); in ice_vsi_sync_fltr()
328 struct net_device *netdev = vsi->netdev; in ice_vsi_sync_fltr()
330 struct ice_pf *pf = vsi->back; in ice_vsi_sync_fltr()
331 struct ice_hw *hw = &pf->hw; in ice_vsi_sync_fltr()
335 if (!vsi->netdev) in ice_vsi_sync_fltr()
336 return -EINVAL; in ice_vsi_sync_fltr()
338 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) in ice_vsi_sync_fltr()
341 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; in ice_vsi_sync_fltr()
342 vsi->current_netdev_flags = vsi->netdev->flags; in ice_vsi_sync_fltr()
344 INIT_LIST_HEAD(&vsi->tmp_sync_list); in ice_vsi_sync_fltr()
345 INIT_LIST_HEAD(&vsi->tmp_unsync_list); in ice_vsi_sync_fltr()
348 clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); in ice_vsi_sync_fltr()
349 clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); in ice_vsi_sync_fltr()
362 err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list); in ice_vsi_sync_fltr()
363 ice_fltr_free_list(dev, &vsi->tmp_unsync_list); in ice_vsi_sync_fltr()
367 if (err == -ENOMEM) in ice_vsi_sync_fltr()
372 err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list); in ice_vsi_sync_fltr()
373 ice_fltr_free_list(dev, &vsi->tmp_sync_list); in ice_vsi_sync_fltr()
378 if (err && err != -EEXIST) { in ice_vsi_sync_fltr()
380 /* If there is no more space for new umac filters, VSI in ice_vsi_sync_fltr()
384 if (hw->adminq.sq_last_status == LIBIE_AQ_RC_ENOSPC && in ice_vsi_sync_fltr()
386 vsi->state)) { in ice_vsi_sync_fltr()
389 vsi->vsi_num); in ice_vsi_sync_fltr()
397 if (vsi->current_netdev_flags & IFF_ALLMULTI) { in ice_vsi_sync_fltr()
400 vsi->current_netdev_flags &= ~IFF_ALLMULTI; in ice_vsi_sync_fltr()
404 /* !(vsi->current_netdev_flags & IFF_ALLMULTI) */ in ice_vsi_sync_fltr()
407 vsi->current_netdev_flags |= IFF_ALLMULTI; in ice_vsi_sync_fltr()
414 test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) { in ice_vsi_sync_fltr()
415 clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state); in ice_vsi_sync_fltr()
416 if (vsi->current_netdev_flags & IFF_PROMISC) { in ice_vsi_sync_fltr()
418 if (!ice_is_dflt_vsi_in_use(vsi->port_info)) { in ice_vsi_sync_fltr()
420 if (err && err != -EEXIST) { in ice_vsi_sync_fltr()
422 err, vsi->vsi_num); in ice_vsi_sync_fltr()
423 vsi->current_netdev_flags &= in ice_vsi_sync_fltr()
428 vlan_ops->dis_rx_filtering(vsi); in ice_vsi_sync_fltr()
446 err, vsi->vsi_num); in ice_vsi_sync_fltr()
447 vsi->current_netdev_flags |= in ice_vsi_sync_fltr()
451 if (vsi->netdev->features & in ice_vsi_sync_fltr()
453 vlan_ops->ena_rx_filtering(vsi); in ice_vsi_sync_fltr()
459 if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) { in ice_vsi_sync_fltr()
464 err, vsi->vsi_num); in ice_vsi_sync_fltr()
472 set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state); in ice_vsi_sync_fltr()
476 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); in ice_vsi_sync_fltr()
477 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); in ice_vsi_sync_fltr()
479 clear_bit(ICE_CFG_BUSY, vsi->state); in ice_vsi_sync_fltr()
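
The `changed_flags` XOR at the top of that function is the standard flag-delta idiom; a small user-space illustration (IFF_PROMISC comes from <net/if.h>, the helper name is made up):

    #include <net/if.h>

    /* XOR leaves only the bits that differ between the cached copy and
     * the live netdev flags, so a single AND detects a promisc toggle
     */
    static int promisc_toggled(unsigned int cached_flags,
                               unsigned int live_flags)
    {
        return ((cached_flags ^ live_flags) & IFF_PROMISC) != 0;
    }
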
484 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
491 if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags))) in ice_sync_fltr_subtask()
494 clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags); in ice_sync_fltr_subtask()
497 if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) && in ice_sync_fltr_subtask()
498 ice_vsi_sync_fltr(pf->vsi[v])) { in ice_sync_fltr_subtask()
500 set_bit(ICE_FLAG_FLTR_SYNC, pf->flags); in ice_sync_fltr_subtask()
506 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
516 if (pf->vsi[v]) in ice_pf_dis_all_vsi()
517 ice_dis_vsi(pf->vsi[v], locked); in ice_pf_dis_all_vsi()
520 pf->pf_agg_node[node].num_vsis = 0; in ice_pf_dis_all_vsi()
523 pf->vf_agg_node[node].num_vsis = 0; in ice_pf_dis_all_vsi()
527 * ice_prepare_for_reset - prep for reset
529 * @reset_type: reset type requested
531 * Inform or close all dependent features in prep for reset.
536 struct ice_hw *hw = &pf->hw; in ice_prepare_for_reset()
543 /* already prepared for reset */ in ice_prepare_for_reset()
544 if (test_bit(ICE_PREPARED_FOR_RESET, pf->state)) in ice_prepare_for_reset()
547 synchronize_irq(pf->oicr_irq.virq); in ice_prepare_for_reset()
551 /* Notify VFs of impending reset */ in ice_prepare_for_reset()
552 if (ice_check_sq_alive(hw, &hw->mailboxq)) in ice_prepare_for_reset()
555 /* Disable VFs until reset is completed */ in ice_prepare_for_reset()
556 mutex_lock(&pf->vfs.table_lock); in ice_prepare_for_reset()
559 mutex_unlock(&pf->vfs.table_lock); in ice_prepare_for_reset()
563 ice_eswitch_br_fdb_flush(pf->eswitch.br_offloads->bridge); in ice_prepare_for_reset()
572 /* to be on the safe side, reset orig_rss_size so that normal flow in ice_prepare_for_reset()
575 vsi->orig_rss_size = 0; in ice_prepare_for_reset()
577 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { in ice_prepare_for_reset()
579 vsi->old_ena_tc = vsi->all_enatc; in ice_prepare_for_reset()
580 vsi->old_numtc = vsi->all_numtc; in ice_prepare_for_reset()
584 /* for other reset types, channel rebuild is not supported, in ice_prepare_for_reset()
585 * hence reset the needed info in ice_prepare_for_reset()
587 vsi->old_ena_tc = 0; in ice_prepare_for_reset()
588 vsi->all_enatc = 0; in ice_prepare_for_reset()
589 vsi->old_numtc = 0; in ice_prepare_for_reset()
590 vsi->all_numtc = 0; in ice_prepare_for_reset()
591 vsi->req_txq = 0; in ice_prepare_for_reset()
592 vsi->req_rxq = 0; in ice_prepare_for_reset()
593 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags); in ice_prepare_for_reset()
594 memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt)); in ice_prepare_for_reset()
598 if (vsi->netdev) in ice_prepare_for_reset()
599 netif_device_detach(vsi->netdev); in ice_prepare_for_reset()
605 set_bit(ICE_VSI_REBUILD_PENDING, ice_get_main_vsi(pf)->state); in ice_prepare_for_reset()
608 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_prepare_for_reset()
614 if (hw->port_info) in ice_prepare_for_reset()
615 ice_sched_clear_port(hw->port_info); in ice_prepare_for_reset()
619 set_bit(ICE_PREPARED_FOR_RESET, pf->state); in ice_prepare_for_reset()
623 * ice_do_reset - Initiate one of many types of resets
625 * @reset_type: reset type requested before this function was called.
630 struct ice_hw *hw = &pf->hw; in ice_do_reset()
634 if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) { in ice_do_reset()
641 /* trigger the reset */ in ice_do_reset()
643 dev_err(dev, "reset %d failed\n", reset_type); in ice_do_reset()
644 set_bit(ICE_RESET_FAILED, pf->state); in ice_do_reset()
645 clear_bit(ICE_RESET_OICR_RECV, pf->state); in ice_do_reset()
646 clear_bit(ICE_PREPARED_FOR_RESET, pf->state); in ice_do_reset()
647 clear_bit(ICE_PFR_REQ, pf->state); in ice_do_reset()
648 clear_bit(ICE_CORER_REQ, pf->state); in ice_do_reset()
649 clear_bit(ICE_GLOBR_REQ, pf->state); in ice_do_reset()
650 wake_up(&pf->reset_wait_queue); in ice_do_reset()
655 * interrupt. So for PFR, rebuild after the reset and clear the reset- in ice_do_reset()
659 pf->pfr_count++; in ice_do_reset()
661 clear_bit(ICE_PREPARED_FOR_RESET, pf->state); in ice_do_reset()
662 clear_bit(ICE_PFR_REQ, pf->state); in ice_do_reset()
663 wake_up(&pf->reset_wait_queue); in ice_do_reset()
669 * ice_reset_subtask - Set up for resetting the device and driver
678 * of reset is pending and sets bits in pf->state indicating the reset in ice_reset_subtask()
680 * prepare for pending reset if not already (for PF software-initiated in ice_reset_subtask()
684 * for the reset now), poll for reset done, rebuild and return. in ice_reset_subtask()
686 if (test_bit(ICE_RESET_OICR_RECV, pf->state)) { in ice_reset_subtask()
687 /* Perform the largest reset requested */ in ice_reset_subtask()
688 if (test_and_clear_bit(ICE_CORER_RECV, pf->state)) in ice_reset_subtask()
690 if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state)) in ice_reset_subtask()
692 if (test_and_clear_bit(ICE_EMPR_RECV, pf->state)) in ice_reset_subtask()
694 /* return if no valid reset type requested */ in ice_reset_subtask()
700 if (ice_check_reset(&pf->hw)) { in ice_reset_subtask()
701 set_bit(ICE_RESET_FAILED, pf->state); in ice_reset_subtask()
703 /* done with reset. start rebuild */ in ice_reset_subtask()
704 pf->hw.reset_ongoing = false; in ice_reset_subtask()
709 clear_bit(ICE_RESET_OICR_RECV, pf->state); in ice_reset_subtask()
710 clear_bit(ICE_PREPARED_FOR_RESET, pf->state); in ice_reset_subtask()
711 clear_bit(ICE_PFR_REQ, pf->state); in ice_reset_subtask()
712 clear_bit(ICE_CORER_REQ, pf->state); in ice_reset_subtask()
713 clear_bit(ICE_GLOBR_REQ, pf->state); in ice_reset_subtask()
714 wake_up(&pf->reset_wait_queue); in ice_reset_subtask()
722 if (test_bit(ICE_PFR_REQ, pf->state)) { in ice_reset_subtask()
724 if (pf->lag && pf->lag->bonded) { in ice_reset_subtask()
729 if (test_bit(ICE_CORER_REQ, pf->state)) in ice_reset_subtask()
731 if (test_bit(ICE_GLOBR_REQ, pf->state)) in ice_reset_subtask()
733 /* If no valid reset type requested just return */ in ice_reset_subtask()
737 /* reset if not already down or busy */ in ice_reset_subtask()
738 if (!test_bit(ICE_DOWN, pf->state) && in ice_reset_subtask()
739 !test_bit(ICE_CFG_BUSY, pf->state)) { in ice_reset_subtask()
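
A hedged sketch of the "largest reset requested" selection visible in that fragment: the receive bits are tested in order of increasing severity, so the most invasive pending type wins, and nothing is serviced when no bit was set (enum names are illustrative).

    enum reset_kind { RST_NONE, RST_CORE, RST_GLOB, RST_EMP };

    static enum reset_kind pick_largest(bool corer, bool globr, bool empr)
    {
        enum reset_kind r = RST_NONE;

        if (corer)
            r = RST_CORE;
        if (globr)
            r = RST_GLOB;
        if (empr)
            r = RST_EMP;
        return r;   /* RST_NONE: no valid reset type requested */
    }
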
745 * ice_print_topo_conflict - print topology conflict message
750 switch (vsi->port_info->phy.link_info.topo_media_conflict) { in ice_print_topo_conflict()
756 …netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not … in ice_print_topo_conflict()
759 if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags)) in ice_print_topo_conflict()
760 …netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet … in ice_print_topo_conflict()
762 …netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was d… in ice_print_topo_conflict()
770 * ice_print_link_msg - print link up or down message
788 if (vsi->current_isup == isup) in ice_print_link_msg()
791 vsi->current_isup = isup; in ice_print_link_msg()
794 netdev_info(vsi->netdev, "NIC Link is Down\n"); in ice_print_link_msg()
798 switch (vsi->port_info->phy.link_info.link_speed) { in ice_print_link_msg()
837 switch (vsi->port_info->fc.current_mode) { in ice_print_link_msg()
856 switch (vsi->port_info->phy.link_info.fec_info) { in ice_print_link_msg()
859 fec = "RS-FEC"; in ice_print_link_msg()
862 fec = "FC-FEC/BASE-R"; in ice_print_link_msg()
870 if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) in ice_print_link_msg()
883 status = ice_aq_get_phy_caps(vsi->port_info, false, in ice_print_link_msg()
886 netdev_info(vsi->netdev, "Get phy capability failed.\n"); in ice_print_link_msg()
890 if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ || in ice_print_link_msg()
891 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ) in ice_print_link_msg()
892 fec_req = "RS-FEC"; in ice_print_link_msg()
893 else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ || in ice_print_link_msg()
894 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ) in ice_print_link_msg()
895 fec_req = "FC-FEC/BASE-R"; in ice_print_link_msg()
902 …netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s,… in ice_print_link_msg()
908 * ice_vsi_link_event - update the VSI's netdev
917 if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev) in ice_vsi_link_event()
920 if (vsi->type == ICE_VSI_PF) { in ice_vsi_link_event()
921 if (link_up == netif_carrier_ok(vsi->netdev)) in ice_vsi_link_event()
925 netif_carrier_on(vsi->netdev); in ice_vsi_link_event()
926 netif_tx_wake_all_queues(vsi->netdev); in ice_vsi_link_event()
928 netif_carrier_off(vsi->netdev); in ice_vsi_link_event()
929 netif_tx_stop_all_queues(vsi->netdev); in ice_vsi_link_event()
935 * ice_set_dflt_mib - send a default config MIB to the FW
951 struct ice_hw *hw = &pf->hw; in ice_set_dflt_mib()
966 tlv->typelen = htons(typelen); in ice_set_dflt_mib()
969 tlv->ouisubtype = htonl(ouisubtype); in ice_set_dflt_mib()
971 buf = tlv->tlvinfo; in ice_set_dflt_mib()
974 /* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0. in ice_set_dflt_mib()
975 * Octets 5 - 12 are BW values, set octet 5 to 100% BW. in ice_set_dflt_mib()
976 * Octets 13 - 20 are TSA values - leave as zeros in ice_set_dflt_mib()
982 ((char *)tlv + sizeof(tlv->typelen) + len); in ice_set_dflt_mib()
985 buf = tlv->tlvinfo; in ice_set_dflt_mib()
986 tlv->typelen = htons(typelen); in ice_set_dflt_mib()
990 tlv->ouisubtype = htonl(ouisubtype); in ice_set_dflt_mib()
993 * Octets 1 - 4 map UP to TC - all UPs map to zero in ice_set_dflt_mib()
994 * Octets 5 - 12 are BW values - set TC 0 to 100%. in ice_set_dflt_mib()
995 * Octets 13 - 20 are TSA value - leave as zeros in ice_set_dflt_mib()
1000 ((char *)tlv + sizeof(tlv->typelen) + len); in ice_set_dflt_mib()
1005 tlv->typelen = htons(typelen); in ice_set_dflt_mib()
1009 tlv->ouisubtype = htonl(ouisubtype); in ice_set_dflt_mib()
1011 /* Octet 1 left as all zeros - PFC disabled */ in ice_set_dflt_mib()
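
The `typelen`/`htons()` pairs above follow the LLDP TLV header layout; a minimal sketch of the packing, assuming the usual 7-bit type / 9-bit length split (the shift constant is illustrative, not the driver's macro):

    #include <stdint.h>

    /* 16-bit TLV header: type in the top 7 bits, length in the low 9,
     * converted to big-endian (htons) before it goes on the wire
     */
    static uint16_t tlv_typelen(uint16_t type, uint16_t len)
    {
        return (uint16_t)((type << 9) | (len & 0x1ff));
    }
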
1023 * ice_check_phy_fw_load - check if PHY FW load failed
1032 clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags); in ice_check_phy_fw_load()
1036 if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags)) in ice_check_phy_fw_load()
1041 set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags); in ice_check_phy_fw_load()
1058 clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); in ice_check_module_power()
1065 if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags)) in ice_check_module_power()
1070 set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); in ice_check_module_power()
1073 set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); in ice_check_module_power()
1078 * ice_check_link_cfg_err - check if link configuration failed
1092 * ice_link_event - process the link event
1111 phy_info = &pi->phy; in ice_link_event()
1112 phy_info->link_info_old = phy_info->link_info; in ice_link_event()
1114 old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP); in ice_link_event()
1115 old_link_speed = phy_info->link_info_old.link_speed; in ice_link_event()
1117 /* update the link info structures and re-enable link events, in ice_link_event()
1123 pi->lport, status, in ice_link_event()
1124 libie_aq_str(pi->hw->adminq.sq_last_status)); in ice_link_event()
1126 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); in ice_link_event()
1131 if (phy_info->link_info.link_info & ICE_AQ_LINK_UP) in ice_link_event()
1135 if (!vsi || !vsi->port_info) in ice_link_event()
1136 return -EINVAL; in ice_link_event()
1139 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) && in ice_link_event()
1140 !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) { in ice_link_event()
1141 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_link_event()
1150 pf->link_down_events++; in ice_link_event()
1155 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) in ice_link_event()
1170 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
1178 if (test_bit(ICE_DOWN, pf->state) || in ice_watchdog_subtask()
1179 test_bit(ICE_CFG_BUSY, pf->state)) in ice_watchdog_subtask()
1184 pf->serv_tmr_prev + pf->serv_tmr_period)) in ice_watchdog_subtask()
1187 pf->serv_tmr_prev = jiffies; in ice_watchdog_subtask()
1194 if (pf->vsi[i] && pf->vsi[i]->netdev) in ice_watchdog_subtask()
1195 ice_update_vsi_stats(pf->vsi[i]); in ice_watchdog_subtask()
1199 * ice_init_link_events - enable/initialize link events
1202 * Returns -EIO on failure, 0 on success
1212 if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) { in ice_init_link_events()
1213 dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n", in ice_init_link_events()
1214 pi->lport); in ice_init_link_events()
1215 return -EIO; in ice_init_link_events()
1219 dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n", in ice_init_link_events()
1220 pi->lport); in ice_init_link_events()
1221 return -EIO; in ice_init_link_events()
1228 * ice_handle_link_event - handle link event via ARQ
1239 link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf; in ice_handle_link_event()
1240 port_info = pf->hw.port_info; in ice_handle_link_event()
1242 return -EINVAL; in ice_handle_link_event()
1245 !!(link_data->link_info & ICE_AQ_LINK_UP), in ice_handle_link_event()
1246 le16_to_cpu(link_data->link_speed)); in ice_handle_link_event()
1255 * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
1266 * To obtain only the descriptor contents, pass a task->event with null
1268 * task->event.msg_buf with enough space ahead of time.
1273 INIT_HLIST_NODE(&task->entry); in ice_aq_prep_for_event()
1274 task->opcode = opcode; in ice_aq_prep_for_event()
1275 task->state = ICE_AQ_TASK_WAITING; in ice_aq_prep_for_event()
1277 spin_lock_bh(&pf->aq_wait_lock); in ice_aq_prep_for_event()
1278 hlist_add_head(&task->entry, &pf->aq_wait_list); in ice_aq_prep_for_event()
1279 spin_unlock_bh(&pf->aq_wait_lock); in ice_aq_prep_for_event()
1283 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
1297 enum ice_aq_task_state *state = &task->state; in ice_aq_wait_for_event()
1303 ret = wait_event_interruptible_timeout(pf->aq_wait_queue, in ice_aq_wait_for_event()
1309 err = -EINVAL; in ice_aq_wait_for_event()
1312 err = ret < 0 ? ret : -ETIMEDOUT; in ice_aq_wait_for_event()
1315 err = ret < 0 ? ret : -ECANCELED; in ice_aq_wait_for_event()
1322 err = -EINVAL; in ice_aq_wait_for_event()
1327 jiffies_to_msecs(jiffies - start), in ice_aq_wait_for_event()
1329 task->opcode); in ice_aq_wait_for_event()
1331 spin_lock_bh(&pf->aq_wait_lock); in ice_aq_wait_for_event()
1332 hlist_del(&task->entry); in ice_aq_wait_for_event()
1333 spin_unlock_bh(&pf->aq_wait_lock); in ice_aq_wait_for_event()
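
A hedged reconstruction of how the task state maps to the returned error in that wait loop, matching the -ETIMEDOUT/-ECANCELED lines matched above (enum names are illustrative):

    #include <errno.h>

    enum aq_task_state { AQ_WAITING, AQ_COMPLETE, AQ_CANCELED };

    /* wait_ret is the value returned by the timed, interruptible wait:
     * negative on signal, zero on timeout, positive on wake-up
     */
    static int aq_wait_result(enum aq_task_state state, long wait_ret)
    {
        switch (state) {
        case AQ_COMPLETE:
            return 0;
        case AQ_WAITING:
            return wait_ret < 0 ? (int)wait_ret : -ETIMEDOUT;
        case AQ_CANCELED:
            return wait_ret < 0 ? (int)wait_ret : -ECANCELED;
        }
        return -EINVAL;   /* unexpected state */
    }
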
1339 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
1350 * Note that event->msg_buf will only be duplicated if the event has a buffer
1363 spin_lock_bh(&pf->aq_wait_lock); in ice_aq_check_events()
1364 hlist_for_each_entry(task, &pf->aq_wait_list, entry) { in ice_aq_check_events()
1365 if (task->state != ICE_AQ_TASK_WAITING) in ice_aq_check_events()
1367 if (task->opcode != opcode) in ice_aq_check_events()
1370 task_ev = &task->event; in ice_aq_check_events()
1371 memcpy(&task_ev->desc, &event->desc, sizeof(event->desc)); in ice_aq_check_events()
1372 task_ev->msg_len = event->msg_len; in ice_aq_check_events()
1375 if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) { in ice_aq_check_events()
1376 memcpy(task_ev->msg_buf, event->msg_buf, in ice_aq_check_events()
1377 event->buf_len); in ice_aq_check_events()
1378 task_ev->buf_len = event->buf_len; in ice_aq_check_events()
1381 task->state = ICE_AQ_TASK_COMPLETE; in ice_aq_check_events()
1384 spin_unlock_bh(&pf->aq_wait_lock); in ice_aq_check_events()
1387 wake_up(&pf->aq_wait_queue); in ice_aq_check_events()
1391 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
1395 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
1401 spin_lock_bh(&pf->aq_wait_lock); in ice_aq_cancel_waiting_tasks()
1402 hlist_for_each_entry(task, &pf->aq_wait_list, entry) in ice_aq_cancel_waiting_tasks()
1403 task->state = ICE_AQ_TASK_CANCELED; in ice_aq_cancel_waiting_tasks()
1404 spin_unlock_bh(&pf->aq_wait_lock); in ice_aq_cancel_waiting_tasks()
1406 wake_up(&pf->aq_wait_queue); in ice_aq_cancel_waiting_tasks()
1412 * __ice_clean_ctrlq - helper function to clean controlq rings
1420 struct ice_hw *hw = &pf->hw; in __ice_clean_ctrlq()
1426 /* Do not clean control queue if/when PF reset fails */ in __ice_clean_ctrlq()
1427 if (test_bit(ICE_RESET_FAILED, pf->state)) in __ice_clean_ctrlq()
1432 cq = &hw->adminq; in __ice_clean_ctrlq()
1436 cq = &hw->sbq; in __ice_clean_ctrlq()
1440 cq = &hw->mailboxq; in __ice_clean_ctrlq()
1445 hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; in __ice_clean_ctrlq()
1452 /* check for error indications - PF_xx_AxQLEN register layout for in __ice_clean_ctrlq()
1455 val = rd32(hw, cq->rq.len); in __ice_clean_ctrlq()
1472 wr32(hw, cq->rq.len, val); in __ice_clean_ctrlq()
1475 val = rd32(hw, cq->sq.len); in __ice_clean_ctrlq()
1492 wr32(hw, cq->sq.len, val); in __ice_clean_ctrlq()
1495 event.buf_len = cq->rq_buf_size; in __ice_clean_ctrlq()
1506 if (ret == -EALREADY) in __ice_clean_ctrlq()
1532 u16 val = hw->mailboxq.num_rq_entries; in __ice_clean_ctrlq()
1544 libie_get_fwlog_data(&hw->fwlog, event.msg_buf, in __ice_clean_ctrlq()
1566 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
1576 ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); in ice_ctrlq_pending()
1577 return cq->rq.next_to_clean != ntu; in ice_ctrlq_pending()
1581 * ice_clean_adminq_subtask - clean the AdminQ rings
1586 struct ice_hw *hw = &pf->hw; in ice_clean_adminq_subtask()
1588 if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) in ice_clean_adminq_subtask()
1594 clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); in ice_clean_adminq_subtask()
1601 if (ice_ctrlq_pending(hw, &hw->adminq)) in ice_clean_adminq_subtask()
1608 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
1613 struct ice_hw *hw = &pf->hw; in ice_clean_mailboxq_subtask()
1615 if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state)) in ice_clean_mailboxq_subtask()
1621 clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); in ice_clean_mailboxq_subtask()
1623 if (ice_ctrlq_pending(hw, &hw->mailboxq)) in ice_clean_mailboxq_subtask()
1630 * ice_clean_sbq_subtask - clean the Sideband Queue rings
1635 struct ice_hw *hw = &pf->hw; in ice_clean_sbq_subtask()
1641 clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); in ice_clean_sbq_subtask()
1645 if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state)) in ice_clean_sbq_subtask()
1651 clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); in ice_clean_sbq_subtask()
1653 if (ice_ctrlq_pending(hw, &hw->sbq)) in ice_clean_sbq_subtask()
1660 * ice_service_task_schedule - schedule the service task to wake up
1667 if (!test_bit(ICE_SERVICE_DIS, pf->state) && in ice_service_task_schedule()
1668 !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) && in ice_service_task_schedule()
1669 !test_bit(ICE_NEEDS_RESTART, pf->state)) in ice_service_task_schedule()
1670 queue_work(ice_wq, &pf->serv_task); in ice_service_task_schedule()
1674 * ice_service_task_complete - finish up the service task
1679 WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state)); in ice_service_task_complete()
1681 /* force memory (pf->state) to sync before next service task */ in ice_service_task_complete()
1683 clear_bit(ICE_SERVICE_SCHED, pf->state); in ice_service_task_complete()
1687 * ice_service_task_stop - stop service task and cancel works
1697 ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state); in ice_service_task_stop()
1699 if (pf->serv_tmr.function) in ice_service_task_stop()
1700 timer_delete_sync(&pf->serv_tmr); in ice_service_task_stop()
1701 if (pf->serv_task.func) in ice_service_task_stop()
1702 cancel_work_sync(&pf->serv_task); in ice_service_task_stop()
1704 clear_bit(ICE_SERVICE_SCHED, pf->state); in ice_service_task_stop()
1709 * ice_service_task_restart - restart service task and schedule works
1716 clear_bit(ICE_SERVICE_DIS, pf->state); in ice_service_task_restart()
1721 * ice_service_timer - timer callback to schedule service task
1728 mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies)); in ice_service_timer()
1733 * ice_mdd_maybe_reset_vf - reset VF after MDD event
1740 * automatically reset the VF by enabling the private ethtool flag
1741 * mdd-auto-reset-vf.
1748 if (!test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) in ice_mdd_maybe_reset_vf()
1751 /* VF MDD event counters will be cleared by reset, so print the event in ice_mdd_maybe_reset_vf()
1752 * prior to reset. in ice_mdd_maybe_reset_vf()
1760 dev_info(dev, "PF-to-VF reset on PF %d VF %d due to MDD event\n", in ice_mdd_maybe_reset_vf()
1761 pf->hw.pf_id, vf->vf_id); in ice_mdd_maybe_reset_vf()
1766 * ice_handle_mdd_event - handle malicious driver detect event
1772 * disable the queue, the PF can be configured to reset the VF using ethtool
1773 * private flag mdd-auto-reset-vf.
1778 struct ice_hw *hw = &pf->hw; in ice_handle_mdd_event()
1783 if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) { in ice_handle_mdd_event()
1862 mutex_lock(&pf->vfs.table_lock); in ice_handle_mdd_event()
1866 reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id)); in ice_handle_mdd_event()
1868 wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF); in ice_handle_mdd_event()
1869 vf->mdd_tx_events.count++; in ice_handle_mdd_event()
1870 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); in ice_handle_mdd_event()
1873 vf->vf_id); in ice_handle_mdd_event()
1878 reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id)); in ice_handle_mdd_event()
1880 wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF); in ice_handle_mdd_event()
1881 vf->mdd_tx_events.count++; in ice_handle_mdd_event()
1882 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); in ice_handle_mdd_event()
1885 vf->vf_id); in ice_handle_mdd_event()
1890 reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id)); in ice_handle_mdd_event()
1892 wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF); in ice_handle_mdd_event()
1893 vf->mdd_tx_events.count++; in ice_handle_mdd_event()
1894 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); in ice_handle_mdd_event()
1897 vf->vf_id); in ice_handle_mdd_event()
1902 reg = rd32(hw, VP_MDET_RX(vf->vf_id)); in ice_handle_mdd_event()
1904 wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF); in ice_handle_mdd_event()
1905 vf->mdd_rx_events.count++; in ice_handle_mdd_event()
1906 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); in ice_handle_mdd_event()
1909 vf->vf_id); in ice_handle_mdd_event()
1918 mutex_unlock(&pf->vfs.table_lock); in ice_handle_mdd_event()
1924 * ice_force_phys_link_state - Force the physical link state
1943 if (!vsi || !vsi->port_info || !vsi->back) in ice_force_phys_link_state()
1944 return -EINVAL; in ice_force_phys_link_state()
1945 if (vsi->type != ICE_VSI_PF) in ice_force_phys_link_state()
1948 dev = ice_pf_to_dev(vsi->back); in ice_force_phys_link_state()
1950 pi = vsi->port_info; in ice_force_phys_link_state()
1954 return -ENOMEM; in ice_force_phys_link_state()
1960 vsi->vsi_num, retcode); in ice_force_phys_link_state()
1961 retcode = -EIO; in ice_force_phys_link_state()
1966 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) && in ice_force_phys_link_state()
1967 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) in ice_force_phys_link_state()
1974 cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL); in ice_force_phys_link_state()
1976 retcode = -ENOMEM; in ice_force_phys_link_state()
1980 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; in ice_force_phys_link_state()
1982 cfg->caps |= ICE_AQ_PHY_ENA_LINK; in ice_force_phys_link_state()
1984 cfg->caps &= ~ICE_AQ_PHY_ENA_LINK; in ice_force_phys_link_state()
1986 retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL); in ice_force_phys_link_state()
1989 vsi->vsi_num, retcode); in ice_force_phys_link_state()
1990 retcode = -EIO; in ice_force_phys_link_state()
2000 * ice_init_nvm_phy_type - Initialize the NVM PHY type
2008 struct ice_pf *pf = pi->hw->back; in ice_init_nvm_phy_type()
2013 return -ENOMEM; in ice_init_nvm_phy_type()
2023 pf->nvm_phy_type_hi = pcaps->phy_type_high; in ice_init_nvm_phy_type()
2024 pf->nvm_phy_type_lo = pcaps->phy_type_low; in ice_init_nvm_phy_type()
2032 * ice_init_link_dflt_override - Initialize link default override
2040 struct ice_pf *pf = pi->hw->back; in ice_init_link_dflt_override()
2042 ldo = &pf->link_dflt_override; in ice_init_link_dflt_override()
2046 if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS)) in ice_init_link_dflt_override()
2049 /* Enable Total Port Shutdown (override/replace link-down-on-close in ice_init_link_dflt_override()
2052 set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags); in ice_init_link_dflt_override()
2053 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); in ice_init_link_dflt_override()
2057 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
2077 struct ice_phy_info *phy = &pi->phy; in ice_init_phy_cfg_dflt_override()
2078 struct ice_pf *pf = pi->hw->back; in ice_init_phy_cfg_dflt_override()
2080 ldo = &pf->link_dflt_override; in ice_init_phy_cfg_dflt_override()
2085 cfg = &phy->curr_user_phy_cfg; in ice_init_phy_cfg_dflt_override()
2087 if (ldo->phy_type_low || ldo->phy_type_high) { in ice_init_phy_cfg_dflt_override()
2088 cfg->phy_type_low = pf->nvm_phy_type_lo & in ice_init_phy_cfg_dflt_override()
2089 cpu_to_le64(ldo->phy_type_low); in ice_init_phy_cfg_dflt_override()
2090 cfg->phy_type_high = pf->nvm_phy_type_hi & in ice_init_phy_cfg_dflt_override()
2091 cpu_to_le64(ldo->phy_type_high); in ice_init_phy_cfg_dflt_override()
2093 cfg->link_fec_opt = ldo->fec_options; in ice_init_phy_cfg_dflt_override()
2094 phy->curr_user_fec_req = ICE_FEC_AUTO; in ice_init_phy_cfg_dflt_override()
2096 set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state); in ice_init_phy_cfg_dflt_override()
2100 * ice_init_phy_user_cfg - Initialize the PHY user configuration
2116 struct ice_phy_info *phy = &pi->phy; in ice_init_phy_user_cfg()
2117 struct ice_pf *pf = pi->hw->back; in ice_init_phy_user_cfg()
2120 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) in ice_init_phy_user_cfg()
2121 return -EIO; in ice_init_phy_user_cfg()
2125 return -ENOMEM; in ice_init_phy_user_cfg()
2127 if (ice_fw_supports_report_dflt_cfg(pi->hw)) in ice_init_phy_user_cfg()
2138 ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg); in ice_init_phy_user_cfg()
2141 if (ice_fw_supports_link_override(pi->hw) && in ice_init_phy_user_cfg()
2142 !(pcaps->module_compliance_enforcement & in ice_init_phy_user_cfg()
2144 set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags); in ice_init_phy_user_cfg()
2150 if (!ice_fw_supports_report_dflt_cfg(pi->hw) && in ice_init_phy_user_cfg()
2151 (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) { in ice_init_phy_user_cfg()
2160 phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps, in ice_init_phy_user_cfg()
2161 pcaps->link_fec_options); in ice_init_phy_user_cfg()
2162 phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps); in ice_init_phy_user_cfg()
2165 phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M; in ice_init_phy_user_cfg()
2166 set_bit(ICE_PHY_INIT_COMPLETE, pf->state); in ice_init_phy_user_cfg()
2173 * ice_configure_phy - configure PHY
2182 struct device *dev = ice_pf_to_dev(vsi->back); in ice_configure_phy()
2183 struct ice_port_info *pi = vsi->port_info; in ice_configure_phy()
2186 struct ice_phy_info *phy = &pi->phy; in ice_configure_phy()
2187 struct ice_pf *pf = vsi->back; in ice_configure_phy()
2191 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) in ice_configure_phy()
2192 return -ENOMEDIUM; in ice_configure_phy()
2196 if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) && in ice_configure_phy()
2197 phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA) in ice_configure_phy()
2198 return -EPERM; in ice_configure_phy()
2200 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) in ice_configure_phy()
2205 return -ENOMEM; in ice_configure_phy()
2212 vsi->vsi_num, err); in ice_configure_phy()
2219 if (pcaps->caps & ICE_AQC_PHY_EN_LINK && in ice_configure_phy()
2220 ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg)) in ice_configure_phy()
2225 if (ice_fw_supports_report_dflt_cfg(pi->hw)) in ice_configure_phy()
2233 vsi->vsi_num, err); in ice_configure_phy()
2239 err = -ENOMEM; in ice_configure_phy()
2245 /* Speed - If default override pending, use curr_user_phy_cfg set in in ice_configure_phy()
2249 vsi->back->state)) { in ice_configure_phy()
2250 cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low; in ice_configure_phy()
2251 cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high; in ice_configure_phy()
2256 pi->phy.curr_user_speed_req); in ice_configure_phy()
2257 cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low); in ice_configure_phy()
2258 cfg->phy_type_high = pcaps->phy_type_high & in ice_configure_phy()
2263 if (!cfg->phy_type_low && !cfg->phy_type_high) { in ice_configure_phy()
2264 cfg->phy_type_low = pcaps->phy_type_low; in ice_configure_phy()
2265 cfg->phy_type_high = pcaps->phy_type_high; in ice_configure_phy()
2269 ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req); in ice_configure_phy()
2272 if (cfg->link_fec_opt != in ice_configure_phy()
2273 (cfg->link_fec_opt & pcaps->link_fec_options)) { in ice_configure_phy()
2274 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; in ice_configure_phy()
2275 cfg->link_fec_opt = pcaps->link_fec_options; in ice_configure_phy()
2278 /* Flow Control - always supported; no need to check against in ice_configure_phy()
2281 ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req); in ice_configure_phy()
2284 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; in ice_configure_phy()
2286 err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL); in ice_configure_phy()
2289 vsi->vsi_num, err); in ice_configure_phy()
2298 * ice_check_media_subtask - Check for media
2311 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags)) in ice_check_media_subtask()
2319 pi = vsi->port_info; in ice_check_media_subtask()
2324 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); in ice_check_media_subtask()
2326 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { in ice_check_media_subtask()
2327 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) in ice_check_media_subtask()
2330 /* PHY settings are reset on media insertion, reconfigure in ice_check_media_subtask()
2333 if (test_bit(ICE_VSI_DOWN, vsi->state) && in ice_check_media_subtask()
2334 test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) in ice_check_media_subtask()
2339 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_check_media_subtask()
2351 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); in ice_service_task_recovery_mode()
2356 mod_timer(&pf->serv_tmr, jiffies + msecs_to_jiffies(100)); in ice_service_task_recovery_mode()
2360 * ice_service_task - manage and run subtasks
2368 if (pf->health_reporters.tx_hang_buf.tx_ring) { in ice_service_task()
2370 pf->health_reporters.tx_hang_buf.tx_ring = NULL; in ice_service_task()
2375 /* bail if a reset/recovery cycle is pending or rebuild failed */ in ice_service_task()
2376 if (ice_is_reset_in_progress(pf->state) || in ice_service_task()
2377 test_bit(ICE_SUSPENDED, pf->state) || in ice_service_task()
2378 test_bit(ICE_NEEDS_RESTART, pf->state)) { in ice_service_task()
2383 if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) { in ice_service_task()
2388 set_bit(IIDC_RDMA_EVENT_CRIT_ERR, event->type); in ice_service_task()
2390 swap(event->reg, pf->oicr_err_reg); in ice_service_task()
2399 if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags)) in ice_service_task()
2403 if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) in ice_service_task()
2406 if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) { in ice_service_task()
2411 set_bit(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE, event->type); in ice_service_task()
2439 * or there is more work to be done, reset the service timer to in ice_service_task()
2442 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || in ice_service_task()
2443 test_bit(ICE_MDD_EVENT_PENDING, pf->state) || in ice_service_task()
2444 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) || in ice_service_task()
2445 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) || in ice_service_task()
2446 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) || in ice_service_task()
2447 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) || in ice_service_task()
2448 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) in ice_service_task()
2449 mod_timer(&pf->serv_tmr, jiffies); in ice_service_task()
2453 * ice_set_ctrlq_len - helper function to set controlq length
2458 hw->adminq.num_rq_entries = ICE_AQ_LEN; in ice_set_ctrlq_len()
2459 hw->adminq.num_sq_entries = ICE_AQ_LEN; in ice_set_ctrlq_len()
2460 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; in ice_set_ctrlq_len()
2461 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; in ice_set_ctrlq_len()
2462 hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M; in ice_set_ctrlq_len()
2463 hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN; in ice_set_ctrlq_len()
2464 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; in ice_set_ctrlq_len()
2465 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; in ice_set_ctrlq_len()
2466 hw->sbq.num_rq_entries = ICE_SBQ_LEN; in ice_set_ctrlq_len()
2467 hw->sbq.num_sq_entries = ICE_SBQ_LEN; in ice_set_ctrlq_len()
2468 hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN; in ice_set_ctrlq_len()
2469 hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN; in ice_set_ctrlq_len()
2473 * ice_schedule_reset - schedule a reset
2475 * @reset: reset being requested
2477 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) in ice_schedule_reset() argument
2481 /* bail out if earlier reset has failed */ in ice_schedule_reset()
2482 if (test_bit(ICE_RESET_FAILED, pf->state)) { in ice_schedule_reset()
2483 dev_dbg(dev, "earlier reset has failed\n"); in ice_schedule_reset()
2484 return -EIO; in ice_schedule_reset()
2486 /* bail if reset/recovery already in progress */ in ice_schedule_reset()
2487 if (ice_is_reset_in_progress(pf->state)) { in ice_schedule_reset()
2488 dev_dbg(dev, "Reset already in progress\n"); in ice_schedule_reset()
2489 return -EBUSY; in ice_schedule_reset()
2492 switch (reset) { in ice_schedule_reset()
2494 set_bit(ICE_PFR_REQ, pf->state); in ice_schedule_reset()
2497 set_bit(ICE_CORER_REQ, pf->state); in ice_schedule_reset()
2500 set_bit(ICE_GLOBR_REQ, pf->state); in ice_schedule_reset()
2503 return -EINVAL; in ice_schedule_reset()
2511 * ice_vsi_ena_irq - Enable IRQ for the given VSI
2516 struct ice_hw *hw = &vsi->back->hw; in ice_vsi_ena_irq()
2520 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); in ice_vsi_ena_irq()
2527 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2533 int q_vectors = vsi->num_q_vectors; in ice_vsi_req_irq_msix()
2534 struct ice_pf *pf = vsi->back; in ice_vsi_req_irq_msix()
2543 struct ice_q_vector *q_vector = vsi->q_vectors[vector]; in ice_vsi_req_irq_msix()
2545 irq_num = q_vector->irq.virq; in ice_vsi_req_irq_msix()
2547 if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) { in ice_vsi_req_irq_msix()
2548 snprintf(q_vector->name, sizeof(q_vector->name) - 1, in ice_vsi_req_irq_msix()
2549 "%s-%s-%d", basename, "TxRx", rx_int_idx++); in ice_vsi_req_irq_msix()
2551 } else if (q_vector->rx.rx_ring) { in ice_vsi_req_irq_msix()
2552 snprintf(q_vector->name, sizeof(q_vector->name) - 1, in ice_vsi_req_irq_msix()
2553 "%s-%s-%d", basename, "rx", rx_int_idx++); in ice_vsi_req_irq_msix()
2554 } else if (q_vector->tx.tx_ring) { in ice_vsi_req_irq_msix()
2555 snprintf(q_vector->name, sizeof(q_vector->name) - 1, in ice_vsi_req_irq_msix()
2556 "%s-%s-%d", basename, "tx", tx_int_idx++); in ice_vsi_req_irq_msix()
2561 if (vsi->type == ICE_VSI_CTRL && vsi->vf) in ice_vsi_req_irq_msix()
2562 err = devm_request_irq(dev, irq_num, vsi->irq_handler, in ice_vsi_req_irq_msix()
2563 IRQF_SHARED, q_vector->name, in ice_vsi_req_irq_msix()
2566 err = devm_request_irq(dev, irq_num, vsi->irq_handler, in ice_vsi_req_irq_msix()
2567 0, q_vector->name, q_vector); in ice_vsi_req_irq_msix()
2569 netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n", in ice_vsi_req_irq_msix()
2577 netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n", in ice_vsi_req_irq_msix()
2578 vsi->vsi_num, ERR_PTR(err)); in ice_vsi_req_irq_msix()
2582 vsi->irqs_ready = true; in ice_vsi_req_irq_msix()
2586 while (vector--) { in ice_vsi_req_irq_msix()
2587 irq_num = vsi->q_vectors[vector]->irq.virq; in ice_vsi_req_irq_msix()
2588 devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]); in ice_vsi_req_irq_msix()
2594 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2601 struct device *dev = ice_pf_to_dev(vsi->back); in ice_xdp_alloc_setup_rings()
2606 u16 xdp_q_idx = vsi->alloc_txq + i; in ice_xdp_alloc_setup_rings()
2620 xdp_ring->ring_stats = ring_stats; in ice_xdp_alloc_setup_rings()
2621 xdp_ring->q_index = xdp_q_idx; in ice_xdp_alloc_setup_rings()
2622 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx]; in ice_xdp_alloc_setup_rings()
2623 xdp_ring->vsi = vsi; in ice_xdp_alloc_setup_rings()
2624 xdp_ring->netdev = NULL; in ice_xdp_alloc_setup_rings()
2625 xdp_ring->dev = dev; in ice_xdp_alloc_setup_rings()
2626 xdp_ring->count = vsi->num_tx_desc; in ice_xdp_alloc_setup_rings()
2627 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring); in ice_xdp_alloc_setup_rings()
2631 spin_lock_init(&xdp_ring->tx_lock); in ice_xdp_alloc_setup_rings()
2632 for (j = 0; j < xdp_ring->count; j++) { in ice_xdp_alloc_setup_rings()
2634 tx_desc->cmd_type_offset_bsz = 0; in ice_xdp_alloc_setup_rings()
2641 for (; i >= 0; i--) { in ice_xdp_alloc_setup_rings()
2642 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) { in ice_xdp_alloc_setup_rings()
2643 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu); in ice_xdp_alloc_setup_rings()
2644 vsi->xdp_rings[i]->ring_stats = NULL; in ice_xdp_alloc_setup_rings()
2645 ice_free_tx_ring(vsi->xdp_rings[i]); in ice_xdp_alloc_setup_rings()
2648 return -ENOMEM; in ice_xdp_alloc_setup_rings()
2652 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2661 old_prog = xchg(&vsi->xdp_prog, prog); in ice_vsi_assign_bpf_prog()
2663 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); in ice_vsi_assign_bpf_prog()
2675 return vsi->xdp_rings[qid % vsi->num_xdp_txq]; in ice_xdp_ring_from_qid()
2677 q_vector = vsi->rx_rings[qid]->q_vector; in ice_xdp_ring_from_qid()
2678 ice_for_each_tx_ring(ring, q_vector->tx) in ice_xdp_ring_from_qid()
2686 * ice_map_xdp_rings - Map XDP rings to interrupt vectors
2694 int xdp_rings_rem = vsi->num_xdp_txq; in ice_map_xdp_rings()
2699 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; in ice_map_xdp_rings()
2703 vsi->num_q_vectors - v_idx); in ice_map_xdp_rings()
2704 q_base = vsi->num_xdp_txq - xdp_rings_rem; in ice_map_xdp_rings()
2707 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id]; in ice_map_xdp_rings()
2709 xdp_ring->q_vector = q_vector; in ice_map_xdp_rings()
2710 xdp_ring->next = q_vector->tx.tx_ring; in ice_map_xdp_rings()
2711 q_vector->tx.tx_ring = xdp_ring; in ice_map_xdp_rings()
2713 xdp_rings_rem -= xdp_rings_per_v; in ice_map_xdp_rings()
2717 vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi, in ice_map_xdp_rings()
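
The DIV_ROUND_UP in that loop spreads the remaining XDP rings over the remaining vectors so earlier vectors absorb any remainder; a self-contained demo under assumed numbers:

    #include <stdio.h>

    int main(void)
    {
        int vectors = 4, rings = 10, rem = rings;

        for (int v = 0; v < vectors && rem; v++) {
            /* ceiling division: remaining rings over remaining vectors */
            int per_v = (rem + (vectors - v) - 1) / (vectors - v);

            printf("vector %d: %d rings starting at ring %d\n",
                   v, per_v, rings - rem);
            rem -= per_v;
        }
        return 0;   /* prints 3, 3, 2, 2 for this example */
    }
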
2724 * ice_unmap_xdp_rings - Unmap XDP rings from interrupt vectors
2732 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; in ice_unmap_xdp_rings()
2735 ice_for_each_tx_ring(ring, q_vector->tx) in ice_unmap_xdp_rings()
2736 if (!ring->tx_buf || !ice_ring_is_xdp(ring)) in ice_unmap_xdp_rings()
2740 q_vector->tx.tx_ring = ring; in ice_unmap_xdp_rings()
2745 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2756 struct ice_pf *pf = vsi->back; in ice_prepare_xdp_rings()
2758 .qs_mutex = &pf->avail_q_mutex, in ice_prepare_xdp_rings()
2759 .pf_map = pf->avail_txqs, in ice_prepare_xdp_rings()
2760 .pf_map_size = pf->max_pf_txqs, in ice_prepare_xdp_rings()
2761 .q_count = vsi->num_xdp_txq, in ice_prepare_xdp_rings()
2763 .vsi_map = vsi->txq_map, in ice_prepare_xdp_rings()
2764 .vsi_map_offset = vsi->alloc_txq, in ice_prepare_xdp_rings()
2771 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, in ice_prepare_xdp_rings()
2772 sizeof(*vsi->xdp_rings), GFP_KERNEL); in ice_prepare_xdp_rings()
2773 if (!vsi->xdp_rings) in ice_prepare_xdp_rings()
2774 return -ENOMEM; in ice_prepare_xdp_rings()
2776 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode; in ice_prepare_xdp_rings()
2781 netdev_warn(vsi->netdev, in ice_prepare_xdp_rings()
2787 /* omit the scheduler update if in reset path; XDP queues will be in ice_prepare_xdp_rings()
2799 for (i = 0; i < vsi->tc_cfg.numtc; i++) in ice_prepare_xdp_rings()
2800 max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq; in ice_prepare_xdp_rings()
2802 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, in ice_prepare_xdp_rings()
2811 * this flow is a subject of both ethtool -L and ndo_bpf flows; in ice_prepare_xdp_rings()
2812 * VSI rebuild that happens under ethtool -L can expose us to in ice_prepare_xdp_rings()
2814 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put in ice_prepare_xdp_rings()
2827 if (vsi->xdp_rings[i]) { in ice_prepare_xdp_rings()
2828 kfree_rcu(vsi->xdp_rings[i], rcu); in ice_prepare_xdp_rings()
2829 vsi->xdp_rings[i] = NULL; in ice_prepare_xdp_rings()
2833 mutex_lock(&pf->avail_q_mutex); in ice_prepare_xdp_rings()
2835 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); in ice_prepare_xdp_rings()
2836 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; in ice_prepare_xdp_rings()
2838 mutex_unlock(&pf->avail_q_mutex); in ice_prepare_xdp_rings()
2840 devm_kfree(dev, vsi->xdp_rings); in ice_prepare_xdp_rings()
2841 vsi->xdp_rings = NULL; in ice_prepare_xdp_rings()
2843 return -ENOMEM; in ice_prepare_xdp_rings()
2847 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2857 struct ice_pf *pf = vsi->back; in ice_destroy_xdp_rings()
2860 /* q_vectors are freed in reset path so there's no point in detaching in ice_destroy_xdp_rings()
2869 mutex_lock(&pf->avail_q_mutex); in ice_destroy_xdp_rings()
2871 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); in ice_destroy_xdp_rings()
2872 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; in ice_destroy_xdp_rings()
2874 mutex_unlock(&pf->avail_q_mutex); in ice_destroy_xdp_rings()
2877 if (vsi->xdp_rings[i]) { in ice_destroy_xdp_rings()
2878 if (vsi->xdp_rings[i]->desc) { in ice_destroy_xdp_rings()
2880 ice_free_tx_ring(vsi->xdp_rings[i]); in ice_destroy_xdp_rings()
2882 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu); in ice_destroy_xdp_rings()
2883 vsi->xdp_rings[i]->ring_stats = NULL; in ice_destroy_xdp_rings()
2884 kfree_rcu(vsi->xdp_rings[i], rcu); in ice_destroy_xdp_rings()
2885 vsi->xdp_rings[i] = NULL; in ice_destroy_xdp_rings()
2888 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings); in ice_destroy_xdp_rings()
2889 vsi->xdp_rings = NULL; in ice_destroy_xdp_rings()
2902 for (i = 0; i < vsi->tc_cfg.numtc; i++) in ice_destroy_xdp_rings()
2903 max_txqs[i] = vsi->num_txq; in ice_destroy_xdp_rings()
2906 vsi->num_xdp_txq = 0; in ice_destroy_xdp_rings()
2908 return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, in ice_destroy_xdp_rings()
2913 * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2921 struct ice_rx_ring *rx_ring = vsi->rx_rings[i]; in ice_vsi_rx_napi_schedule()
2923 if (READ_ONCE(rx_ring->xsk_pool)) in ice_vsi_rx_napi_schedule()
2924 napi_schedule(&rx_ring->q_vector->napi); in ice_vsi_rx_napi_schedule()
2929 * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have
2933 * -ENOMEM otherwise
2937 u16 avail = ice_get_avail_txq_count(vsi->back); in ice_vsi_determine_xdp_res()
2941 return -ENOMEM; in ice_vsi_determine_xdp_res()
2943 if (vsi->type == ICE_VSI_SF) in ice_vsi_determine_xdp_res()
2944 avail = vsi->alloc_txq; in ice_vsi_determine_xdp_res()
2946 vsi->num_xdp_txq = min_t(u16, avail, cpus); in ice_vsi_determine_xdp_res()
2948 if (vsi->num_xdp_txq < cpus) in ice_vsi_determine_xdp_res()
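
A one-function sketch of the sizing rule those lines suggest: the XDP Tx queue count is the smaller of the spare PF Tx queues and the online CPUs, with zero of either treated as failure (-ENOMEM, per the doc comment above). The helper name is made up.

    #include <errno.h>

    static int xdp_txq_count(unsigned int avail_txqs, unsigned int cpus)
    {
        if (!avail_txqs || !cpus)
            return -ENOMEM;
        return (int)(avail_txqs < cpus ? avail_txqs : cpus);
    }
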
2955 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2960 if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) in ice_max_xdp_frame_size()
2967 * ice_xdp_setup_prog - Add or remove XDP eBPF program
2976 unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD; in ice_xdp_setup_prog()
2980 if (prog && !prog->aux->xdp_has_frags) { in ice_xdp_setup_prog()
2984 return -EOPNOTSUPP; in ice_xdp_setup_prog()
2990 test_bit(ICE_VSI_REBUILD_PENDING, vsi->state)) { in ice_xdp_setup_prog()
2995 if_running = netif_running(vsi->netdev) && in ice_xdp_setup_prog()
2996 !test_and_set_bit(ICE_VSI_DOWN, vsi->state); in ice_xdp_setup_prog()
3020 xdp_features_set_redirect_target(vsi->netdev, true); in ice_xdp_setup_prog()
3021 /* reallocate Rx queues that are used for zero-copy */ in ice_xdp_setup_prog()
3026 xdp_features_clear_redirect_target(vsi->netdev); in ice_xdp_setup_prog()
3030 /* reallocate Rx queues that were used for zero-copy */ in ice_xdp_setup_prog()
3043 return (ret || xdp_ring_err) ? -ENOMEM : 0; in ice_xdp_setup_prog()
3047 * ice_xdp_safe_mode - XDP handler for safe mode
3054 NL_SET_ERR_MSG_MOD(xdp->extack, in ice_xdp_safe_mode()
3057 return -EOPNOTSUPP; in ice_xdp_safe_mode()
3061 * ice_xdp - implements XDP handler
3068 struct ice_vsi *vsi = np->vsi; in ice_xdp()
3071 if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF) { in ice_xdp()
3072 NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF or SF VSI"); in ice_xdp()
3073 return -EINVAL; in ice_xdp()
3076 mutex_lock(&vsi->xdp_state_lock); in ice_xdp()
3078 switch (xdp->command) { in ice_xdp()
3080 ret = ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack); in ice_xdp()
3083 ret = ice_xsk_pool_setup(vsi, xdp->xsk.pool, xdp->xsk.queue_id); in ice_xdp()
3086 ret = -EINVAL; in ice_xdp()
3089 mutex_unlock(&vsi->xdp_state_lock); in ice_xdp()
3094 * ice_ena_misc_vector - enable the non-queue interrupts
3099 struct ice_hw *hw = &pf->hw; in ice_ena_misc_vector()
3103 /* Disable anti-spoof detection interrupt to prevent spurious event in ice_ena_misc_vector()
3104 * interrupts during a function reset. Anti-spoof functionality is in ice_ena_misc_vector()
3127 wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index), in ice_ena_misc_vector()
3130 if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) in ice_ena_misc_vector()
3133 wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset), in ice_ena_misc_vector()
3138 * ice_ll_ts_intr - ll_ts interrupt handler
3152 hw = &pf->hw; in ice_ll_ts_intr()
3153 tx = &pf->ptp.port.tx; in ice_ll_ts_intr()
3154 spin_lock_irqsave(&tx->lock, flags); in ice_ll_ts_intr()
3155 if (tx->init) { in ice_ll_ts_intr()
3158 idx = find_next_bit_wrap(tx->in_use, tx->len, in ice_ll_ts_intr()
3159 tx->last_ll_ts_idx_read + 1); in ice_ll_ts_intr()
3160 if (idx != tx->len) in ice_ll_ts_intr()
3163 spin_unlock_irqrestore(&tx->lock, flags); in ice_ll_ts_intr()
3168 wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset), in ice_ll_ts_intr()
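
find_next_bit_wrap() in that fragment scans the in-use bitmap starting just past the last slot read and wraps to the start; a plain-C equivalent for a single-word bitmap, with illustrative names:

    /* returns the index of the next set bit at or after start, wrapping
     * around; returns len when no bit is set
     */
    static int next_set_bit_wrap(unsigned long map, int len, int start)
    {
        for (int off = 0; off < len; off++) {
            int idx = (start + off) % len;

            if (map & (1UL << idx))
                return idx;
        }
        return len;
    }
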
3175 * ice_misc_intr - misc interrupt handler
3183 struct ice_hw *hw = &pf->hw; in ice_misc_intr()
3188 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); in ice_misc_intr()
3189 set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); in ice_misc_intr()
3190 set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); in ice_misc_intr()
3197 pf->sw_int_count++; in ice_misc_intr()
3202 set_bit(ICE_MDD_EVENT_PENDING, pf->state); in ice_misc_intr()
3206 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { in ice_misc_intr()
3213 set_bit(ICE_VFLR_EVENT_PENDING, pf->state); in ice_misc_intr()
3218 u32 reset; in ice_misc_intr() local
3220 /* we have a reset warning */ in ice_misc_intr()
3222 reset = FIELD_GET(GLGEN_RSTAT_RESET_TYPE_M, in ice_misc_intr()
3225 if (reset == ICE_RESET_CORER) in ice_misc_intr()
3226 pf->corer_count++; in ice_misc_intr()
3227 else if (reset == ICE_RESET_GLOBR) in ice_misc_intr()
3228 pf->globr_count++; in ice_misc_intr()
3229 else if (reset == ICE_RESET_EMPR) in ice_misc_intr()
3230 pf->empr_count++; in ice_misc_intr()
3232 dev_dbg(dev, "Invalid reset type %d\n", reset); in ice_misc_intr()
3234 /* If a reset cycle isn't already in progress, we set a bit in in ice_misc_intr()
3235 * pf->state so that the service task can start a reset/rebuild. in ice_misc_intr()
3237 if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) { in ice_misc_intr()
3238 if (reset == ICE_RESET_CORER) in ice_misc_intr()
3239 set_bit(ICE_CORER_RECV, pf->state); in ice_misc_intr()
3240 else if (reset == ICE_RESET_GLOBR) in ice_misc_intr()
3241 set_bit(ICE_GLOBR_RECV, pf->state); in ice_misc_intr()
3243 set_bit(ICE_EMPR_RECV, pf->state); in ice_misc_intr()
3246 * hw->reset_ongoing indicates whether the hardware is in ice_misc_intr()
3247 * in reset. This is set to true when a reset interrupt in ice_misc_intr()
3249 * has determined that the hardware is out of reset. in ice_misc_intr()
3251 * ICE_RESET_OICR_RECV in pf->state indicates in ice_misc_intr()
3252 * that a post reset rebuild is required before the in ice_misc_intr()
3255 * As this is the start of the reset/rebuild cycle, set in ice_misc_intr()
3258 hw->reset_ongoing = true; in ice_misc_intr()
3269 u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; in ice_misc_intr()
3276 pf->ptp.ext_ts_irq |= gltsyn_stat & in ice_misc_intr()
3287 pf->oicr_err_reg |= oicr; in ice_misc_intr()
3288 set_bit(ICE_AUX_ERR_PENDING, pf->state); in ice_misc_intr()
3297 * reset the device. in ice_misc_intr()
3301 set_bit(ICE_PFR_REQ, pf->state); in ice_misc_intr()
3312 * ice_misc_intr_thread_fn - misc interrupt thread function
3321 hw = &pf->hw; in ice_misc_intr_thread_fn()
3323 if (ice_is_reset_in_progress(pf->state)) in ice_misc_intr_thread_fn()
3326 if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) { in ice_misc_intr_thread_fn()
3328 * re-arm the interrupt to trigger again. in ice_misc_intr_thread_fn()
3343 * ice_dis_ctrlq_interrupts - disable control queue interrupts
3367 * ice_free_irq_msix_ll_ts - Unroll ll_ts vector setup
3372 int irq_num = pf->ll_ts_irq.virq; in ice_free_irq_msix_ll_ts()
3377 ice_free_irq(pf, pf->ll_ts_irq); in ice_free_irq_msix_ll_ts()
3381 * ice_free_irq_msix_misc - Unroll misc vector setup
3386 int misc_irq_num = pf->oicr_irq.virq; in ice_free_irq_msix_misc()
3387 struct ice_hw *hw = &pf->hw; in ice_free_irq_msix_misc()
3398 ice_free_irq(pf, pf->oicr_irq); in ice_free_irq_msix_misc()
3399 if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) in ice_free_irq_msix_misc()
3404 * ice_ena_ctrlq_interrupts - enable control queue interrupts
3426 if (!hw->dev_caps.ts_dev_info.ts_ll_int_read) { in ice_ena_ctrlq_interrupts()
3437 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
3441 * non-queue interrupts, e.g. AdminQ and errors. This is not used
3447 struct ice_hw *hw = &pf->hw; in ice_req_irq_msix_misc()
3452 if (!pf->int_name[0]) in ice_req_irq_msix_misc()
3453 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", in ice_req_irq_msix_misc()
3456 if (!pf->int_name_ll_ts[0]) in ice_req_irq_msix_misc()
3457 snprintf(pf->int_name_ll_ts, sizeof(pf->int_name_ll_ts) - 1, in ice_req_irq_msix_misc()
3458 "%s-%s:ll_ts", dev_driver_string(dev), dev_name(dev)); in ice_req_irq_msix_misc()
3460 * lost during reset. Note that this function is called only during in ice_req_irq_msix_misc()
3461 * rebuild path and not while reset is in progress. in ice_req_irq_msix_misc()
3463 if (ice_is_reset_in_progress(pf->state)) in ice_req_irq_msix_misc()
3471 pf->oicr_irq = irq; in ice_req_irq_msix_misc()
3472 err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr, in ice_req_irq_msix_misc()
3474 pf->int_name, pf); in ice_req_irq_msix_misc()
3477 pf->int_name, err); in ice_req_irq_msix_misc()
3478 ice_free_irq(pf, pf->oicr_irq); in ice_req_irq_msix_misc()
3483 if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) in ice_req_irq_msix_misc()
3490 pf->ll_ts_irq = irq; in ice_req_irq_msix_misc()
3491 err = devm_request_irq(dev, pf->ll_ts_irq.virq, ice_ll_ts_intr, 0, in ice_req_irq_msix_misc()
3492 pf->int_name_ll_ts, pf); in ice_req_irq_msix_misc()
3495 pf->int_name_ll_ts, err); in ice_req_irq_msix_misc()
3496 ice_free_irq(pf, pf->ll_ts_irq); in ice_req_irq_msix_misc()
3503 ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index); in ice_req_irq_msix_misc()
3506 if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) in ice_req_irq_msix_misc()
3508 ((pf->ll_ts_irq.index + pf_intr_start_offset) & in ice_req_irq_msix_misc()
3510 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index), in ice_req_irq_msix_misc()
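/*
 * Editor's sketch tying the two halves together: the misc vector is
 * requested with a hard handler plus a threaded handler in a single
 * devm-managed call, so the free happens automatically on detach. The
 * wrapper below is illustrative; the flags value of 0 is an assumption.
 */
static int example_request_misc(struct device *dev, struct ice_pf *pf)
{
	return devm_request_threaded_irq(dev, pf->oicr_irq.virq,
					 ice_misc_intr,		  /* hard half */
					 ice_misc_intr_thread_fn, /* thread half */
					 0, pf->int_name, pf);
}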
3520 * ice_set_ops - set netdev and ethtools ops for the given netdev
3525 struct net_device *netdev = vsi->netdev; in ice_set_ops()
3529 netdev->netdev_ops = &ice_netdev_safe_mode_ops; in ice_set_ops()
3534 netdev->netdev_ops = &ice_netdev_ops; in ice_set_ops()
3535 netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic; in ice_set_ops()
3536 netdev->xdp_metadata_ops = &ice_xdp_md_ops; in ice_set_ops()
3539 if (vsi->type != ICE_VSI_PF) in ice_set_ops()
3542 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | in ice_set_ops()
3545 netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD; in ice_set_ops()
3549 * ice_set_netdev_features - set features for the given netdev
3555 bool is_dvm_ena = ice_is_dvm_ena(&pf->hw); in ice_set_netdev_features()
3563 netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA; in ice_set_netdev_features()
3564 netdev->hw_features = netdev->features; in ice_set_netdev_features()
3598 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | in ice_set_netdev_features()
3601 netdev->hw_features = dflt_features | csumo_features | in ice_set_netdev_features()
3605 netdev->mpls_features = NETIF_F_HW_CSUM | in ice_set_netdev_features()
3610 netdev->features |= netdev->hw_features; in ice_set_netdev_features()
3612 netdev->hw_features |= NETIF_F_HW_TC; in ice_set_netdev_features()
3613 netdev->hw_features |= NETIF_F_LOOPBACK; in ice_set_netdev_features()
3616 netdev->hw_enc_features |= dflt_features | csumo_features | in ice_set_netdev_features()
3618 netdev->vlan_features |= dflt_features | csumo_features | in ice_set_netdev_features()
3627 netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX | in ice_set_netdev_features()
3633 netdev->hw_features |= NETIF_F_RXFCS; in ice_set_netdev_features()
3642 netdev->hw_features |= NETIF_F_HW_CSUM; in ice_set_netdev_features()
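/*
 * Editor's sketch of the feature plumbing above: hw_features is the
 * superset the user may toggle via `ethtool -K`, features is what is
 * currently enabled. The values below are illustrative only.
 */
static void example_feature_setup(struct net_device *netdev,
				  netdev_features_t dflt_features)
{
	netdev->hw_features = dflt_features | NETIF_F_HW_TC; /* toggleable */
	netdev->features |= netdev->hw_features;   /* enabled by default */
}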
3648 * ice_fill_rss_lut - Fill the RSS lookup table with default values
3662 * ice_pf_vsi_setup - Set up a PF VSI
3696 * ice_ctrl_vsi_setup - Set up a control VSI
3716 * ice_lb_vsi_setup - Set up a loopback VSI
3736 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3747 struct ice_vsi *vsi = np->vsi; in ice_vlan_rx_add_vid()
3751 /* VLAN 0 is added by default during load/reset */ in ice_vlan_rx_add_vid()
3755 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) in ice_vlan_rx_add_vid()
3759 * all-multicast is currently enabled. in ice_vlan_rx_add_vid()
3761 if (vsi->current_netdev_flags & IFF_ALLMULTI) { in ice_vlan_rx_add_vid()
3762 ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_add_vid()
3775 ret = vlan_ops->add_vlan(vsi, &vlan); in ice_vlan_rx_add_vid()
3779 /* If all-multicast is currently enabled and this VLAN ID is the only in ice_vlan_rx_add_vid()
3780 * one besides VLAN 0, we have to update the look-up type of the multicast in ice_vlan_rx_add_vid()
3781 * promisc rule for VLAN 0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN. in ice_vlan_rx_add_vid()
3783 if ((vsi->current_netdev_flags & IFF_ALLMULTI) && in ice_vlan_rx_add_vid()
3785 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_add_vid()
3787 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_add_vid()
3792 clear_bit(ICE_CFG_BUSY, vsi->state); in ice_vlan_rx_add_vid()
3798 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3809 struct ice_vsi *vsi = np->vsi; in ice_vlan_rx_kill_vid()
3817 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) in ice_vlan_rx_kill_vid()
3820 ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_kill_vid()
3824 vsi->vsi_num); in ice_vlan_rx_kill_vid()
3825 vsi->current_netdev_flags |= IFF_ALLMULTI; in ice_vlan_rx_kill_vid()
3834 ret = vlan_ops->del_vlan(vsi, &vlan); in ice_vlan_rx_kill_vid()
3839 * all-multicast is enabled. in ice_vlan_rx_kill_vid()
3841 if (vsi->current_netdev_flags & IFF_ALLMULTI) in ice_vlan_rx_kill_vid()
3842 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_kill_vid()
3846 /* Update look-up type of multicast promisc rule for VLAN 0 in ice_vlan_rx_kill_vid()
3848 * all-multicast is enabled and VLAN 0 is the only VLAN rule. in ice_vlan_rx_kill_vid()
3850 if (vsi->current_netdev_flags & IFF_ALLMULTI) { in ice_vlan_rx_kill_vid()
3851 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_kill_vid()
3854 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_kill_vid()
3860 clear_bit(ICE_CFG_BUSY, vsi->state); in ice_vlan_rx_kill_vid()
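/*
 * Editor's sketch of the ICE_CFG_BUSY "bit lock" both VLAN ndo's above
 * take: sleep-spin until the bit is free, touch the filters, release.
 * The usleep_range() interval is an assumption.
 */
static void example_cfg_busy_section(struct ice_vsi *vsi)
{
	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);	/* back off, then retry */

	/* ... modify VLAN/promiscuous filters here ... */

	clear_bit(ICE_CFG_BUSY, vsi->state);
}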
3873 list_del(&indr_priv->list); in ice_rep_indr_tc_block_unbind()
3878 * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3883 struct ice_netdev_priv *np = netdev_priv(vsi->netdev); in ice_tc_indir_block_unregister()
3890 * ice_tc_indir_block_register - Register TC indirect block notifications
3899 if (!vsi || !vsi->netdev) in ice_tc_indir_block_register()
3900 return -EINVAL; in ice_tc_indir_block_register()
3902 np = netdev_priv(vsi->netdev); in ice_tc_indir_block_register()
3904 INIT_LIST_HEAD(&np->tc_indr_block_priv_list); in ice_tc_indir_block_register()
3909 * ice_get_avail_q_count - Get count of queues in use
3929 * ice_get_avail_txq_count - Get count of Tx queues in use
3934 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex, in ice_get_avail_txq_count()
3935 pf->max_pf_txqs); in ice_get_avail_txq_count()
3939 * ice_get_avail_rxq_count - Get count of Rx queues in use
3944 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex, in ice_get_avail_rxq_count()
3945 pf->max_pf_rxqs); in ice_get_avail_rxq_count()
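/*
 * Editor's sketch (assumed shape) of the shared helper both wrappers
 * above call, ice_get_avail_q_count(): count the set bits of a resource
 * bitmap while holding its mutex.
 */
static u16 example_avail_q_count(unsigned long *pf_qmap, struct mutex *lock,
				 u16 size)
{
	u16 count;

	mutex_lock(lock);
	count = bitmap_weight(pf_qmap, size);
	mutex_unlock(lock);

	return count;
}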
3949 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3955 mutex_destroy(&pf->lag_mutex); in ice_deinit_pf()
3956 mutex_destroy(&pf->adev_mutex); in ice_deinit_pf()
3957 mutex_destroy(&pf->sw_mutex); in ice_deinit_pf()
3958 mutex_destroy(&pf->tc_mutex); in ice_deinit_pf()
3959 mutex_destroy(&pf->avail_q_mutex); in ice_deinit_pf()
3960 mutex_destroy(&pf->vfs.table_lock); in ice_deinit_pf()
3962 if (pf->avail_txqs) { in ice_deinit_pf()
3963 bitmap_free(pf->avail_txqs); in ice_deinit_pf()
3964 pf->avail_txqs = NULL; in ice_deinit_pf()
3967 if (pf->avail_rxqs) { in ice_deinit_pf()
3968 bitmap_free(pf->avail_rxqs); in ice_deinit_pf()
3969 pf->avail_rxqs = NULL; in ice_deinit_pf()
3972 if (pf->txtime_txqs) { in ice_deinit_pf()
3973 bitmap_free(pf->txtime_txqs); in ice_deinit_pf()
3974 pf->txtime_txqs = NULL; in ice_deinit_pf()
3977 if (pf->ptp.clock) in ice_deinit_pf()
3978 ptp_clock_unregister(pf->ptp.clock); in ice_deinit_pf()
3980 xa_destroy(&pf->dyn_ports); in ice_deinit_pf()
3981 xa_destroy(&pf->sf_nums); in ice_deinit_pf()
3985 * ice_set_pf_caps - set PFs capability flags
3990 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps; in ice_set_pf_caps()
3992 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); in ice_set_pf_caps()
3993 if (func_caps->common_cap.rdma) in ice_set_pf_caps()
3994 set_bit(ICE_FLAG_RDMA_ENA, pf->flags); in ice_set_pf_caps()
3995 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); in ice_set_pf_caps()
3996 if (func_caps->common_cap.dcb) in ice_set_pf_caps()
3997 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); in ice_set_pf_caps()
3998 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); in ice_set_pf_caps()
3999 if (func_caps->common_cap.sr_iov_1_1) { in ice_set_pf_caps()
4000 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); in ice_set_pf_caps()
4001 pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs, in ice_set_pf_caps()
4004 clear_bit(ICE_FLAG_RSS_ENA, pf->flags); in ice_set_pf_caps()
4005 if (func_caps->common_cap.rss_table_size) in ice_set_pf_caps()
4006 set_bit(ICE_FLAG_RSS_ENA, pf->flags); in ice_set_pf_caps()
4008 clear_bit(ICE_FLAG_FD_ENA, pf->flags); in ice_set_pf_caps()
4009 if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) { in ice_set_pf_caps()
4015 pf->ctrl_vsi_idx = ICE_NO_VSI; in ice_set_pf_caps()
4016 set_bit(ICE_FLAG_FD_ENA, pf->flags); in ice_set_pf_caps()
4018 ice_alloc_fd_guar_item(&pf->hw, &unused, in ice_set_pf_caps()
4019 func_caps->fd_fltr_guar); in ice_set_pf_caps()
4021 ice_alloc_fd_shrd_item(&pf->hw, &unused, in ice_set_pf_caps()
4022 func_caps->fd_fltr_best_effort); in ice_set_pf_caps()
4025 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); in ice_set_pf_caps()
4026 if (func_caps->common_cap.ieee_1588) in ice_set_pf_caps()
4027 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); in ice_set_pf_caps()
4029 pf->max_pf_txqs = func_caps->common_cap.num_txq; in ice_set_pf_caps()
4030 pf->max_pf_rxqs = func_caps->common_cap.num_rxq; in ice_set_pf_caps()
4034 * ice_init_pf - Initialize general software structures (struct ice_pf)
4041 mutex_init(&pf->sw_mutex); in ice_init_pf()
4042 mutex_init(&pf->tc_mutex); in ice_init_pf()
4043 mutex_init(&pf->adev_mutex); in ice_init_pf()
4044 mutex_init(&pf->lag_mutex); in ice_init_pf()
4046 INIT_HLIST_HEAD(&pf->aq_wait_list); in ice_init_pf()
4047 spin_lock_init(&pf->aq_wait_lock); in ice_init_pf()
4048 init_waitqueue_head(&pf->aq_wait_queue); in ice_init_pf()
4050 init_waitqueue_head(&pf->reset_wait_queue); in ice_init_pf()
4053 timer_setup(&pf->serv_tmr, ice_service_timer, 0); in ice_init_pf()
4054 pf->serv_tmr_period = HZ; in ice_init_pf()
4055 INIT_WORK(&pf->serv_task, ice_service_task); in ice_init_pf()
4056 clear_bit(ICE_SERVICE_SCHED, pf->state); in ice_init_pf()
4058 mutex_init(&pf->avail_q_mutex); in ice_init_pf()
4059 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); in ice_init_pf()
4060 if (!pf->avail_txqs) in ice_init_pf()
4061 return -ENOMEM; in ice_init_pf()
4063 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); in ice_init_pf()
4064 if (!pf->avail_rxqs) { in ice_init_pf()
4065 bitmap_free(pf->avail_txqs); in ice_init_pf()
4066 pf->avail_txqs = NULL; in ice_init_pf()
4067 return -ENOMEM; in ice_init_pf()
4070 pf->txtime_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); in ice_init_pf()
4071 if (!pf->txtime_txqs) { in ice_init_pf()
4072 bitmap_free(pf->avail_txqs); in ice_init_pf()
4073 pf->avail_txqs = NULL; in ice_init_pf()
4074 bitmap_free(pf->avail_rxqs); in ice_init_pf()
4075 pf->avail_rxqs = NULL; in ice_init_pf()
4076 return -ENOMEM; in ice_init_pf()
4079 mutex_init(&pf->vfs.table_lock); in ice_init_pf()
4080 hash_init(pf->vfs.table); in ice_init_pf()
4082 wr32(&pf->hw, E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH, in ice_init_pf()
4085 ice_mbx_init_snapshot(&pf->hw); in ice_init_pf()
4087 xa_init(&pf->dyn_ports); in ice_init_pf()
4088 xa_init(&pf->sf_nums); in ice_init_pf()
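/*
 * Editor's note: the three bitmap allocations above unwind by hand on
 * each failure. An equivalent goto-ladder form (a sketch, not the
 * driver's code) keeps the unwind in one place:
 */
static int example_alloc_q_bitmaps(struct ice_pf *pf)
{
	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
	if (!pf->avail_txqs)
		return -ENOMEM;

	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
	if (!pf->avail_rxqs)
		goto err_free_txqs;

	pf->txtime_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
	if (!pf->txtime_txqs)
		goto err_free_rxqs;

	return 0;

err_free_rxqs:
	bitmap_free(pf->avail_rxqs);
	pf->avail_rxqs = NULL;
err_free_txqs:
	bitmap_free(pf->avail_txqs);
	pf->avail_txqs = NULL;
	return -ENOMEM;
}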
4094 * ice_is_wol_supported - check if WoL is supported
4110 return !(BIT(hw->port_info->lport) & wol_ctrl); in ice_is_wol_supported()
4114 * ice_vsi_recfg_qs - Change the number of queues on a VSI
4120 * Only change the number of queues if new_tx or new_rx is non-zero.
4126 struct ice_pf *pf = vsi->back; in ice_vsi_recfg_qs()
4130 return -EINVAL; in ice_vsi_recfg_qs()
4132 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { in ice_vsi_recfg_qs()
4133 timeout--; in ice_vsi_recfg_qs()
4135 return -EBUSY; in ice_vsi_recfg_qs()
4140 vsi->req_txq = (u16)new_tx; in ice_vsi_recfg_qs()
4142 vsi->req_rxq = (u16)new_rx; in ice_vsi_recfg_qs()
4145 if (!netif_running(vsi->netdev)) { in ice_vsi_recfg_qs()
4159 if (vsi->tc_cfg.ena_tc & BIT(i)) in ice_vsi_recfg_qs()
4160 netdev_set_tc_queue(vsi->netdev, in ice_vsi_recfg_qs()
4161 vsi->tc_cfg.tc_info[i].netdev_tc, in ice_vsi_recfg_qs()
4162 vsi->tc_cfg.tc_info[i].qcount_tx, in ice_vsi_recfg_qs()
4163 vsi->tc_cfg.tc_info[i].qoffset); in ice_vsi_recfg_qs()
4173 clear_bit(ICE_CFG_BUSY, pf->state); in ice_vsi_recfg_qs()
4178 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
4198 hw = &pf->hw; in ice_set_safe_mode_vlan_cfg()
4199 ctxt->info = vsi->info; in ice_set_safe_mode_vlan_cfg()
4201 ctxt->info.valid_sections = in ice_set_safe_mode_vlan_cfg()
4206 /* disable VLAN anti-spoof */ in ice_set_safe_mode_vlan_cfg()
4207 ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << in ice_set_safe_mode_vlan_cfg()
4211 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; in ice_set_safe_mode_vlan_cfg()
4214 ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL | in ice_set_safe_mode_vlan_cfg()
4217 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); in ice_set_safe_mode_vlan_cfg()
4219 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n", in ice_set_safe_mode_vlan_cfg()
4220 status, libie_aq_str(hw->adminq.sq_last_status)); in ice_set_safe_mode_vlan_cfg()
4222 vsi->info.sec_flags = ctxt->info.sec_flags; in ice_set_safe_mode_vlan_cfg()
4223 vsi->info.sw_flags2 = ctxt->info.sw_flags2; in ice_set_safe_mode_vlan_cfg()
4224 vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags; in ice_set_safe_mode_vlan_cfg()
4231 * ice_log_pkg_init - log result of DDP package load
4237 struct ice_pf *pf = hw->back; in ice_log_pkg_init()
4245 hw->active_pkg_name, in ice_log_pkg_init()
4246 hw->active_pkg_ver.major, in ice_log_pkg_init()
4247 hw->active_pkg_ver.minor, in ice_log_pkg_init()
4248 hw->active_pkg_ver.update, in ice_log_pkg_init()
4249 hw->active_pkg_ver.draft); in ice_log_pkg_init()
4253 hw->active_pkg_name, in ice_log_pkg_init()
4254 hw->active_pkg_ver.major, in ice_log_pkg_init()
4255 hw->active_pkg_ver.minor, in ice_log_pkg_init()
4256 hw->active_pkg_ver.update, in ice_log_pkg_init()
4257 hw->active_pkg_ver.draft); in ice_log_pkg_init()
4261 hw->active_pkg_name, in ice_log_pkg_init()
4262 hw->active_pkg_ver.major, in ice_log_pkg_init()
4263 hw->active_pkg_ver.minor, in ice_log_pkg_init()
4268 hw->active_pkg_name, in ice_log_pkg_init()
4269 hw->active_pkg_ver.major, in ice_log_pkg_init()
4270 hw->active_pkg_ver.minor, in ice_log_pkg_init()
4271 hw->active_pkg_ver.update, in ice_log_pkg_init()
4272 hw->active_pkg_ver.draft, in ice_log_pkg_init()
4273 hw->pkg_name, in ice_log_pkg_init()
4274 hw->pkg_ver.major, in ice_log_pkg_init()
4275 hw->pkg_ver.minor, in ice_log_pkg_init()
4276 hw->pkg_ver.update, in ice_log_pkg_init()
4277 hw->pkg_ver.draft); in ice_log_pkg_init()
4299 …ev, "An error occurred on the device while loading the DDP package. The device will be reset.\n"); in ice_log_pkg_init()
4300 /* poll for reset to complete */ in ice_log_pkg_init()
4312 * ice_load_pkg - load/reload the DDP Package file
4324 struct ice_hw *hw = &pf->hw; in ice_load_pkg()
4327 if (firmware && !hw->pkg_copy) { in ice_load_pkg()
4328 state = ice_copy_and_init_pkg(hw, firmware->data, in ice_load_pkg()
4329 firmware->size); in ice_load_pkg()
4331 } else if (!firmware && hw->pkg_copy) { in ice_load_pkg()
4332 /* Reload package during rebuild after CORER/GLOBR reset */ in ice_load_pkg()
4333 state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); in ice_load_pkg()
4341 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); in ice_load_pkg()
4348 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags); in ice_load_pkg()
4352 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4361 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M) in ice_verify_cacheline_size()
4367 * ice_send_version - update firmware with driver version
4382 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL); in ice_send_version()
4386 * ice_init_fdir - Initialize flow director VSI and configuration
4400 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info); in ice_init_fdir()
4403 return -ENOMEM; in ice_init_fdir()
4412 mutex_init(&pf->hw.fdir_fltr_lock); in ice_init_fdir()
4421 ice_fdir_release_flows(&pf->hw); in ice_init_fdir()
4425 if (pf->ctrl_vsi_idx != ICE_NO_VSI) { in ice_init_fdir()
4426 pf->vsi[pf->ctrl_vsi_idx] = NULL; in ice_init_fdir()
4427 pf->ctrl_vsi_idx = ICE_NO_VSI; in ice_init_fdir()
4441 if (pf->ctrl_vsi_idx != ICE_NO_VSI) { in ice_deinit_fdir()
4442 pf->vsi[pf->ctrl_vsi_idx] = NULL; in ice_deinit_fdir()
4443 pf->ctrl_vsi_idx = ICE_NO_VSI; in ice_deinit_fdir()
4446 mutex_destroy(&pf->hw.fdir_fltr_lock); in ice_deinit_fdir()
4450 * ice_get_opt_fw_name - return optional firmware file name or NULL
4456 * followed by an EUI-64 identifier (PCIe Device Serial Number) in ice_get_opt_fw_name()
4458 struct pci_dev *pdev = pf->pdev; in ice_get_opt_fw_name()
4473 snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg", in ice_get_opt_fw_name()
4480 * ice_request_fw - Device initialization routine
4492 /* optional device-specific DDP (if present) overrides the default DDP in ice_request_fw()
4510 * ice_init_tx_topology - performs Tx topology initialization
4519 u8 num_tx_sched_layers = hw->num_tx_sched_layers; in ice_init_tx_topology()
4520 struct ice_pf *pf = hw->back; in ice_init_tx_topology()
4525 err = ice_cfg_tx_topo(hw, firmware->data, firmware->size); in ice_init_tx_topology()
4527 if (hw->num_tx_sched_layers > num_tx_sched_layers) in ice_init_tx_topology()
4532 } else if (err == -ENODEV) { in ice_init_tx_topology()
4533 /* If we failed to re-initialize the device, we can no longer in ice_init_tx_topology()
4538 } else if (err == -EIO) { in ice_init_tx_topology()
4539 …dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update… in ice_init_tx_topology()
4541 } else if (err == -EEXIST) { in ice_init_tx_topology()
4552 * ice_init_supported_rxdids - Initialize supported Rx descriptor IDs
4556 * The pf->supported_rxdids bitmap is used to indicate to VFs which descriptor
4561 * Note that the legacy 32-byte RXDID 0 is always supported but is not listed
4562 * in the DDP package. The 16-byte legacy descriptor is never supported by
4567 pf->supported_rxdids = BIT(ICE_RXDID_LEGACY_1); in ice_init_supported_rxdids()
4575 pf->supported_rxdids |= BIT(i); in ice_init_supported_rxdids()
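/*
 * Editor's sketch of the bitmap built above: the legacy 32-byte RXDID 1
 * is always usable, and each flexible RXDID the DDP package advertises
 * sets its bit. ddp_rxdid_valid() is a hypothetical stand-in for the
 * register read the driver actually performs.
 */
static void example_init_rxdids(struct ice_pf *pf)
{
	u32 i;

	pf->supported_rxdids = BIT(ICE_RXDID_LEGACY_1);
	for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++)
		if (ddp_rxdid_valid(&pf->hw, i))	/* hypothetical */
			pf->supported_rxdids |= BIT(i);
}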
4580 * ice_init_ddp_config - DDP related configuration
4620 * ice_print_wake_reason - show the wake up cause in the log
4625 u32 wus = pf->wakeup_reason; in ice_print_wake_reason()
4639 wake_str = "Firmware Reset\n"; in ice_print_wake_reason()
4647 * ice_register_netdev - register netdev
4654 if (!vsi || !vsi->netdev) in ice_register_netdev()
4655 return -EIO; in ice_register_netdev()
4657 err = register_netdev(vsi->netdev); in ice_register_netdev()
4661 set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); in ice_register_netdev()
4662 netif_carrier_off(vsi->netdev); in ice_register_netdev()
4663 netif_tx_stop_all_queues(vsi->netdev); in ice_register_netdev()
4670 if (!vsi || !vsi->netdev) in ice_unregister_netdev()
4673 unregister_netdev(vsi->netdev); in ice_unregister_netdev()
4674 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); in ice_unregister_netdev()
4678 * ice_cfg_netdev - Allocate, configure and register a netdev
4689 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, in ice_cfg_netdev()
4690 vsi->alloc_rxq); in ice_cfg_netdev()
4692 return -ENOMEM; in ice_cfg_netdev()
4694 set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); in ice_cfg_netdev()
4695 vsi->netdev = netdev; in ice_cfg_netdev()
4697 np->vsi = vsi; in ice_cfg_netdev()
4702 if (vsi->type == ICE_VSI_PF) { in ice_cfg_netdev()
4703 SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back)); in ice_cfg_netdev()
4704 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); in ice_cfg_netdev()
4708 netdev->priv_flags |= IFF_UNICAST_FLT; in ice_cfg_netdev()
4711 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); in ice_cfg_netdev()
4713 netdev->max_mtu = ICE_MAX_MTU; in ice_cfg_netdev()
4720 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); in ice_decfg_netdev()
4721 free_netdev(vsi->netdev); in ice_decfg_netdev()
4722 vsi->netdev = NULL; in ice_decfg_netdev()
4728 struct ice_hw *hw = &pf->hw; in ice_init_dev()
4736 * set in pf->state, which will cause ice_is_safe_mode to return in ice_init_dev()
4754 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port; in ice_init_dev()
4755 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port; in ice_init_dev()
4756 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared; in ice_init_dev()
4757 if (pf->hw.tnl.valid_count[TNL_VXLAN]) { in ice_init_dev()
4758 pf->hw.udp_tunnel_nic.tables[0].n_entries = in ice_init_dev()
4759 pf->hw.tnl.valid_count[TNL_VXLAN]; in ice_init_dev()
4760 pf->hw.udp_tunnel_nic.tables[0].tunnel_types = in ice_init_dev()
4763 if (pf->hw.tnl.valid_count[TNL_GENEVE]) { in ice_init_dev()
4764 pf->hw.udp_tunnel_nic.tables[1].n_entries = in ice_init_dev()
4765 pf->hw.tnl.valid_count[TNL_GENEVE]; in ice_init_dev()
4766 pf->hw.udp_tunnel_nic.tables[1].tunnel_types = in ice_init_dev()
4773 err = -EIO; in ice_init_dev()
4801 ice_deinit_hw(&pf->hw); in ice_deinit_dev()
4803 /* Service task is already stopped, so call reset directly. */ in ice_deinit_dev()
4804 ice_reset(&pf->hw, ICE_RESET_PFR); in ice_deinit_dev()
4805 pci_wait_for_pending_transaction(pf->pdev); in ice_deinit_dev()
4817 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_init_features()
4827 /* Note: Flow director init failure is non-fatal to load */ in ice_init_features()
4831 /* Note: DCB init failure is non-fatal to load */ in ice_init_features()
4833 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); in ice_init_features()
4834 clear_bit(ICE_FLAG_DCB_ENA, pf->flags); in ice_init_features()
4836 ice_cfg_lldp_mib_change(&pf->hw, true); in ice_init_features()
4851 if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)) in ice_deinit_features()
4852 ice_cfg_lldp_mib_change(&pf->hw, false); in ice_deinit_features()
4856 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_deinit_features()
4858 if (test_bit(ICE_FLAG_DPLL, pf->flags)) in ice_deinit_features()
4860 if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) in ice_deinit_features()
4861 xa_destroy(&pf->eswitch.reprs); in ice_deinit_features()
4867 pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS); in ice_init_wakeup()
4873 wr32(&pf->hw, PFPM_WUS, U32_MAX); in ice_init_wakeup()
4884 err = ice_init_link_events(pf->hw.port_info); in ice_init_link()
4891 err = ice_init_nvm_phy_type(pf->hw.port_info); in ice_init_link()
4896 err = ice_update_link_info(pf->hw.port_info); in ice_init_link()
4900 ice_init_link_dflt_override(pf->hw.port_info); in ice_init_link()
4903 pf->hw.port_info->phy.link_info.link_cfg_err); in ice_init_link()
4906 if (pf->hw.port_info->phy.link_info.link_info & in ice_init_link()
4909 err = ice_init_phy_user_cfg(pf->hw.port_info); in ice_init_link()
4913 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { in ice_init_link()
4920 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_init_link()
4928 bool dvm = ice_is_dvm_ena(&pf->hw); in ice_init_pf_sw()
4933 pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL); in ice_init_pf_sw()
4934 if (!pf->first_sw) in ice_init_pf_sw()
4935 return -ENOMEM; in ice_init_pf_sw()
4937 if (pf->hw.evb_veb) in ice_init_pf_sw()
4938 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; in ice_init_pf_sw()
4940 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; in ice_init_pf_sw()
4942 pf->first_sw->pf = pf; in ice_init_pf_sw()
4945 pf->first_sw->sw_id = pf->hw.port_info->sw_id; in ice_init_pf_sw()
4947 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); in ice_init_pf_sw()
4951 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); in ice_init_pf_sw()
4953 err = -ENOMEM; in ice_init_pf_sw()
4961 kfree(pf->first_sw); in ice_init_pf_sw()
4973 kfree(pf->first_sw); in ice_deinit_pf_sw()
4980 pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi; in ice_alloc_vsis()
4981 if (!pf->num_alloc_vsi) in ice_alloc_vsis()
4982 return -EIO; in ice_alloc_vsis()
4984 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { in ice_alloc_vsis()
4987 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); in ice_alloc_vsis()
4988 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; in ice_alloc_vsis()
4991 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), in ice_alloc_vsis()
4993 if (!pf->vsi) in ice_alloc_vsis()
4994 return -ENOMEM; in ice_alloc_vsis()
4996 pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi, in ice_alloc_vsis()
4997 sizeof(*pf->vsi_stats), GFP_KERNEL); in ice_alloc_vsis()
4998 if (!pf->vsi_stats) { in ice_alloc_vsis()
4999 devm_kfree(dev, pf->vsi); in ice_alloc_vsis()
5000 return -ENOMEM; in ice_alloc_vsis()
5008 devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats); in ice_dealloc_vsis()
5009 pf->vsi_stats = NULL; in ice_dealloc_vsis()
5011 pf->num_alloc_vsi = 0; in ice_dealloc_vsis()
5012 devm_kfree(ice_pf_to_dev(pf), pf->vsi); in ice_dealloc_vsis()
5013 pf->vsi = NULL; in ice_dealloc_vsis()
5047 if (pf->hw.mac_type == ICE_MAC_E830) { in ice_init()
5048 err = pci_enable_ptm(pf->pdev, NULL); in ice_init()
5077 pcie_print_link_status(pf->pdev); in ice_init()
5080 clear_bit(ICE_DOWN, pf->state); in ice_init()
5081 clear_bit(ICE_SERVICE_DIS, pf->state); in ice_init()
5084 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); in ice_init()
5099 set_bit(ICE_SERVICE_DIS, pf->state); in ice_deinit()
5100 set_bit(ICE_DOWN, pf->state); in ice_deinit()
5108 * ice_load - load pf by init hw and starting VSI
5123 INIT_LIST_HEAD(&vsi->ch_list); in ice_load()
5140 SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port); in ice_load()
5160 clear_bit(ICE_DOWN, pf->state); in ice_load()
5178 * ice_unload - unload pf by stopping VSI and deinit hw
5204 INIT_HLIST_HEAD(&pf->aq_wait_list); in ice_probe_recovery_mode()
5205 spin_lock_init(&pf->aq_wait_lock); in ice_probe_recovery_mode()
5206 init_waitqueue_head(&pf->aq_wait_queue); in ice_probe_recovery_mode()
5208 timer_setup(&pf->serv_tmr, ice_service_timer, 0); in ice_probe_recovery_mode()
5209 pf->serv_tmr_period = HZ; in ice_probe_recovery_mode()
5210 INIT_WORK(&pf->serv_task, ice_service_task_recovery_mode); in ice_probe_recovery_mode()
5211 clear_bit(ICE_SERVICE_SCHED, pf->state); in ice_probe_recovery_mode()
5212 err = ice_create_all_ctrlq(&pf->hw); in ice_probe_recovery_mode()
5228 * ice_probe - Device initialization routine
5237 struct device *dev = &pdev->dev; in ice_probe()
5243 if (pdev->is_virtfn) { in ice_probe()
5245 return -EINVAL; in ice_probe()
5248 /* when under a kdump kernel initiate a reset before enabling the in ice_probe()
5263 * Documentation/driver-api/driver-model/devres.rst in ice_probe()
5277 return -ENOMEM; in ice_probe()
5280 pf->aux_idx = -1; in ice_probe()
5290 pf->pdev = pdev; in ice_probe()
5292 set_bit(ICE_DOWN, pf->state); in ice_probe()
5294 set_bit(ICE_SERVICE_DIS, pf->state); in ice_probe()
5296 hw = &pf->hw; in ice_probe()
5297 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; in ice_probe()
5300 hw->back = pf; in ice_probe()
5301 hw->port_info = NULL; in ice_probe()
5302 hw->vendor_id = pdev->vendor; in ice_probe()
5303 hw->device_id = pdev->device; in ice_probe()
5304 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); in ice_probe()
5305 hw->subsystem_vendor_id = pdev->subsystem_vendor; in ice_probe()
5306 hw->subsystem_device_id = pdev->subsystem_device; in ice_probe()
5307 hw->bus.device = PCI_SLOT(pdev->devfn); in ice_probe()
5308 hw->bus.func = PCI_FUNC(pdev->devfn); in ice_probe()
5311 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); in ice_probe()
5314 if (debug < -1) in ice_probe()
5315 hw->debug_mask = debug; in ice_probe()
5332 pf->adapter = adapter; in ice_probe()
5363 * ice_set_wake - enable or disable Wake on LAN
5370 struct ice_hw *hw = &pf->hw; in ice_set_wake()
5371 bool wol = pf->wol_ena; in ice_set_wake()
5384 * ice_setup_mc_magic_wake - set up device to wake on multicast magic packet
5389 * wake, and that PF reset doesn't undo the LAA.
5394 struct ice_hw *hw = &pf->hw; in ice_setup_mc_magic_wake()
5400 if (!pf->wol_ena) in ice_setup_mc_magic_wake()
5408 if (vsi->netdev) in ice_setup_mc_magic_wake()
5409 ether_addr_copy(mac_addr, vsi->netdev->dev_addr); in ice_setup_mc_magic_wake()
5411 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); in ice_setup_mc_magic_wake()
5420 status, libie_aq_str(hw->adminq.sq_last_status)); in ice_setup_mc_magic_wake()
5424 * ice_remove - Device removal routine
5433 if (!ice_is_reset_in_progress(pf->state)) in ice_remove()
5438 if (ice_is_recovery_mode(&pf->hw)) { in ice_remove()
5446 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { in ice_remove()
5447 set_bit(ICE_VF_RESETS_DISABLED, pf->state); in ice_remove()
5455 set_bit(ICE_DOWN, pf->state); in ice_remove()
5477 * ice_shutdown - PCI callback for shutting down device
5487 pci_wake_from_d3(pdev, pf->wol_ena); in ice_shutdown()
5493 * ice_prepare_for_shutdown - prep for PCI shutdown
5500 struct ice_hw *hw = &pf->hw; in ice_prepare_for_shutdown()
5503 /* Notify VFs of impending reset */ in ice_prepare_for_shutdown()
5504 if (ice_check_sq_alive(hw, &hw->mailboxq)) in ice_prepare_for_shutdown()
5513 if (pf->vsi[v]) in ice_prepare_for_shutdown()
5514 pf->vsi[v]->vsi_num = 0; in ice_prepare_for_shutdown()
5520 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
5526 * This should be called during resume routine to re-allocate the q_vectors
5540 dev_err(dev, "Failed to re-initialize interrupt %d\n", ret); in ice_reinit_interrupt_scheme()
5544 /* Remap vectors and rings, after successful re-init interrupts */ in ice_reinit_interrupt_scheme()
5546 if (!pf->vsi[v]) in ice_reinit_interrupt_scheme()
5549 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5552 ice_vsi_map_rings_to_vectors(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5554 ice_vsi_set_napi_queues(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5568 while (v--) in ice_reinit_interrupt_scheme()
5569 if (pf->vsi[v]) { in ice_reinit_interrupt_scheme()
5571 ice_vsi_clear_napi_queues(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5573 ice_vsi_free_q_vectors(pf->vsi[v]); in ice_reinit_interrupt_scheme()
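/*
 * Editor's sketch of the "while (v--)" unwind above: on failure at VSI
 * v, walk back over the VSIs already prepared and undo them in reverse
 * order.
 */
static void example_unwind_vsis(struct ice_pf *pf, int v)
{
	while (v--)
		if (pf->vsi[v]) {
			ice_vsi_clear_napi_queues(pf->vsi[v]);
			ice_vsi_free_q_vectors(pf->vsi[v]);
		}
}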
5596 return -EBUSY; in ice_suspend()
5610 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) { in ice_suspend()
5616 if (test_bit(ICE_DOWN, pf->state) || in ice_suspend()
5617 ice_is_reset_in_progress(pf->state)) { in ice_suspend()
5618 dev_err(dev, "can't suspend device in reset or already down\n"); in ice_suspend()
5637 if (!pf->vsi[v]) in ice_suspend()
5640 ice_vsi_clear_napi_queues(pf->vsi[v]); in ice_suspend()
5642 ice_vsi_free_q_vectors(pf->vsi[v]); in ice_suspend()
5647 pci_wake_from_d3(pdev, pf->wol_ena); in ice_suspend()
5653 * ice_resume - PM callback for waking up from D3
5669 return -ENODEV; in ice_resume()
5678 hw = &pf->hw; in ice_resume()
5680 pf->wakeup_reason = rd32(hw, PFPM_WUS); in ice_resume()
5695 clear_bit(ICE_DOWN, pf->state); in ice_resume()
5696 /* Now perform PF reset and rebuild */ in ice_resume()
5698 /* re-enable service task for reset, but allow reset to schedule it */ in ice_resume()
5699 clear_bit(ICE_SERVICE_DIS, pf->state); in ice_resume()
5702 dev_err(dev, "Reset during resume failed.\n"); in ice_resume()
5704 clear_bit(ICE_SUSPENDED, pf->state); in ice_resume()
5708 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); in ice_resume()
5714 * ice_pci_err_detected - warning that PCI error has been detected
5727 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n", in ice_pci_err_detected()
5732 if (!test_bit(ICE_SUSPENDED, pf->state)) { in ice_pci_err_detected()
5735 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { in ice_pci_err_detected()
5736 set_bit(ICE_PFR_REQ, pf->state); in ice_pci_err_detected()
5745 * ice_pci_err_slot_reset - a PCI slot reset has just happened
5748 * Called to determine if the driver can recover from the PCI slot reset by
5760 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n", in ice_pci_err_slot_reset()
5770 reg = rd32(&pf->hw, GLGEN_RTRIG); in ice_pci_err_slot_reset()
5781 * ice_pci_err_resume - restart operations after PCI error recovery
5785 * reset recovery have finished
5792 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n", in ice_pci_err_resume()
5797 if (test_bit(ICE_SUSPENDED, pf->state)) { in ice_pci_err_resume()
5798 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n", in ice_pci_err_resume()
5807 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); in ice_pci_err_resume()
5811 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5818 if (!test_bit(ICE_SUSPENDED, pf->state)) { in ice_pci_err_reset_prepare()
5821 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { in ice_pci_err_reset_prepare()
5822 set_bit(ICE_PFR_REQ, pf->state); in ice_pci_err_reset_prepare()
5829 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5837 /* ice_pci_tbl - PCI Device ID Table
5924 * ice_module_init - Driver registration routine
5931 int status = -ENOMEM; in ice_module_init()
5978 * ice_module_exit - Driver exit cleanup routine
5995 * ice_set_mac_address - NDO callback to set MAC address
6004 struct ice_vsi *vsi = np->vsi; in ice_set_mac_address()
6005 struct ice_pf *pf = vsi->back; in ice_set_mac_address()
6006 struct ice_hw *hw = &pf->hw; in ice_set_mac_address()
6013 mac = (u8 *)addr->sa_data; in ice_set_mac_address()
6016 return -EADDRNOTAVAIL; in ice_set_mac_address()
6018 if (test_bit(ICE_DOWN, pf->state) || in ice_set_mac_address()
6019 ice_is_reset_in_progress(pf->state)) { in ice_set_mac_address()
6022 return -EBUSY; in ice_set_mac_address()
6026 …netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try ag… in ice_set_mac_address()
6028 return -EAGAIN; in ice_set_mac_address()
6032 ether_addr_copy(old_mac, netdev->dev_addr); in ice_set_mac_address()
6039 if (err && err != -ENOENT) { in ice_set_mac_address()
6040 err = -EADDRNOTAVAIL; in ice_set_mac_address()
6046 if (err == -EEXIST) { in ice_set_mac_address()
6057 err = -EADDRNOTAVAIL; in ice_set_mac_address()
6070 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n", in ice_set_mac_address()
6071 netdev->dev_addr); in ice_set_mac_address()
6084 * ice_set_rx_mode - NDO callback to set the netdev filters
6090 struct ice_vsi *vsi = np->vsi; in ice_set_rx_mode()
6092 if (!vsi || ice_is_switchdev_running(vsi->back)) in ice_set_rx_mode()
6099 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); in ice_set_rx_mode()
6100 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); in ice_set_rx_mode()
6101 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags); in ice_set_rx_mode()
6106 ice_service_task_schedule(vsi->back); in ice_set_rx_mode()
6110 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
6119 struct ice_vsi *vsi = np->vsi; in ice_set_tx_maxrate()
6128 return -EINVAL; in ice_set_tx_maxrate()
6131 q_handle = vsi->tx_rings[queue_index]->q_handle; in ice_set_tx_maxrate()
6138 return -EINVAL; in ice_set_tx_maxrate()
6143 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc, in ice_set_tx_maxrate()
6146 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc, in ice_set_tx_maxrate()
6156 * ice_fdb_add - add an entry to the hardware database
6176 return -EINVAL; in ice_fdb_add()
6178 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { in ice_fdb_add()
6180 return -EINVAL; in ice_fdb_add()
6188 err = -EINVAL; in ice_fdb_add()
6191 if (err == -EEXIST && !(flags & NLM_F_EXCL)) in ice_fdb_add()
6198 * ice_fdb_del - delete an entry from the hardware database
6215 if (ndm->ndm_state & NUD_PERMANENT) { in ice_fdb_del()
6217 return -EINVAL; in ice_fdb_del()
6225 err = -EINVAL; in ice_fdb_del()
6242 * ice_fix_features - fix the netdev features flags based on device limitations
6285 cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES; in ice_fix_features()
6294 if (ice_is_dvm_ena(&np->vsi->back->hw)) { in ice_fix_features()
6324 if (!(netdev->features & NETIF_F_RXFCS) && in ice_fix_features()
6327 !ice_vsi_has_non_zero_vlans(np->vsi)) { in ice_fix_features()
6336 * ice_set_rx_rings_vlan_proto - update rings with new stripped VLAN proto
6349 vsi->rx_rings[i]->pkt_ctx.vlan_proto = vlan_ethertype; in ice_set_rx_rings_vlan_proto()
6353 * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
6382 strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype); in ice_set_vlan_offload_features()
6384 strip_err = vlan_ops->dis_stripping(vsi); in ice_set_vlan_offload_features()
6387 insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype); in ice_set_vlan_offload_features()
6389 insert_err = vlan_ops->dis_insertion(vsi); in ice_set_vlan_offload_features()
6392 return -EIO; in ice_set_vlan_offload_features()
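/*
 * Editor's sketch of the enable/disable dispatch above: one branch per
 * offload, with both failures collapsed into -EIO as the surrounding
 * code does. Struct and member names follow the code above.
 */
static int example_toggle_stripping(struct ice_vsi *vsi,
				    struct ice_vsi_vlan_ops *vlan_ops,
				    bool ena, u16 vlan_ethertype)
{
	return ena ? vlan_ops->ena_stripping(vsi, vlan_ethertype) :
		     vlan_ops->dis_stripping(vsi);
}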
6401 * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
6420 !ice_is_eswitch_mode_switchdev(vsi->back)) in ice_set_vlan_filtering_features()
6421 err = vlan_ops->ena_rx_filtering(vsi); in ice_set_vlan_filtering_features()
6423 err = vlan_ops->dis_rx_filtering(vsi); in ice_set_vlan_filtering_features()
6429 * ice_set_vlan_features - set VLAN settings based on suggested feature set
6441 struct ice_vsi *vsi = np->vsi; in ice_set_vlan_features()
6444 current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES; in ice_set_vlan_features()
6449 dev_err(ice_pf_to_dev(vsi->back), in ice_set_vlan_features()
6451 return -EIO; in ice_set_vlan_features()
6459 current_vlan_features = netdev->features & in ice_set_vlan_features()
6472 * ice_set_loopback - turn on/off loopback mode on underlying PF
6478 bool if_running = netif_running(vsi->netdev); in ice_set_loopback()
6481 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { in ice_set_loopback()
6484 netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n"); in ice_set_loopback()
6488 ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL); in ice_set_loopback()
6490 netdev_err(vsi->netdev, "Failed to toggle loopback state\n"); in ice_set_loopback()
6498 * ice_set_features - set the netdev feature flags
6505 netdev_features_t changed = netdev->features ^ features; in ice_set_features()
6507 struct ice_vsi *vsi = np->vsi; in ice_set_features()
6508 struct ice_pf *pf = vsi->back; in ice_set_features()
6514 "Device is in Safe Mode - not enabling advanced netdev features\n"); in ice_set_features()
6518 /* Do not change setting during reset */ in ice_set_features()
6519 if (ice_is_reset_in_progress(pf->state)) { in ice_set_features()
6522 return -EBUSY; in ice_set_features()
6541 dev_err(ice_pf_to_dev(vsi->back), in ice_set_features()
6543 return -EIO; in ice_set_features()
6562 return -EACCES; in ice_set_features()
6568 assign_bit(ICE_FLAG_CLS_FLOWER, pf->flags, ena); in ice_set_features()
6579 if (netdev->features & NETIF_F_HW_CSUM) in ice_set_features()
6583 return -EIO; in ice_set_features()
6590 * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
6597 err = ice_set_vlan_offload_features(vsi, vsi->netdev->features); in ice_vsi_vlan_setup()
6601 err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features); in ice_vsi_vlan_setup()
6609 * ice_vsi_cfg_lan - Setup the VSI lan related config
6618 if (vsi->netdev && vsi->type == ICE_VSI_PF) { in ice_vsi_cfg_lan()
6619 ice_set_rx_mode(vsi->netdev); in ice_vsi_cfg_lan()
6640 * which is hard-coded to a limit of 250,000 ints/second.
6642 * by ethtool rx-usecs-high.
6681 rc = dim->priv; in ice_tx_dim_work()
6683 WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile)); in ice_tx_dim_work()
6686 itr = tx_profile[dim->profile_ix].itr; in ice_tx_dim_work()
6691 dim->state = DIM_START_MEASURE; in ice_tx_dim_work()
6701 rc = dim->priv; in ice_rx_dim_work()
6703 WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile)); in ice_rx_dim_work()
6706 itr = rx_profile[dim->profile_ix].itr; in ice_rx_dim_work()
6711 dim->state = DIM_START_MEASURE; in ice_rx_dim_work()
6717 * ice_init_moderation - set up interrupt moderation
6721 * when called from reset or from probe, and whether or not dynamic moderation
6731 rc = &q_vector->tx; in ice_init_moderation()
6732 INIT_WORK(&rc->dim.work, ice_tx_dim_work); in ice_init_moderation()
6733 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in ice_init_moderation()
6734 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX; in ice_init_moderation()
6735 rc->dim.priv = rc; in ice_init_moderation()
6740 tx_profile[rc->dim.profile_ix].itr : rc->itr_setting); in ice_init_moderation()
6742 rc = &q_vector->rx; in ice_init_moderation()
6743 INIT_WORK(&rc->dim.work, ice_rx_dim_work); in ice_init_moderation()
6744 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in ice_init_moderation()
6745 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX; in ice_init_moderation()
6746 rc->dim.priv = rc; in ice_init_moderation()
6750 ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr : in ice_init_moderation()
6751 rc->itr_setting); in ice_init_moderation()
6757 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
6764 if (!vsi->netdev) in ice_napi_enable_all()
6768 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; in ice_napi_enable_all()
6772 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring) in ice_napi_enable_all()
6773 napi_enable(&q_vector->napi); in ice_napi_enable_all()
6778 * ice_up_complete - Finish the last steps of bringing up a connection
6785 struct ice_pf *pf = vsi->back; in ice_up_complete()
6798 clear_bit(ICE_VSI_DOWN, vsi->state); in ice_up_complete()
6802 if (vsi->port_info && in ice_up_complete()
6803 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && in ice_up_complete()
6804 ((vsi->netdev && (vsi->type == ICE_VSI_PF || in ice_up_complete()
6805 vsi->type == ICE_VSI_SF)))) { in ice_up_complete()
6807 netif_tx_start_all_queues(vsi->netdev); in ice_up_complete()
6808 netif_carrier_on(vsi->netdev); in ice_up_complete()
6817 if (vsi->type == ICE_VSI_PF) in ice_up_complete()
6824 * ice_up - Bring the connection back up after being down
6839 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
6862 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
6880 if (!ring || !ring->ring_stats) in ice_update_vsi_tx_ring_stats()
6882 ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp, in ice_update_vsi_tx_ring_stats()
6883 ring->ring_stats->stats, &pkts, in ice_update_vsi_tx_ring_stats()
6885 vsi_stats->tx_packets += pkts; in ice_update_vsi_tx_ring_stats()
6886 vsi_stats->tx_bytes += bytes; in ice_update_vsi_tx_ring_stats()
6887 vsi->tx_restart += ring->ring_stats->tx_stats.restart_q; in ice_update_vsi_tx_ring_stats()
6888 vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy; in ice_update_vsi_tx_ring_stats()
6889 vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize; in ice_update_vsi_tx_ring_stats()
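/*
 * Editor's sketch (assumed shape) of ice_fetch_u64_stats_per_ring():
 * re-read until the seqcount is stable so pkts/bytes form a consistent
 * pair even on 32-bit kernels.
 */
static void example_fetch_stats(struct u64_stats_sync *syncp,
				struct ice_q_stats stats,
				u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(syncp);
		*pkts = stats.pkts;
		*bytes = stats.bytes;
	} while (u64_stats_fetch_retry(syncp, start));
}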
6894 * ice_update_vsi_ring_stats - Update VSI stats counters
6901 struct ice_pf *pf = vsi->back; in ice_update_vsi_ring_stats()
6909 /* reset non-netdev (extended) stats */ in ice_update_vsi_ring_stats()
6910 vsi->tx_restart = 0; in ice_update_vsi_ring_stats()
6911 vsi->tx_busy = 0; in ice_update_vsi_ring_stats()
6912 vsi->tx_linearize = 0; in ice_update_vsi_ring_stats()
6913 vsi->rx_buf_failed = 0; in ice_update_vsi_ring_stats()
6914 vsi->rx_page_failed = 0; in ice_update_vsi_ring_stats()
6919 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings, in ice_update_vsi_ring_stats()
6920 vsi->num_txq); in ice_update_vsi_ring_stats()
6924 struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]); in ice_update_vsi_ring_stats()
6927 ring_stats = ring->ring_stats; in ice_update_vsi_ring_stats()
6928 ice_fetch_u64_stats_per_ring(&ring_stats->syncp, in ice_update_vsi_ring_stats()
6929 ring_stats->stats, &pkts, in ice_update_vsi_ring_stats()
6931 vsi_stats->rx_packets += pkts; in ice_update_vsi_ring_stats()
6932 vsi_stats->rx_bytes += bytes; in ice_update_vsi_ring_stats()
6933 vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed; in ice_update_vsi_ring_stats()
6934 vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed; in ice_update_vsi_ring_stats()
6939 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings, in ice_update_vsi_ring_stats()
6940 vsi->num_xdp_txq); in ice_update_vsi_ring_stats()
6944 net_stats = &vsi->net_stats; in ice_update_vsi_ring_stats()
6945 stats_prev = &vsi->net_stats_prev; in ice_update_vsi_ring_stats()
6948 * random value after PF reset. And as we increase the reported stat by in ice_update_vsi_ring_stats()
6949 * the Cur - Prev delta, we need to be sure that Prev is valid. If it's not, in ice_update_vsi_ring_stats()
6952 if (likely(pf->stat_prev_loaded)) { in ice_update_vsi_ring_stats()
6953 net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets; in ice_update_vsi_ring_stats()
6954 net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes; in ice_update_vsi_ring_stats()
6955 net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets; in ice_update_vsi_ring_stats()
6956 net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes; in ice_update_vsi_ring_stats()
6959 stats_prev->tx_packets = vsi_stats->tx_packets; in ice_update_vsi_ring_stats()
6960 stats_prev->tx_bytes = vsi_stats->tx_bytes; in ice_update_vsi_ring_stats()
6961 stats_prev->rx_packets = vsi_stats->rx_packets; in ice_update_vsi_ring_stats()
6962 stats_prev->rx_bytes = vsi_stats->rx_bytes; in ice_update_vsi_ring_stats()
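/*
 * Editor's sketch of the Prev/Cur bookkeeping above, generalized: only
 * trust the Cur - Prev delta once a previous snapshot exists, and
 * always refresh the snapshot for the next pass.
 */
static void example_accumulate(u64 *net, u64 *prev, u64 cur,
			       bool prev_loaded)
{
	if (prev_loaded)
		*net += cur - *prev;
	*prev = cur;
}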
6968 * ice_update_vsi_stats - Update VSI stats counters
6973 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats; in ice_update_vsi_stats()
6974 struct ice_eth_stats *cur_es = &vsi->eth_stats; in ice_update_vsi_stats()
6975 struct ice_pf *pf = vsi->back; in ice_update_vsi_stats()
6977 if (test_bit(ICE_VSI_DOWN, vsi->state) || in ice_update_vsi_stats()
6978 test_bit(ICE_CFG_BUSY, pf->state)) in ice_update_vsi_stats()
6987 cur_ns->tx_errors = cur_es->tx_errors; in ice_update_vsi_stats()
6988 cur_ns->rx_dropped = cur_es->rx_discards; in ice_update_vsi_stats()
6989 cur_ns->tx_dropped = cur_es->tx_discards; in ice_update_vsi_stats()
6990 cur_ns->multicast = cur_es->rx_multicast; in ice_update_vsi_stats()
6993 if (vsi->type == ICE_VSI_PF) { in ice_update_vsi_stats()
6994 cur_ns->rx_crc_errors = pf->stats.crc_errors; in ice_update_vsi_stats()
6995 cur_ns->rx_errors = pf->stats.crc_errors + in ice_update_vsi_stats()
6996 pf->stats.illegal_bytes + in ice_update_vsi_stats()
6997 pf->stats.rx_undersize + in ice_update_vsi_stats()
6998 pf->hw_csum_rx_error + in ice_update_vsi_stats()
6999 pf->stats.rx_jabber + in ice_update_vsi_stats()
7000 pf->stats.rx_fragments + in ice_update_vsi_stats()
7001 pf->stats.rx_oversize; in ice_update_vsi_stats()
7003 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; in ice_update_vsi_stats()
7008 * ice_update_pf_stats - Update PF port stats counters
7014 struct ice_hw *hw = &pf->hw; in ice_update_pf_stats()
7018 port = hw->port_info->lport; in ice_update_pf_stats()
7019 prev_ps = &pf->stats_prev; in ice_update_pf_stats()
7020 cur_ps = &pf->stats; in ice_update_pf_stats()
7022 if (ice_is_reset_in_progress(pf->state)) in ice_update_pf_stats()
7023 pf->stat_prev_loaded = false; in ice_update_pf_stats()
7025 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7026 &prev_ps->eth.rx_bytes, in ice_update_pf_stats()
7027 &cur_ps->eth.rx_bytes); in ice_update_pf_stats()
7029 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7030 &prev_ps->eth.rx_unicast, in ice_update_pf_stats()
7031 &cur_ps->eth.rx_unicast); in ice_update_pf_stats()
7033 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7034 &prev_ps->eth.rx_multicast, in ice_update_pf_stats()
7035 &cur_ps->eth.rx_multicast); in ice_update_pf_stats()
7037 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7038 &prev_ps->eth.rx_broadcast, in ice_update_pf_stats()
7039 &cur_ps->eth.rx_broadcast); in ice_update_pf_stats()
7041 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded, in ice_update_pf_stats()
7042 &prev_ps->eth.rx_discards, in ice_update_pf_stats()
7043 &cur_ps->eth.rx_discards); in ice_update_pf_stats()
7045 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7046 &prev_ps->eth.tx_bytes, in ice_update_pf_stats()
7047 &cur_ps->eth.tx_bytes); in ice_update_pf_stats()
7049 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7050 &prev_ps->eth.tx_unicast, in ice_update_pf_stats()
7051 &cur_ps->eth.tx_unicast); in ice_update_pf_stats()
7053 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7054 &prev_ps->eth.tx_multicast, in ice_update_pf_stats()
7055 &cur_ps->eth.tx_multicast); in ice_update_pf_stats()
7057 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7058 &prev_ps->eth.tx_broadcast, in ice_update_pf_stats()
7059 &cur_ps->eth.tx_broadcast); in ice_update_pf_stats()
7061 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7062 &prev_ps->tx_dropped_link_down, in ice_update_pf_stats()
7063 &cur_ps->tx_dropped_link_down); in ice_update_pf_stats()
7065 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7066 &prev_ps->rx_size_64, &cur_ps->rx_size_64); in ice_update_pf_stats()
7068 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7069 &prev_ps->rx_size_127, &cur_ps->rx_size_127); in ice_update_pf_stats()
7071 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7072 &prev_ps->rx_size_255, &cur_ps->rx_size_255); in ice_update_pf_stats()
7074 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7075 &prev_ps->rx_size_511, &cur_ps->rx_size_511); in ice_update_pf_stats()
7077 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7078 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); in ice_update_pf_stats()
7080 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7081 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522); in ice_update_pf_stats()
7083 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7084 &prev_ps->rx_size_big, &cur_ps->rx_size_big); in ice_update_pf_stats()
7086 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7087 &prev_ps->tx_size_64, &cur_ps->tx_size_64); in ice_update_pf_stats()
7089 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7090 &prev_ps->tx_size_127, &cur_ps->tx_size_127); in ice_update_pf_stats()
7092 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7093 &prev_ps->tx_size_255, &cur_ps->tx_size_255); in ice_update_pf_stats()
7095 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7096 &prev_ps->tx_size_511, &cur_ps->tx_size_511); in ice_update_pf_stats()
7098 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7099 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); in ice_update_pf_stats()
7101 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7102 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); in ice_update_pf_stats()
7104 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7105 &prev_ps->tx_size_big, &cur_ps->tx_size_big); in ice_update_pf_stats()
7107 fd_ctr_base = hw->fd_ctr_base; in ice_update_pf_stats()
7111 pf->stat_prev_loaded, &prev_ps->fd_sb_match, in ice_update_pf_stats()
7112 &cur_ps->fd_sb_match); in ice_update_pf_stats()
7113 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7114 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); in ice_update_pf_stats()
7116 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7117 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx); in ice_update_pf_stats()
7119 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7120 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx); in ice_update_pf_stats()
7122 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7123 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx); in ice_update_pf_stats()
7127 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7128 &prev_ps->crc_errors, &cur_ps->crc_errors); in ice_update_pf_stats()
7130 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7131 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes); in ice_update_pf_stats()
7133 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7134 &prev_ps->mac_local_faults, in ice_update_pf_stats()
7135 &cur_ps->mac_local_faults); in ice_update_pf_stats()
7137 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7138 &prev_ps->mac_remote_faults, in ice_update_pf_stats()
7139 &cur_ps->mac_remote_faults); in ice_update_pf_stats()
7141 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7142 &prev_ps->rx_undersize, &cur_ps->rx_undersize); in ice_update_pf_stats()
7144 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7145 &prev_ps->rx_fragments, &cur_ps->rx_fragments); in ice_update_pf_stats()
7147 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7148 &prev_ps->rx_oversize, &cur_ps->rx_oversize); in ice_update_pf_stats()
7150 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7151 &prev_ps->rx_jabber, &cur_ps->rx_jabber); in ice_update_pf_stats()
7153 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0; in ice_update_pf_stats()
7155 pf->stat_prev_loaded = true; in ice_update_pf_stats()
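/*
 * Editor's sketch (assumed shape) of the ice_stat_update40() calls used
 * throughout this function: read the 40-bit hardware counter and add a
 * wrap-safe delta to the running total; the first read after a reset
 * only primes prev.
 */
static void example_stat_update40(struct ice_hw *hw, u32 reg, bool loaded,
				  u64 *prev, u64 *cur)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	if (loaded)
		*cur += (new_data - *prev) & (BIT_ULL(40) - 1);
	*prev = new_data;
}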
7159 * ice_get_stats64 - get statistics for network device structure
7167 struct ice_vsi *vsi = np->vsi; in ice_get_stats64()
7169 vsi_stats = &vsi->net_stats; in ice_get_stats64()
7171 if (!vsi->num_txq || !vsi->num_rxq) in ice_get_stats64()
7179 if (!test_bit(ICE_VSI_DOWN, vsi->state)) in ice_get_stats64()
7181 stats->tx_packets = vsi_stats->tx_packets; in ice_get_stats64()
7182 stats->tx_bytes = vsi_stats->tx_bytes; in ice_get_stats64()
7183 stats->rx_packets = vsi_stats->rx_packets; in ice_get_stats64()
7184 stats->rx_bytes = vsi_stats->rx_bytes; in ice_get_stats64()
7190 stats->multicast = vsi_stats->multicast; in ice_get_stats64()
7191 stats->tx_errors = vsi_stats->tx_errors; in ice_get_stats64()
7192 stats->tx_dropped = vsi_stats->tx_dropped; in ice_get_stats64()
7193 stats->rx_errors = vsi_stats->rx_errors; in ice_get_stats64()
7194 stats->rx_dropped = vsi_stats->rx_dropped; in ice_get_stats64()
7195 stats->rx_crc_errors = vsi_stats->rx_crc_errors; in ice_get_stats64()
7196 stats->rx_length_errors = vsi_stats->rx_length_errors; in ice_get_stats64()
7200 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
7207 if (!vsi->netdev) in ice_napi_disable_all()
7211 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; in ice_napi_disable_all()
7213 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring) in ice_napi_disable_all()
7214 napi_disable(&q_vector->napi); in ice_napi_disable_all()
7216 cancel_work_sync(&q_vector->tx.dim.work); in ice_napi_disable_all()
7217 cancel_work_sync(&q_vector->rx.dim.work); in ice_napi_disable_all()
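		/* (editor's note: the DIM work items are cancelled only
		 * after napi_disable() so that a late-running worker cannot
		 * rewrite ITR for a queue that is being brought down)
		 */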
7222 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
7223 * @vsi: the VSI being un-configured
7227 struct ice_pf *pf = vsi->back; in ice_vsi_dis_irq()
7228 struct ice_hw *hw = &pf->hw; in ice_vsi_dis_irq()
7235 if (vsi->rx_rings) { in ice_vsi_dis_irq()
7237 if (vsi->rx_rings[i]) { in ice_vsi_dis_irq()
7240 reg = vsi->rx_rings[i]->reg_idx; in ice_vsi_dis_irq()
7250 if (!vsi->q_vectors[i]) in ice_vsi_dis_irq()
7252 wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); in ice_vsi_dis_irq()
7258 if (vsi->type == ICE_VSI_VF) in ice_vsi_dis_irq()
7262 synchronize_irq(vsi->q_vectors[i]->irq.virq); in ice_vsi_dis_irq()
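/* Editor's note: the Rx loop above is abridged in this excerpt. Based on
 * the register names shown, the elided body masks the queue's interrupt
 * cause enable, roughly:
 *
 *	u32 val = rd32(hw, QINT_RQCTL(reg));
 *
 *	val &= ~QINT_RQCTL_CAUSE_ENA_M;
 *	wr32(hw, QINT_RQCTL(reg), val);
 */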
7266 * ice_down - Shutdown the connection
7269 * Caller of this function is expected to set the vsi->state ICE_VSI_DOWN bit
7275 WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state)); in ice_down()
7277 if (vsi->netdev) { in ice_down()
7279 ice_ptp_link_change(vsi->back, false); in ice_down()
7280 netif_carrier_off(vsi->netdev); in ice_down()
7281 netif_tx_disable(vsi->netdev); in ice_down()
7288 netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n", in ice_down()
7289 vsi->vsi_num, tx_err); in ice_down()
7290 if (!tx_err && vsi->xdp_rings) { in ice_down()
7293 netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n", in ice_down()
7294 vsi->vsi_num, tx_err); in ice_down()
7299 netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n", in ice_down()
7300 vsi->vsi_num, rx_err); in ice_down()
7305 ice_clean_tx_ring(vsi->tx_rings[i]); in ice_down()
7307 if (vsi->xdp_rings) in ice_down()
7309 ice_clean_tx_ring(vsi->xdp_rings[i]); in ice_down()
7312 ice_clean_rx_ring(vsi->rx_rings[i]); in ice_down()
7315 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n", in ice_down()
7316 vsi->vsi_num, vsi->vsw->sw_id); in ice_down()
7317 return -EIO; in ice_down()
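/* Editor's note: hypothetical caller sketch showing the contract the
 * WARN_ON() above enforces - the DOWN bit must be set before calling:
 *
 *	set_bit(ICE_VSI_DOWN, vsi->state);
 *	err = ice_down(vsi);
 */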
7324 * ice_down_up - shutdown the VSI connection and bring it up
7332 if (test_and_set_bit(ICE_VSI_DOWN, vsi->state)) in ice_down_up()
7341 netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n"); in ice_down_up()
7349 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
7358 if (!vsi->num_txq) { in ice_vsi_setup_tx_rings()
7359 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n", in ice_vsi_setup_tx_rings()
7360 vsi->vsi_num); in ice_vsi_setup_tx_rings()
7361 return -EINVAL; in ice_vsi_setup_tx_rings()
7365 struct ice_tx_ring *ring = vsi->tx_rings[i]; in ice_vsi_setup_tx_rings()
7368 return -EINVAL; in ice_vsi_setup_tx_rings()
7370 if (vsi->netdev) in ice_vsi_setup_tx_rings()
7371 ring->netdev = vsi->netdev; in ice_vsi_setup_tx_rings()
7381 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
7390 if (!vsi->num_rxq) { in ice_vsi_setup_rx_rings()
7391 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n", in ice_vsi_setup_rx_rings()
7392 vsi->vsi_num); in ice_vsi_setup_rx_rings()
7393 return -EINVAL; in ice_vsi_setup_rx_rings()
7397 struct ice_rx_ring *ring = vsi->rx_rings[i]; in ice_vsi_setup_rx_rings()
7400 return -EINVAL; in ice_vsi_setup_rx_rings()
7402 if (vsi->netdev) in ice_vsi_setup_rx_rings()
7403 ring->netdev = vsi->netdev; in ice_vsi_setup_rx_rings()
7413 * ice_vsi_open_ctrl - open control VSI for use
7423 struct ice_pf *pf = vsi->back; in ice_vsi_open_ctrl()
7441 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl", in ice_vsi_open_ctrl()
7453 clear_bit(ICE_VSI_DOWN, vsi->state); in ice_vsi_open_ctrl()
7469 * ice_vsi_open - Called when a network interface is made active
7479 struct ice_pf *pf = vsi->back; in ice_vsi_open()
7495 snprintf(int_name, sizeof(int_name) - 1, "%s-%s", in ice_vsi_open()
7496 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name); in ice_vsi_open()
7501 if (bitmap_empty(pf->txtime_txqs, pf->max_pf_txqs)) in ice_vsi_open()
7502 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); in ice_vsi_open()
7504 if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_SF) { in ice_vsi_open()
7506 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); in ice_vsi_open()
7510 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); in ice_vsi_open()
7536 * ice_vsi_release_all - Delete all VSIs
7543 if (!pf->vsi) in ice_vsi_release_all()
7547 if (!pf->vsi[i]) in ice_vsi_release_all()
7550 if (pf->vsi[i]->type == ICE_VSI_CHNL) in ice_vsi_release_all()
7553 err = ice_vsi_release(pf->vsi[i]); in ice_vsi_release_all()
7555 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", in ice_vsi_release_all()
7556 i, err, pf->vsi[i]->vsi_num); in ice_vsi_release_all()
7561 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
7565 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
7573 struct ice_vsi *vsi = pf->vsi[i]; in ice_vsi_rebuild_by_type()
7575 if (!vsi || vsi->type != type) in ice_vsi_rebuild_by_type()
7582 err, vsi->idx, ice_vsi_type_str(type)); in ice_vsi_rebuild_by_type()
7587 err = ice_replay_vsi(&pf->hw, vsi->idx); in ice_vsi_rebuild_by_type()
7590 err, vsi->idx, ice_vsi_type_str(type)); in ice_vsi_rebuild_by_type()
7594 /* Re-map HW VSI number, using VSI handle that has been in ice_vsi_rebuild_by_type()
7597 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); in ice_vsi_rebuild_by_type()
7603 err, vsi->idx, ice_vsi_type_str(type)); in ice_vsi_rebuild_by_type()
7607 dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx, in ice_vsi_rebuild_by_type()
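/* Editor's note: the per-VSI steps in the loop above run in this order:
 * ice_vsi_rebuild() -> ice_replay_vsi() -> re-read the HW VSI number ->
 * enable the VSI (the rebuild and enable calls themselves are elided in
 * this excerpt).
 */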
7615 * ice_update_pf_netdev_link - Update PF netdev link status
7624 struct ice_vsi *vsi = pf->vsi[i]; in ice_update_pf_netdev_link()
7626 if (!vsi || vsi->type != ICE_VSI_PF) in ice_update_pf_netdev_link()
7629 ice_get_link_status(pf->vsi[i]->port_info, &link_up); in ice_update_pf_netdev_link()
7631 netif_carrier_on(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7632 netif_tx_wake_all_queues(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7634 netif_carrier_off(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7635 netif_tx_stop_all_queues(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7641 * ice_rebuild - rebuild after reset
7643 * @reset_type: type of reset
7648 * to reset/rebuild all the VF VSI twice.
7654 struct ice_hw *hw = &pf->hw; in ice_rebuild()
7658 if (test_bit(ICE_DOWN, pf->state)) in ice_rebuild()
7665 /* If an EMP reset has occurred, any previously pending flash in ice_rebuild()
7666 * update will have completed. We no longer know whether or in ice_rebuild()
7667 * not the NVM update EMP reset is restricted. in ice_rebuild()
7669 pf->fw_emp_reset_disabled = false; in ice_rebuild()
7686 /* Reload DDP Package after CORER/GLOBR reset */ in ice_rebuild()
7718 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); in ice_rebuild()
7722 err = ice_sched_init_port(hw->port_info); in ice_rebuild()
7733 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { in ice_rebuild()
7738 guar = hw->func_caps.fd_fltr_guar; in ice_rebuild()
7739 b_effort = hw->func_caps.fd_fltr_best_effort; in ice_rebuild()
7748 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) in ice_rebuild()
7755 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_rebuild()
7778 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { in ice_rebuild()
7786 if (hw->fdir_prof) in ice_rebuild()
7795 if (vsi && vsi->netdev) in ice_rebuild()
7796 netif_device_attach(vsi->netdev); in ice_rebuild()
7810 /* if we get here, reset flow is successful */ in ice_rebuild()
7811 clear_bit(ICE_RESET_FAILED, pf->state); in ice_rebuild()
7828 set_bit(ICE_RESET_FAILED, pf->state); in ice_rebuild()
7831 set_bit(ICE_NEEDS_RESTART, pf->state); in ice_rebuild()
7836 * ice_change_mtu - NDO callback to change the MTU
7845 struct ice_vsi *vsi = np->vsi; in ice_change_mtu()
7846 struct ice_pf *pf = vsi->back; in ice_change_mtu()
7851 if (new_mtu == (int)netdev->mtu) { in ice_change_mtu()
7852 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu); in ice_change_mtu()
7856 prog = vsi->xdp_prog; in ice_change_mtu()
7857 if (prog && !prog->aux->xdp_has_frags) { in ice_change_mtu()
7862 frame_size - ICE_ETH_PKT_HDR_PAD); in ice_change_mtu()
7863 return -EINVAL; in ice_change_mtu()
7865 } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) { in ice_change_mtu()
7867 netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n", in ice_change_mtu()
7868 ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD); in ice_change_mtu()
7869 return -EINVAL; in ice_change_mtu()
7873 /* if a reset is in progress, wait for some time for it to complete */ in ice_change_mtu()
7875 if (ice_is_reset_in_progress(pf->state)) { in ice_change_mtu()
7886 return -EBUSY; in ice_change_mtu()
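	/* (editor's note: the surrounding loop - elided in this excerpt -
	 * retries the reset-in-progress check roughly 100 times with
	 * usleep_range(1000, 2000) between attempts before giving up with
	 * -EBUSY)
	 */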
7889 WRITE_ONCE(netdev->mtu, (unsigned int)new_mtu); in ice_change_mtu()
7895 set_bit(ICE_FLAG_MTU_CHANGED, pf->flags); in ice_change_mtu()
7901 * ice_set_rss_lut - Set RSS LUT
7911 struct ice_hw *hw = &vsi->back->hw; in ice_set_rss_lut()
7915 return -EINVAL; in ice_set_rss_lut()
7917 params.vsi_handle = vsi->idx; in ice_set_rss_lut()
7919 params.lut_type = vsi->rss_lut_type; in ice_set_rss_lut()
7924 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n", in ice_set_rss_lut()
7925 status, libie_aq_str(hw->adminq.sq_last_status)); in ice_set_rss_lut()
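/* Editor's note: hypothetical caller sketch - spread the LUT evenly
 * across the active Rx queues before programming it (the driver's
 * ice_fill_rss_lut() helper performs this same distribution):
 *
 *	for (i = 0; i < vsi->rss_table_size; i++)
 *		lut[i] = i % vsi->rss_size;
 *	err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
 */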
7931 * ice_set_rss_key - Set RSS key
7939 struct ice_hw *hw = &vsi->back->hw; in ice_set_rss_key()
7943 return -EINVAL; in ice_set_rss_key()
7945 status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); in ice_set_rss_key()
7947 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n", in ice_set_rss_key()
7948 status, libie_aq_str(hw->adminq.sq_last_status)); in ice_set_rss_key()
7954 * ice_get_rss_lut - Get RSS LUT
7964 struct ice_hw *hw = &vsi->back->hw; in ice_get_rss_lut()
7968 return -EINVAL; in ice_get_rss_lut()
7970 params.vsi_handle = vsi->idx; in ice_get_rss_lut()
7972 params.lut_type = vsi->rss_lut_type; in ice_get_rss_lut()
7977 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n", in ice_get_rss_lut()
7978 status, libie_aq_str(hw->adminq.sq_last_status)); in ice_get_rss_lut()
7984 * ice_get_rss_key - Get RSS key
7992 struct ice_hw *hw = &vsi->back->hw; in ice_get_rss_key()
7996 return -EINVAL; in ice_get_rss_key()
7998 status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); in ice_get_rss_key()
8000 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n", in ice_get_rss_key()
8001 status, libie_aq_str(hw->adminq.sq_last_status)); in ice_get_rss_key()
8007 * ice_set_rss_hfunc - Set RSS HASH function
8015 struct ice_hw *hw = &vsi->back->hw; in ice_set_rss_hfunc()
8020 if (hfunc == vsi->rss_hfunc) in ice_set_rss_hfunc()
8025 return -EOPNOTSUPP; in ice_set_rss_hfunc()
8029 return -ENOMEM; in ice_set_rss_hfunc()
8031 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID); in ice_set_rss_hfunc()
8032 ctx->info.q_opt_rss = vsi->info.q_opt_rss; in ice_set_rss_hfunc()
8033 ctx->info.q_opt_rss &= ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M; in ice_set_rss_hfunc()
8034 ctx->info.q_opt_rss |= in ice_set_rss_hfunc()
8035 FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hfunc); in ice_set_rss_hfunc()
8036 ctx->info.q_opt_tc = vsi->info.q_opt_tc; in ice_set_rss_hfunc()
8037 ctx->info.q_opt_flags = vsi->info.q_opt_rss; in ice_set_rss_hfunc()
8039 err = ice_update_vsi(hw, vsi->idx, ctx, NULL); in ice_set_rss_hfunc()
8041 dev_err(ice_pf_to_dev(vsi->back), "Failed to configure RSS hash for VSI %d, error %d\n", in ice_set_rss_hfunc()
8042 vsi->vsi_num, err); in ice_set_rss_hfunc()
8044 vsi->info.q_opt_rss = ctx->info.q_opt_rss; in ice_set_rss_hfunc()
8045 vsi->rss_hfunc = hfunc; in ice_set_rss_hfunc()
8046 netdev_info(vsi->netdev, "Hash function set to: %sToeplitz\n", in ice_set_rss_hfunc()
8060 * ice_bridge_getlink - Get the hardware bridge mode
8075 struct ice_vsi *vsi = np->vsi; in ice_bridge_getlink()
8076 struct ice_pf *pf = vsi->back; in ice_bridge_getlink()
8079 bmode = pf->first_sw->bridge_mode; in ice_bridge_getlink()
8086 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
8095 struct ice_hw *hw = &vsi->back->hw; in ice_vsi_update_bridge_mode()
8099 vsi_props = &vsi->info; in ice_vsi_update_bridge_mode()
8103 return -ENOMEM; in ice_vsi_update_bridge_mode()
8105 ctxt->info = vsi->info; in ice_vsi_update_bridge_mode()
8109 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; in ice_vsi_update_bridge_mode()
8112 ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; in ice_vsi_update_bridge_mode()
8113 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); in ice_vsi_update_bridge_mode()
8115 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); in ice_vsi_update_bridge_mode()
8117 dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n", in ice_vsi_update_bridge_mode()
8118 bmode, ret, libie_aq_str(hw->adminq.sq_last_status)); in ice_vsi_update_bridge_mode()
8122 vsi_props->sw_flags = ctxt->info.sw_flags; in ice_vsi_update_bridge_mode()
8130 * ice_bridge_setlink - Set the hardware bridge mode
8147 struct ice_pf *pf = np->vsi->back; in ice_bridge_setlink()
8149 struct ice_hw *hw = &pf->hw; in ice_bridge_setlink()
8153 pf_sw = pf->first_sw; in ice_bridge_setlink()
8157 return -EINVAL; in ice_bridge_setlink()
8163 return -EINVAL; in ice_bridge_setlink()
8165 if (mode == pf_sw->bridge_mode) in ice_bridge_setlink()
8171 if (!pf->vsi[v]) in ice_bridge_setlink()
8173 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); in ice_bridge_setlink()
8178 hw->evb_veb = (mode == BRIDGE_MODE_VEB); in ice_bridge_setlink()
8186 libie_aq_str(hw->adminq.sq_last_status)); in ice_bridge_setlink()
8187 /* revert hw->evb_veb */ in ice_bridge_setlink()
8188 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); in ice_bridge_setlink()
8192 pf_sw->bridge_mode = mode; in ice_bridge_setlink()
8199 * ice_tx_timeout - Respond to a Tx Hang
8207 struct ice_vsi *vsi = np->vsi; in ice_tx_timeout()
8208 struct ice_pf *pf = vsi->back; in ice_tx_timeout()
8211 pf->tx_timeout_count++; in ice_tx_timeout()
8215 * need to reset and rebuild in ice_tx_timeout()
8225 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) in ice_tx_timeout()
8226 if (txqueue == vsi->tx_rings[i]->q_index) { in ice_tx_timeout()
8227 tx_ring = vsi->tx_rings[i]; in ice_tx_timeout()
8231 /* Reset recovery level if enough time has elapsed after last timeout. in ice_tx_timeout()
8232 * Also ensure no new reset action happens before next timeout period. in ice_tx_timeout()
8234 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) in ice_tx_timeout()
8235 pf->tx_timeout_recovery_level = 1; in ice_tx_timeout()
8236 else if (time_before(jiffies, (pf->tx_timeout_last_recovery + in ice_tx_timeout()
8237 netdev->watchdog_timeo))) in ice_tx_timeout()
8241 struct ice_hw *hw = &pf->hw; in ice_tx_timeout()
8245 rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue]))); in ice_tx_timeout()
8247 intr = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); in ice_tx_timeout()
8250 vsi->vsi_num, txqueue, tx_ring->next_to_clean, in ice_tx_timeout()
8251 head, tx_ring->next_to_use, intr); in ice_tx_timeout()
8253 ice_prep_tx_hang_report(pf, tx_ring, vsi->vsi_num, head, intr); in ice_tx_timeout()
8256 pf->tx_timeout_last_recovery = jiffies; in ice_tx_timeout()
8258 pf->tx_timeout_recovery_level, txqueue); in ice_tx_timeout()
8260 switch (pf->tx_timeout_recovery_level) { in ice_tx_timeout()
8262 set_bit(ICE_PFR_REQ, pf->state); in ice_tx_timeout()
8265 set_bit(ICE_CORER_REQ, pf->state); in ice_tx_timeout()
8268 set_bit(ICE_GLOBR_REQ, pf->state); in ice_tx_timeout()
8272 set_bit(ICE_DOWN, pf->state); in ice_tx_timeout()
8273 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); in ice_tx_timeout()
8274 set_bit(ICE_SERVICE_DIS, pf->state); in ice_tx_timeout()
8279 pf->tx_timeout_recovery_level++; in ice_tx_timeout()
8283 * ice_setup_tc_cls_flower - flower classifier offloads
8298 struct ice_vsi *vsi = np->vsi; in ice_setup_tc_cls_flower()
8300 if (cls_flower->common.chain_index) in ice_setup_tc_cls_flower()
8301 return -EOPNOTSUPP; in ice_setup_tc_cls_flower()
8303 switch (cls_flower->command) { in ice_setup_tc_cls_flower()
8309 return -EINVAL; in ice_setup_tc_cls_flower()
8314 * ice_setup_tc_block_cb_ingress - callback handler for ingress TC block
8329 return ice_setup_tc_cls_flower(np, np->vsi->netdev, in ice_setup_tc_block_cb_ingress()
8332 return -EOPNOTSUPP; in ice_setup_tc_block_cb_ingress()
8337 * ice_setup_tc_block_cb_egress - callback handler for egress TC block
8352 return ice_setup_tc_cls_flower(np, np->vsi->netdev, in ice_setup_tc_block_cb_egress()
8355 return -EOPNOTSUPP; in ice_setup_tc_block_cb_egress()
8360 * ice_validate_mqprio_qopt - Validate TCF input parameters
8373 struct ice_pf *pf = vsi->back; in ice_validate_mqprio_qopt()
8380 if (vsi->type != ICE_VSI_PF) in ice_validate_mqprio_qopt()
8381 return -EINVAL; in ice_validate_mqprio_qopt()
8383 if (mqprio_qopt->qopt.offset[0] != 0 || in ice_validate_mqprio_qopt()
8384 mqprio_qopt->qopt.num_tc < 1 || in ice_validate_mqprio_qopt()
8385 mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC) in ice_validate_mqprio_qopt()
8386 return -EINVAL; in ice_validate_mqprio_qopt()
8389 vsi->ch_rss_size = 0; in ice_validate_mqprio_qopt()
8390 num_tc = mqprio_qopt->qopt.num_tc; in ice_validate_mqprio_qopt()
8394 int qcount = mqprio_qopt->qopt.count[i]; in ice_validate_mqprio_qopt()
8398 return -EINVAL; in ice_validate_mqprio_qopt()
8405 return -EINVAL; in ice_validate_mqprio_qopt()
8414 return -EINVAL; in ice_validate_mqprio_qopt()
8419 return -EINVAL; in ice_validate_mqprio_qopt()
8430 max_rate = mqprio_qopt->max_rate[i]; in ice_validate_mqprio_qopt()
8434 min_rate = mqprio_qopt->min_rate[i]; in ice_validate_mqprio_qopt()
8441 return -EINVAL; in ice_validate_mqprio_qopt()
8447 return -EINVAL; in ice_validate_mqprio_qopt()
8454 return -EINVAL; in ice_validate_mqprio_qopt()
8461 return -EINVAL; in ice_validate_mqprio_qopt()
8471 return -EINVAL; in ice_validate_mqprio_qopt()
8474 if (i >= mqprio_qopt->qopt.num_tc - 1) in ice_validate_mqprio_qopt()
8476 if (mqprio_qopt->qopt.offset[i + 1] != in ice_validate_mqprio_qopt()
8477 (mqprio_qopt->qopt.offset[i] + qcount)) in ice_validate_mqprio_qopt()
8478 return -EINVAL; in ice_validate_mqprio_qopt()
8480 if (vsi->num_rxq < in ice_validate_mqprio_qopt()
8481 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) in ice_validate_mqprio_qopt()
8482 return -EINVAL; in ice_validate_mqprio_qopt()
8483 if (vsi->num_txq < in ice_validate_mqprio_qopt()
8484 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) in ice_validate_mqprio_qopt()
8485 return -EINVAL; in ice_validate_mqprio_qopt()
8490 return -EINVAL; in ice_validate_mqprio_qopt()
8493 /* make sure vsi->ch_rss_size is set correctly based on TC's qcount */ in ice_validate_mqprio_qopt()
8494 vsi->ch_rss_size = max_rss_q_cnt; in ice_validate_mqprio_qopt()
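/* Editor's note: an illustrative mqprio request this validator would see
 * (values arbitrary):
 *
 *   tc qdisc add dev <iface> root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
 *      queues 4@0 4@4 hw 1 mode channel
 *
 * i.e. qopt.num_tc = 2, qopt.count[] = {4, 4}, qopt.offset[] = {0, 4};
 * the checks above require contiguous offsets (offset[i + 1] ==
 * offset[i] + count[i]) and every range to fit within vsi->num_txq and
 * vsi->num_rxq.
 */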
8500 * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
8511 if (!(vsi->num_gfltr || vsi->num_bfltr)) in ice_add_vsi_to_fdir()
8512 return -EINVAL; in ice_add_vsi_to_fdir()
8514 hw = &pf->hw; in ice_add_vsi_to_fdir()
8520 if (!(hw->fdir_prof && hw->fdir_prof[flow] && in ice_add_vsi_to_fdir()
8521 hw->fdir_prof[flow]->cnt)) in ice_add_vsi_to_fdir()
8529 prof = hw->fdir_prof[flow]; in ice_add_vsi_to_fdir()
8531 prof->prof_id[tun], in ice_add_vsi_to_fdir()
8532 prof->vsi_h[0], vsi->idx, in ice_add_vsi_to_fdir()
8533 prio, prof->fdir_seg[tun], in ice_add_vsi_to_fdir()
8537 vsi->idx, flow); in ice_add_vsi_to_fdir()
8541 prof->entry_h[prof->cnt][tun] = entry_h; in ice_add_vsi_to_fdir()
8545 prof->vsi_h[prof->cnt] = vsi->idx; in ice_add_vsi_to_fdir()
8546 prof->cnt++; in ice_add_vsi_to_fdir()
8549 dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx, in ice_add_vsi_to_fdir()
8554 dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx); in ice_add_vsi_to_fdir()
8560 * ice_add_channel - add a channel by adding VSI
8572 if (ch->type != ICE_VSI_CHNL) { in ice_add_channel()
8573 dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type); in ice_add_channel()
8574 return -EINVAL; in ice_add_channel()
8577 vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch); in ice_add_channel()
8578 if (!vsi || vsi->type != ICE_VSI_CHNL) { in ice_add_channel()
8580 return -EINVAL; in ice_add_channel()
8585 ch->sw_id = sw_id; in ice_add_channel()
8586 ch->vsi_num = vsi->vsi_num; in ice_add_channel()
8587 ch->info.mapping_flags = vsi->info.mapping_flags; in ice_add_channel()
8588 ch->ch_vsi = vsi; in ice_add_channel()
8590 vsi->ch = ch; in ice_add_channel()
8592 memcpy(&ch->info.q_mapping, &vsi->info.q_mapping, in ice_add_channel()
8593 sizeof(vsi->info.q_mapping)); in ice_add_channel()
8594 memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping, in ice_add_channel()
8595 sizeof(vsi->info.tc_mapping)); in ice_add_channel()
8611 for (i = 0; i < ch->num_txq; i++) { in ice_chnl_cfg_res()
8617 tx_ring = vsi->tx_rings[ch->base_q + i]; in ice_chnl_cfg_res()
8618 rx_ring = vsi->rx_rings[ch->base_q + i]; in ice_chnl_cfg_res()
8623 tx_ring->ch = ch; in ice_chnl_cfg_res()
8624 rx_ring->ch = ch; in ice_chnl_cfg_res()
8627 tx_q_vector = tx_ring->q_vector; in ice_chnl_cfg_res()
8628 rx_q_vector = rx_ring->q_vector; in ice_chnl_cfg_res()
8633 tx_q_vector->ch = ch; in ice_chnl_cfg_res()
8635 rc = &tx_q_vector->tx; in ice_chnl_cfg_res()
8637 ice_write_itr(rc, rc->itr_setting); in ice_chnl_cfg_res()
8640 rx_q_vector->ch = ch; in ice_chnl_cfg_res()
8642 rc = &rx_q_vector->rx; in ice_chnl_cfg_res()
8644 ice_write_itr(rc, rc->itr_setting); in ice_chnl_cfg_res()
8648 /* it is safe to assume that, if the channel has non-zero num_t[r]xq, then in ice_chnl_cfg_res()
8649 * the GLINT_ITR register would have been written to perform in-context in ice_chnl_cfg_res()
8652 if (ch->num_txq || ch->num_rxq) in ice_chnl_cfg_res()
8653 ice_flush(&vsi->back->hw); in ice_chnl_cfg_res()
8657 * ice_cfg_chnl_all_res - configure channel resources
8661 * This function configures channel specific resources such as flow-director
8674 * ice_setup_hw_channel - setup new channel
8691 ch->base_q = vsi->next_base_q; in ice_setup_hw_channel()
8692 ch->type = type; in ice_setup_hw_channel()
8706 vsi->next_base_q = vsi->next_base_q + ch->num_rxq; in ice_setup_hw_channel()
8707 dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num, in ice_setup_hw_channel()
8708 ch->num_rxq); in ice_setup_hw_channel()
8714 * ice_setup_channel - setup new channel using uplink element
8730 if (vsi->type != ICE_VSI_PF) { in ice_setup_channel()
8731 dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type); in ice_setup_channel()
8735 sw_id = pf->first_sw->sw_id; in ice_setup_channel()
8745 return ch->ch_vsi ? true : false; in ice_setup_channel()
8749 * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
8767 * ice_create_q_channel - function to create channel
8776 struct ice_pf *pf = vsi->back; in ice_create_q_channel()
8780 return -EINVAL; in ice_create_q_channel()
8783 if (!ch->num_txq || !ch->num_rxq) { in ice_create_q_channel()
8784 dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq); in ice_create_q_channel()
8785 return -EINVAL; in ice_create_q_channel()
8788 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) { in ice_create_q_channel()
8790 vsi->cnt_q_avail, ch->num_txq); in ice_create_q_channel()
8791 return -EINVAL; in ice_create_q_channel()
8796 return -EINVAL; in ice_create_q_channel()
8799 if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) { in ice_create_q_channel()
8802 ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate, in ice_create_q_channel()
8803 ch->min_tx_rate); in ice_create_q_channel()
8806 ch->max_tx_rate, ch->ch_vsi->vsi_num); in ice_create_q_channel()
8809 ch->max_tx_rate, ch->ch_vsi->vsi_num); in ice_create_q_channel()
8812 vsi->cnt_q_avail -= ch->num_txq; in ice_create_q_channel()
8818 * ice_rem_all_chnl_fltrs - removes all channel filters
8819 * @pf: ptr to PF, TC-flower based filters are tracked at PF level
8822 * tc-flower based filters
8831 &pf->tc_flower_fltr_list, in ice_rem_all_chnl_fltrs()
8840 rule.rid = fltr->rid; in ice_rem_all_chnl_fltrs()
8841 rule.rule_id = fltr->rule_id; in ice_rem_all_chnl_fltrs()
8842 rule.vsi_handle = fltr->dest_vsi_handle; in ice_rem_all_chnl_fltrs()
8843 status = ice_rem_adv_rule_by_id(&pf->hw, &rule); in ice_rem_all_chnl_fltrs()
8845 if (status == -ENOENT) in ice_rem_all_chnl_fltrs()
8851 } else if (fltr->dest_vsi) { in ice_rem_all_chnl_fltrs()
8853 if (fltr->dest_vsi->type == ICE_VSI_CHNL) { in ice_rem_all_chnl_fltrs()
8854 u32 flags = fltr->flags; in ice_rem_all_chnl_fltrs()
8856 fltr->dest_vsi->num_chnl_fltr--; in ice_rem_all_chnl_fltrs()
8859 pf->num_dmac_chnl_fltrs--; in ice_rem_all_chnl_fltrs()
8863 hlist_del(&fltr->tc_flower_node); in ice_rem_all_chnl_fltrs()
8869 * ice_remove_q_channels - Remove queue channels for the TCs
8878 struct ice_pf *pf = vsi->back; in ice_remove_q_channels()
8881 /* remove all tc-flower based filters if they are channel filters only */ in ice_remove_q_channels()
8886 if (vsi->netdev->features & NETIF_F_NTUPLE) { in ice_remove_q_channels()
8887 struct ice_hw *hw = &pf->hw; in ice_remove_q_channels()
8889 mutex_lock(&hw->fdir_fltr_lock); in ice_remove_q_channels()
8891 mutex_unlock(&hw->fdir_fltr_lock); in ice_remove_q_channels()
8895 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { in ice_remove_q_channels()
8898 list_del(&ch->list); in ice_remove_q_channels()
8899 ch_vsi = ch->ch_vsi; in ice_remove_q_channels()
8905 /* Reset queue contexts */ in ice_remove_q_channels()
8906 for (i = 0; i < ch->num_rxq; i++) { in ice_remove_q_channels()
8910 tx_ring = vsi->tx_rings[ch->base_q + i]; in ice_remove_q_channels()
8911 rx_ring = vsi->rx_rings[ch->base_q + i]; in ice_remove_q_channels()
8913 tx_ring->ch = NULL; in ice_remove_q_channels()
8914 if (tx_ring->q_vector) in ice_remove_q_channels()
8915 tx_ring->q_vector->ch = NULL; in ice_remove_q_channels()
8918 rx_ring->ch = NULL; in ice_remove_q_channels()
8919 if (rx_ring->q_vector) in ice_remove_q_channels()
8920 rx_ring->q_vector->ch = NULL; in ice_remove_q_channels()
8925 ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx); in ice_remove_q_channels()
8928 ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx); in ice_remove_q_channels()
8931 ice_vsi_delete(ch->ch_vsi); in ice_remove_q_channels()
8939 vsi->tc_map_vsi[i] = NULL; in ice_remove_q_channels()
8941 /* reset all TC information on the main VSI */ in ice_remove_q_channels()
8942 vsi->all_enatc = 0; in ice_remove_q_channels()
8943 vsi->all_numtc = 0; in ice_remove_q_channels()
8947 * ice_rebuild_channels - rebuild channel
8966 if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) || in ice_rebuild_channels()
8967 main_vsi->old_numtc == 1) in ice_rebuild_channels()
8973 err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc); in ice_rebuild_channels()
8976 main_vsi->old_ena_tc, main_vsi->vsi_num); in ice_rebuild_channels()
8984 vsi = pf->vsi[i]; in ice_rebuild_channels()
8985 if (!vsi || vsi->type != ICE_VSI_CHNL) in ice_rebuild_channels()
8988 type = vsi->type; in ice_rebuild_channels()
8994 ice_vsi_type_str(type), vsi->idx, err); in ice_rebuild_channels()
8998 /* Re-map HW VSI number, using VSI handle that has been in ice_rebuild_channels()
9001 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); in ice_rebuild_channels()
9004 err = ice_replay_vsi(&pf->hw, vsi->idx); in ice_rebuild_channels()
9007 ice_vsi_type_str(type), err, vsi->idx); in ice_rebuild_channels()
9012 ice_vsi_type_str(type), vsi->idx); in ice_rebuild_channels()
9017 main_vsi->tc_map_vsi[tc_idx++] = vsi; in ice_rebuild_channels()
9023 list_for_each_entry(ch, &main_vsi->ch_list, list) { in ice_rebuild_channels()
9026 ch_vsi = ch->ch_vsi; in ice_rebuild_channels()
9033 /* replay BW rate limit if it is non-zero */ in ice_rebuild_channels()
9034 if (!ch->max_tx_rate && !ch->min_tx_rate) in ice_rebuild_channels()
9037 err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate, in ice_rebuild_channels()
9038 ch->min_tx_rate); in ice_rebuild_channels()
9041 err, ch->max_tx_rate, ch->min_tx_rate, in ice_rebuild_channels()
9042 ch_vsi->vsi_num); in ice_rebuild_channels()
9045 ch->max_tx_rate, ch->min_tx_rate, in ice_rebuild_channels()
9046 ch_vsi->vsi_num); in ice_rebuild_channels()
9050 if (main_vsi->ch_rss_size) in ice_rebuild_channels()
9061 * ice_create_q_channels - Add queue channel for the given TCs
9068 struct ice_pf *pf = vsi->back; in ice_create_q_channels()
9073 if (!(vsi->all_enatc & BIT(i))) in ice_create_q_channels()
9078 ret = -ENOMEM; in ice_create_q_channels()
9081 INIT_LIST_HEAD(&ch->list); in ice_create_q_channels()
9082 ch->num_rxq = vsi->mqprio_qopt.qopt.count[i]; in ice_create_q_channels()
9083 ch->num_txq = vsi->mqprio_qopt.qopt.count[i]; in ice_create_q_channels()
9084 ch->base_q = vsi->mqprio_qopt.qopt.offset[i]; in ice_create_q_channels()
9085 ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i]; in ice_create_q_channels()
9086 ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i]; in ice_create_q_channels()
9089 if (ch->max_tx_rate) in ice_create_q_channels()
9090 ch->max_tx_rate = div_u64(ch->max_tx_rate, in ice_create_q_channels()
9092 if (ch->min_tx_rate) in ice_create_q_channels()
9093 ch->min_tx_rate = div_u64(ch->min_tx_rate, in ice_create_q_channels()
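		/* (editor's note: the elided divisor - the driver's
		 * ICE_BW_KBPS_DIVISOR - converts the bytes/s rates supplied
		 * by mqprio into the Kbps the scheduler expects; 1 Kbit/s =
		 * 125 bytes/s, so e.g. 125,000,000 bytes/s = 1,000,000 Kbps
		 * = 1 Gbit/s)
		 */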
9103 list_add_tail(&ch->list, &vsi->ch_list); in ice_create_q_channels()
9104 vsi->tc_map_vsi[i] = ch->ch_vsi; in ice_create_q_channels()
9106 "successfully created channel: VSI %p\n", ch->ch_vsi); in ice_create_q_channels()
9117 * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
9125 struct ice_vsi *vsi = np->vsi; in ice_setup_tc_mqprio_qdisc()
9126 struct ice_pf *pf = vsi->back; in ice_setup_tc_mqprio_qdisc()
9134 num_tcf = mqprio_qopt->qopt.num_tc; in ice_setup_tc_mqprio_qdisc()
9135 hw = mqprio_qopt->qopt.hw; in ice_setup_tc_mqprio_qdisc()
9136 mode = mqprio_qopt->mode; in ice_setup_tc_mqprio_qdisc()
9138 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags); in ice_setup_tc_mqprio_qdisc()
9139 vsi->ch_rss_size = 0; in ice_setup_tc_mqprio_qdisc()
9140 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); in ice_setup_tc_mqprio_qdisc()
9151 if (pf->hw.port_info->is_custom_tx_enabled) { in ice_setup_tc_mqprio_qdisc()
9153 return -EBUSY; in ice_setup_tc_mqprio_qdisc()
9163 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); in ice_setup_tc_mqprio_qdisc()
9164 set_bit(ICE_FLAG_TC_MQPRIO, pf->flags); in ice_setup_tc_mqprio_qdisc()
9169 if (vsi->netdev->features & NETIF_F_HW_TC) in ice_setup_tc_mqprio_qdisc()
9170 set_bit(ICE_FLAG_CLS_FLOWER, pf->flags); in ice_setup_tc_mqprio_qdisc()
9173 return -EINVAL; in ice_setup_tc_mqprio_qdisc()
9179 if (ena_tc_qdisc == vsi->tc_cfg.ena_tc && in ice_setup_tc_mqprio_qdisc()
9186 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) in ice_setup_tc_mqprio_qdisc()
9189 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { in ice_setup_tc_mqprio_qdisc()
9190 vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf), in ice_setup_tc_mqprio_qdisc()
9192 vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf), in ice_setup_tc_mqprio_qdisc()
9195 /* logic to rebuild VSI, same as ethtool -L */ in ice_setup_tc_mqprio_qdisc()
9202 offset = vsi->mqprio_qopt.qopt.offset[i]; in ice_setup_tc_mqprio_qdisc()
9203 qcount_rx = vsi->mqprio_qopt.qopt.count[i]; in ice_setup_tc_mqprio_qdisc()
9204 qcount_tx = vsi->mqprio_qopt.qopt.count[i]; in ice_setup_tc_mqprio_qdisc()
9206 vsi->req_txq = offset + qcount_tx; in ice_setup_tc_mqprio_qdisc()
9207 vsi->req_rxq = offset + qcount_rx; in ice_setup_tc_mqprio_qdisc()
9210 * from ice_vsi_rebuild during tc-qdisc delete stage - to in ice_setup_tc_mqprio_qdisc()
9213 vsi->orig_rss_size = vsi->rss_size; in ice_setup_tc_mqprio_qdisc()
9219 cur_txq = vsi->num_txq; in ice_setup_tc_mqprio_qdisc()
9220 cur_rxq = vsi->num_rxq; in ice_setup_tc_mqprio_qdisc()
9227 vsi->req_txq = cur_txq; in ice_setup_tc_mqprio_qdisc()
9228 vsi->req_rxq = cur_rxq; in ice_setup_tc_mqprio_qdisc()
9229 clear_bit(ICE_RESET_FAILED, pf->state); in ice_setup_tc_mqprio_qdisc()
9236 vsi->all_numtc = num_tcf; in ice_setup_tc_mqprio_qdisc()
9237 vsi->all_enatc = ena_tc_qdisc; in ice_setup_tc_mqprio_qdisc()
9241 vsi->vsi_num); in ice_setup_tc_mqprio_qdisc()
9245 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { in ice_setup_tc_mqprio_qdisc()
9246 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; in ice_setup_tc_mqprio_qdisc()
9247 u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0]; in ice_setup_tc_mqprio_qdisc()
9260 max_tx_rate, min_tx_rate, vsi->vsi_num); in ice_setup_tc_mqprio_qdisc()
9263 max_tx_rate, min_tx_rate, vsi->vsi_num); in ice_setup_tc_mqprio_qdisc()
9276 if (vsi->ch_rss_size) in ice_setup_tc_mqprio_qdisc()
9280 /* if error, reset the all_numtc and all_enatc */ in ice_setup_tc_mqprio_qdisc()
9282 vsi->all_numtc = 0; in ice_setup_tc_mqprio_qdisc()
9283 vsi->all_enatc = 0; in ice_setup_tc_mqprio_qdisc()
9292 * ice_cfg_txtime - configure Tx Time for the Tx ring
9306 return -EINVAL; in ice_cfg_txtime()
9308 vsi = tx_ring->vsi; in ice_cfg_txtime()
9309 pf = vsi->back; in ice_cfg_txtime()
9310 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { in ice_cfg_txtime()
9311 timeout--; in ice_cfg_txtime()
9313 return -EBUSY; in ice_cfg_txtime()
9317 queue = tx_ring->q_index; in ice_cfg_txtime()
9328 clear_bit(ICE_CFG_BUSY, pf->state); in ice_cfg_txtime()
9333 * ice_offload_txtime - set earliest TxTime first
9343 struct ice_pf *pf = np->vsi->back; in ice_offload_txtime()
9345 struct ice_vsi *vsi = np->vsi; in ice_offload_txtime()
9350 return -EOPNOTSUPP; in ice_offload_txtime()
9353 if (!qopt_off || qopt->queue < 0 || qopt->queue >= vsi->num_txq) in ice_offload_txtime()
9354 return -EINVAL; in ice_offload_txtime()
9356 if (qopt->enable) in ice_offload_txtime()
9357 set_bit(qopt->queue, pf->txtime_txqs); in ice_offload_txtime()
9359 clear_bit(qopt->queue, pf->txtime_txqs); in ice_offload_txtime()
9361 if (netif_running(vsi->netdev)) { in ice_offload_txtime()
9362 tx_ring = vsi->tx_rings[qopt->queue]; in ice_offload_txtime()
9369 str_enable_disable(qopt->enable), qopt->queue); in ice_offload_txtime()
9374 str_enable_disable(qopt->enable), qopt->queue); in ice_offload_txtime()
9376 if (qopt->enable) in ice_offload_txtime()
9377 clear_bit(qopt->queue, pf->txtime_txqs); in ice_offload_txtime()
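/* Editor's note: an illustrative userspace trigger for this ndo (values
 * arbitrary) - attaching an offloaded ETF qdisc to one Tx queue under an
 * mqprio root with handle 100:
 *
 *   tc qdisc add dev <iface> parent 100:1 etf clockid CLOCK_TAI \
 *      delta 300000 offload
 */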
9390 struct ice_pf *pf = np->vsi->back; in ice_setup_tc()
9398 ((struct flow_block_offload *)type_data)->binder_type; in ice_setup_tc()
9408 return -EOPNOTSUPP; in ice_setup_tc()
9418 return -EOPNOTSUPP; in ice_setup_tc()
9421 cdev = pf->cdev_info; in ice_setup_tc()
9422 if (cdev && cdev->adev) { in ice_setup_tc()
9423 mutex_lock(&pf->adev_mutex); in ice_setup_tc()
9424 device_lock(&cdev->adev->dev); in ice_setup_tc()
9426 if (cdev->adev->dev.driver) { in ice_setup_tc()
9428 err = -EBUSY; in ice_setup_tc()
9434 mutex_lock(&pf->tc_mutex); in ice_setup_tc()
9436 mutex_unlock(&pf->tc_mutex); in ice_setup_tc()
9440 device_unlock(&cdev->adev->dev); in ice_setup_tc()
9441 mutex_unlock(&pf->adev_mutex); in ice_setup_tc()
9447 return -EOPNOTSUPP; in ice_setup_tc()
9449 return -EOPNOTSUPP; in ice_setup_tc()
9458 list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) { in ice_indr_block_priv_lookup()
9459 if (!cb_priv->netdev) in ice_indr_block_priv_lookup()
9461 if (cb_priv->netdev == netdev) in ice_indr_block_priv_lookup()
9472 struct ice_netdev_priv *np = priv->np; in ice_indr_setup_block_cb()
9476 return ice_setup_tc_cls_flower(np, priv->netdev, in ice_indr_setup_block_cb()
9480 return -EOPNOTSUPP; in ice_indr_setup_block_cb()
9495 vlan_dev_real_dev(netdev) == np->vsi->netdev)) in ice_indr_setup_tc_block()
9496 return -EOPNOTSUPP; in ice_indr_setup_tc_block()
9498 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) in ice_indr_setup_tc_block()
9499 return -EOPNOTSUPP; in ice_indr_setup_tc_block()
9501 switch (f->command) { in ice_indr_setup_tc_block()
9505 return -EEXIST; in ice_indr_setup_tc_block()
9509 return -ENOMEM; in ice_indr_setup_tc_block()
9511 indr_priv->netdev = netdev; in ice_indr_setup_tc_block()
9512 indr_priv->np = np; in ice_indr_setup_tc_block()
9513 list_add(&indr_priv->list, &np->tc_indr_block_priv_list); in ice_indr_setup_tc_block()
9523 list_del(&indr_priv->list); in ice_indr_setup_tc_block()
9528 list_add_tail(&block_cb->driver_list, &ice_block_cb_list); in ice_indr_setup_tc_block()
9533 return -ENOENT; in ice_indr_setup_tc_block()
9535 block_cb = flow_block_cb_lookup(f->block, in ice_indr_setup_tc_block()
9539 return -ENOENT; in ice_indr_setup_tc_block()
9543 list_del(&block_cb->driver_list); in ice_indr_setup_tc_block()
9546 return -EOPNOTSUPP; in ice_indr_setup_tc_block()
9563 return -EOPNOTSUPP; in ice_indr_setup_tc_cb()
9568 * ice_open - Called when a network interface becomes active
9582 struct ice_pf *pf = np->vsi->back; in ice_open()
9584 if (ice_is_reset_in_progress(pf->state)) { in ice_open()
9585 netdev_err(netdev, "can't open net device while reset is in progress"); in ice_open()
9586 return -EBUSY; in ice_open()
9593 * ice_open_internal - Called when a network interface becomes active
9596 * Internal ice_open implementation. Should not be used directly except for ice_open and reset
9604 struct ice_vsi *vsi = np->vsi; in ice_open_internal()
9605 struct ice_pf *pf = vsi->back; in ice_open_internal()
9609 if (test_bit(ICE_NEEDS_RESTART, pf->state)) { in ice_open_internal()
9611 return -EIO; in ice_open_internal()
9616 pi = vsi->port_info; in ice_open_internal()
9623 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); in ice_open_internal()
9626 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { in ice_open_internal()
9627 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_open_internal()
9628 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) { in ice_open_internal()
9644 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_open_internal()
9651 vsi->vsi_num, vsi->vsw->sw_id); in ice_open_internal()
9660 * ice_stop - Disables a network interface
9663 * The stop entry point is called when an interface is de-activated by the OS,
9667 * Returns success only - not allowed to fail
9672 struct ice_vsi *vsi = np->vsi; in ice_stop()
9673 struct ice_pf *pf = vsi->back; in ice_stop()
9675 if (ice_is_reset_in_progress(pf->state)) { in ice_stop()
9676 netdev_err(netdev, "can't stop net device while reset is in progress"); in ice_stop()
9677 return -EBUSY; in ice_stop()
9680 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) { in ice_stop()
9684 if (link_err == -ENOMEDIUM) in ice_stop()
9685 netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n", in ice_stop()
9686 vsi->vsi_num); in ice_stop()
9688 netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", in ice_stop()
9689 vsi->vsi_num, link_err); in ice_stop()
9692 return -EIO; in ice_stop()
9702 * ice_features_check - Validate encapsulated packet conforms to limits
9719 if (skb->ip_summed != CHECKSUM_PARTIAL) in ice_features_check()
9725 if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS)) in ice_features_check()
9736 if (skb->encapsulation) { in ice_features_check()
9742 if (gso && (skb_shinfo(skb)->gso_type & in ice_features_check()
9744 len = skb_inner_network_header(skb) - in ice_features_check()