Lines matching full:pf (drivers/net/ethernet/intel/ice/ice_main.c)

67 	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);  in ice_hw_to_dev()  local
69 return &pf->pdev->dev; in ice_hw_to_dev()
77 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
79 static void ice_vsi_release_all(struct ice_pf *pf);
81 static int ice_rebuild_channels(struct ice_pf *pf);
115 * @pf: pointer to PF struct
117 static void ice_check_for_hang_subtask(struct ice_pf *pf) in ice_check_for_hang_subtask() argument
125 ice_for_each_vsi(pf, v) in ice_check_for_hang_subtask()
126 if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) { in ice_check_for_hang_subtask()
127 vsi = pf->vsi[v]; in ice_check_for_hang_subtask()
179 * @pf: board private structure
181 * Set initial set of MAC filters for PF VSI; configure filters for permanent
185 static int ice_init_mac_fltr(struct ice_pf *pf) in ice_init_mac_fltr() argument
190 vsi = ice_get_main_vsi(pf); in ice_init_mac_fltr()
263 * ice_set_promisc - Enable promiscuous mode for a given PF
292 * ice_clear_promisc - Disable promiscuous mode for a given PF
330 struct ice_pf *pf = vsi->back; in ice_vsi_sync_fltr() local
331 struct ice_hw *hw = &pf->hw; in ice_vsi_sync_fltr()
485 * @pf: board private structure
487 static void ice_sync_fltr_subtask(struct ice_pf *pf) in ice_sync_fltr_subtask() argument
491 if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags))) in ice_sync_fltr_subtask()
494 clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags); in ice_sync_fltr_subtask()
496 ice_for_each_vsi(pf, v) in ice_sync_fltr_subtask()
497 if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) && in ice_sync_fltr_subtask()
498 ice_vsi_sync_fltr(pf->vsi[v])) { in ice_sync_fltr_subtask()
500 set_bit(ICE_FLAG_FLTR_SYNC, pf->flags); in ice_sync_fltr_subtask()
506 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
507 * @pf: the PF
510 static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked) in ice_pf_dis_all_vsi() argument
515 ice_for_each_vsi(pf, v) in ice_pf_dis_all_vsi()
516 if (pf->vsi[v]) in ice_pf_dis_all_vsi()
517 ice_dis_vsi(pf->vsi[v], locked); in ice_pf_dis_all_vsi()
520 pf->pf_agg_node[node].num_vsis = 0; in ice_pf_dis_all_vsi()
523 pf->vf_agg_node[node].num_vsis = 0; in ice_pf_dis_all_vsi()
528 * @pf: board private structure
534 ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) in ice_prepare_for_reset() argument
536 struct ice_hw *hw = &pf->hw; in ice_prepare_for_reset()
541 dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type); in ice_prepare_for_reset()
544 if (test_bit(ICE_PREPARED_FOR_RESET, pf->state)) in ice_prepare_for_reset()
547 synchronize_irq(pf->oicr_irq.virq); in ice_prepare_for_reset()
549 ice_unplug_aux_dev(pf); in ice_prepare_for_reset()
553 ice_vc_notify_reset(pf); in ice_prepare_for_reset()
556 mutex_lock(&pf->vfs.table_lock); in ice_prepare_for_reset()
557 ice_for_each_vf(pf, bkt, vf) in ice_prepare_for_reset()
559 mutex_unlock(&pf->vfs.table_lock); in ice_prepare_for_reset()
561 if (ice_is_eswitch_mode_switchdev(pf)) { in ice_prepare_for_reset()
563 ice_eswitch_br_fdb_flush(pf->eswitch.br_offloads->bridge); in ice_prepare_for_reset()
568 vsi = ice_get_main_vsi(pf); in ice_prepare_for_reset()
577 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { in ice_prepare_for_reset()
593 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags); in ice_prepare_for_reset()
605 set_bit(ICE_VSI_REBUILD_PENDING, ice_get_main_vsi(pf)->state); in ice_prepare_for_reset()
606 ice_pf_dis_all_vsi(pf, false); in ice_prepare_for_reset()
608 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_prepare_for_reset()
609 ice_ptp_prepare_for_reset(pf, reset_type); in ice_prepare_for_reset()
611 if (ice_is_feature_supported(pf, ICE_F_GNSS)) in ice_prepare_for_reset()
612 ice_gnss_exit(pf); in ice_prepare_for_reset()
619 set_bit(ICE_PREPARED_FOR_RESET, pf->state); in ice_prepare_for_reset()
624 * @pf: board private structure
627 static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) in ice_do_reset() argument
629 struct device *dev = ice_pf_to_dev(pf); in ice_do_reset()
630 struct ice_hw *hw = &pf->hw; in ice_do_reset()
634 if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) { in ice_do_reset()
639 ice_prepare_for_reset(pf, reset_type); in ice_do_reset()
644 set_bit(ICE_RESET_FAILED, pf->state); in ice_do_reset()
645 clear_bit(ICE_RESET_OICR_RECV, pf->state); in ice_do_reset()
646 clear_bit(ICE_PREPARED_FOR_RESET, pf->state); in ice_do_reset()
647 clear_bit(ICE_PFR_REQ, pf->state); in ice_do_reset()
648 clear_bit(ICE_CORER_REQ, pf->state); in ice_do_reset()
649 clear_bit(ICE_GLOBR_REQ, pf->state); in ice_do_reset()
650 wake_up(&pf->reset_wait_queue); in ice_do_reset()
659 pf->pfr_count++; in ice_do_reset()
660 ice_rebuild(pf, reset_type); in ice_do_reset()
661 clear_bit(ICE_PREPARED_FOR_RESET, pf->state); in ice_do_reset()
662 clear_bit(ICE_PFR_REQ, pf->state); in ice_do_reset()
663 wake_up(&pf->reset_wait_queue); in ice_do_reset()
664 ice_reset_all_vfs(pf); in ice_do_reset()
670 * @pf: board private structure
672 static void ice_reset_subtask(struct ice_pf *pf) in ice_reset_subtask() argument
678 * of reset is pending and sets bits in pf->state indicating the reset in ice_reset_subtask()
680 * prepare for pending reset if not already (for PF software-initiated in ice_reset_subtask()
686 if (test_bit(ICE_RESET_OICR_RECV, pf->state)) { in ice_reset_subtask()
688 if (test_and_clear_bit(ICE_CORER_RECV, pf->state)) in ice_reset_subtask()
690 if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state)) in ice_reset_subtask()
692 if (test_and_clear_bit(ICE_EMPR_RECV, pf->state)) in ice_reset_subtask()
697 ice_prepare_for_reset(pf, reset_type); in ice_reset_subtask()
700 if (ice_check_reset(&pf->hw)) { in ice_reset_subtask()
701 set_bit(ICE_RESET_FAILED, pf->state); in ice_reset_subtask()
704 pf->hw.reset_ongoing = false; in ice_reset_subtask()
705 ice_rebuild(pf, reset_type); in ice_reset_subtask()
709 clear_bit(ICE_RESET_OICR_RECV, pf->state); in ice_reset_subtask()
710 clear_bit(ICE_PREPARED_FOR_RESET, pf->state); in ice_reset_subtask()
711 clear_bit(ICE_PFR_REQ, pf->state); in ice_reset_subtask()
712 clear_bit(ICE_CORER_REQ, pf->state); in ice_reset_subtask()
713 clear_bit(ICE_GLOBR_REQ, pf->state); in ice_reset_subtask()
714 wake_up(&pf->reset_wait_queue); in ice_reset_subtask()
715 ice_reset_all_vfs(pf); in ice_reset_subtask()
722 if (test_bit(ICE_PFR_REQ, pf->state)) { in ice_reset_subtask()
724 if (pf->lag && pf->lag->bonded) { in ice_reset_subtask()
725 dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n"); in ice_reset_subtask()
729 if (test_bit(ICE_CORER_REQ, pf->state)) in ice_reset_subtask()
731 if (test_bit(ICE_GLOBR_REQ, pf->state)) in ice_reset_subtask()
738 if (!test_bit(ICE_DOWN, pf->state) && in ice_reset_subtask()
739 !test_bit(ICE_CFG_BUSY, pf->state)) { in ice_reset_subtask()
740 ice_do_reset(pf, reset_type); in ice_reset_subtask()
936 * @pf: private PF struct
945 static void ice_set_dflt_mib(struct ice_pf *pf) in ice_set_dflt_mib() argument
947 struct device *dev = ice_pf_to_dev(pf); in ice_set_dflt_mib()
951 struct ice_hw *hw = &pf->hw; in ice_set_dflt_mib()
1024 * @pf: pointer to PF struct
1029 static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err) in ice_check_phy_fw_load() argument
1032 clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags); in ice_check_phy_fw_load()
1036 if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags)) in ice_check_phy_fw_load()
1040 dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the correct NVM for your device and try again\n"); in ice_check_phy_fw_load()
1041 set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags); in ice_check_phy_fw_load()
1047 * @pf: pointer to PF struct
1053 static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err) in ice_check_module_power() argument
1058 clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); in ice_check_module_power()
1065 if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags)) in ice_check_module_power()
1069 dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n"); in ice_check_module_power()
1070 set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); in ice_check_module_power()
1072 dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n"); in ice_check_module_power()
1073 set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); in ice_check_module_power()
1079 * @pf: pointer to the PF struct
1085 static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err) in ice_check_link_cfg_err() argument
1087 ice_check_module_power(pf, link_cfg_err); in ice_check_link_cfg_err()
1088 ice_check_phy_fw_load(pf, link_cfg_err); in ice_check_link_cfg_err()
1093 * @pf: PF that the link event is associated with
1101 ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, in ice_link_event() argument
1104 struct device *dev = ice_pf_to_dev(pf); in ice_link_event()
1126 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); in ice_link_event()
1134 vsi = ice_get_main_vsi(pf); in ice_link_event()
1139 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) && in ice_link_event()
1141 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_link_event()
1150 pf->link_down_events++; in ice_link_event()
1152 ice_ptp_link_change(pf, link_up); in ice_link_event()
1154 if (ice_is_dcb_active(pf)) { in ice_link_event()
1155 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) in ice_link_event()
1156 ice_dcb_rebuild(pf); in ice_link_event()
1159 ice_set_dflt_mib(pf); in ice_link_event()
1164 ice_vc_notify_link_state(pf); in ice_link_event()
1171 * @pf: board private structure
1173 static void ice_watchdog_subtask(struct ice_pf *pf) in ice_watchdog_subtask() argument
1178 if (test_bit(ICE_DOWN, pf->state) || in ice_watchdog_subtask()
1179 test_bit(ICE_CFG_BUSY, pf->state)) in ice_watchdog_subtask()
1184 pf->serv_tmr_prev + pf->serv_tmr_period)) in ice_watchdog_subtask()
1187 pf->serv_tmr_prev = jiffies; in ice_watchdog_subtask()
1192 ice_update_pf_stats(pf); in ice_watchdog_subtask()
1193 ice_for_each_vsi(pf, i) in ice_watchdog_subtask()
1194 if (pf->vsi[i] && pf->vsi[i]->netdev) in ice_watchdog_subtask()
1195 ice_update_vsi_stats(pf->vsi[i]); in ice_watchdog_subtask()
1229 * @pf: PF that the link event is associated with
1233 ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event) in ice_handle_link_event() argument
1240 port_info = pf->hw.port_info; in ice_handle_link_event()
1244 status = ice_link_event(pf, port_info, in ice_handle_link_event()
1248 dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n", in ice_handle_link_event()
1256 * @pf: pointer to the PF private structure
1261 * a given PF. Actual wait would be done by a call to ice_aq_wait_for_event().
1270 void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task, in ice_aq_prep_for_event() argument
1277 spin_lock_bh(&pf->aq_wait_lock); in ice_aq_prep_for_event()
1278 hlist_add_head(&task->entry, &pf->aq_wait_list); in ice_aq_prep_for_event()
1279 spin_unlock_bh(&pf->aq_wait_lock); in ice_aq_prep_for_event()
1284 * @pf: pointer to the PF private structure
1288 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
1294 int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task, in ice_aq_wait_for_event() argument
1298 struct device *dev = ice_pf_to_dev(pf); in ice_aq_wait_for_event()
1303 ret = wait_event_interruptible_timeout(pf->aq_wait_queue, in ice_aq_wait_for_event()
1331 spin_lock_bh(&pf->aq_wait_lock); in ice_aq_wait_for_event()
1333 spin_unlock_bh(&pf->aq_wait_lock); in ice_aq_wait_for_event()
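The listing keeps only the lines of this pair that mention pf, which hides the intended two-phase usage: register interest first, then send, then wait, so a completion that arrives quickly cannot be missed. A minimal caller sketch; the function name ice_example_send_and_wait, the opcode, and the timeout are illustrative assumptions, not taken from the lines above:

	static int ice_example_send_and_wait(struct ice_pf *pf)
	{
		struct ice_aq_task task;
		struct ice_aq_desc desc;
		int err;

		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);

		/* Link &task into pf->aq_wait_list (under aq_wait_lock)
		 * before the command goes out on the wire.
		 */
		ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_write_activate);

		err = ice_aq_send_cmd(&pf->hw, &desc, NULL, 0, NULL);
		if (err)
			return err;

		/* Sleeps on pf->aq_wait_queue; ice_aq_check_events() (next
		 * group below) matches the opcode from the ARQ, copies the
		 * event into the task, and wakes this waiter.
		 */
		return ice_aq_wait_for_event(pf, &task, HZ);
	}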
1340 * @pf: pointer to the PF private structure
1356 static void ice_aq_check_events(struct ice_pf *pf, u16 opcode, in ice_aq_check_events() argument
1363 spin_lock_bh(&pf->aq_wait_lock); in ice_aq_check_events()
1364 hlist_for_each_entry(task, &pf->aq_wait_list, entry) { in ice_aq_check_events()
1384 spin_unlock_bh(&pf->aq_wait_lock); in ice_aq_check_events()
1387 wake_up(&pf->aq_wait_queue); in ice_aq_check_events()
1392 * @pf: the PF private structure
1397 static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf) in ice_aq_cancel_waiting_tasks() argument
1401 spin_lock_bh(&pf->aq_wait_lock); in ice_aq_cancel_waiting_tasks()
1402 hlist_for_each_entry(task, &pf->aq_wait_list, entry) in ice_aq_cancel_waiting_tasks()
1404 spin_unlock_bh(&pf->aq_wait_lock); in ice_aq_cancel_waiting_tasks()
1406 wake_up(&pf->aq_wait_queue); in ice_aq_cancel_waiting_tasks()
1413 * @pf: ptr to struct ice_pf
1416 static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) in __ice_clean_ctrlq() argument
1418 struct device *dev = ice_pf_to_dev(pf); in __ice_clean_ctrlq()
1420 struct ice_hw *hw = &pf->hw; in __ice_clean_ctrlq()
1426 /* Do not clean control queue if/when PF reset fails */ in __ice_clean_ctrlq()
1427 if (test_bit(ICE_RESET_FAILED, pf->state)) in __ice_clean_ctrlq()
1517 ice_aq_check_events(pf, opcode, &event); in __ice_clean_ctrlq()
1521 if (ice_handle_link_event(pf, &event)) in __ice_clean_ctrlq()
1525 ice_vf_lan_overflow_event(pf, &event); in __ice_clean_ctrlq()
1528 if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) { in __ice_clean_ctrlq()
1529 ice_vc_process_vf_msg(pf, &event, NULL); in __ice_clean_ctrlq()
1540 ice_vc_process_vf_msg(pf, &event, &data); in __ice_clean_ctrlq()
1548 ice_dcb_process_lldp_set_mib_change(pf, &event); in __ice_clean_ctrlq()
1551 ice_process_health_status_event(pf, &event); in __ice_clean_ctrlq()
1582 * @pf: board private structure
1584 static void ice_clean_adminq_subtask(struct ice_pf *pf) in ice_clean_adminq_subtask() argument
1586 struct ice_hw *hw = &pf->hw; in ice_clean_adminq_subtask()
1588 if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) in ice_clean_adminq_subtask()
1591 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN)) in ice_clean_adminq_subtask()
1594 clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); in ice_clean_adminq_subtask()
1602 __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN); in ice_clean_adminq_subtask()
1609 * @pf: board private structure
1611 static void ice_clean_mailboxq_subtask(struct ice_pf *pf) in ice_clean_mailboxq_subtask() argument
1613 struct ice_hw *hw = &pf->hw; in ice_clean_mailboxq_subtask()
1615 if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state)) in ice_clean_mailboxq_subtask()
1618 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX)) in ice_clean_mailboxq_subtask()
1621 clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); in ice_clean_mailboxq_subtask()
1624 __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX); in ice_clean_mailboxq_subtask()
1631 * @pf: board private structure
1633 static void ice_clean_sbq_subtask(struct ice_pf *pf) in ice_clean_sbq_subtask() argument
1635 struct ice_hw *hw = &pf->hw; in ice_clean_sbq_subtask()
1641 clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); in ice_clean_sbq_subtask()
1645 if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state)) in ice_clean_sbq_subtask()
1648 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB)) in ice_clean_sbq_subtask()
1651 clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); in ice_clean_sbq_subtask()
1654 __ice_clean_ctrlq(pf, ICE_CTL_Q_SB); in ice_clean_sbq_subtask()
1661 * @pf: board private structure
1665 void ice_service_task_schedule(struct ice_pf *pf) in ice_service_task_schedule() argument
1667 if (!test_bit(ICE_SERVICE_DIS, pf->state) && in ice_service_task_schedule()
1668 !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) && in ice_service_task_schedule()
1669 !test_bit(ICE_NEEDS_RESTART, pf->state)) in ice_service_task_schedule()
1670 queue_work(ice_wq, &pf->serv_task); in ice_service_task_schedule()
1675 * @pf: board private structure
1677 static void ice_service_task_complete(struct ice_pf *pf) in ice_service_task_complete() argument
1679 WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state)); in ice_service_task_complete()
1681 /* force memory (pf->state) to sync before next service task */ in ice_service_task_complete()
1683 clear_bit(ICE_SERVICE_SCHED, pf->state); in ice_service_task_complete()
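The statement elided between the comment at 1681 and the clear_bit() at 1683 is smp_mb__before_atomic() in the full source; with it restored, the completion side reads:

	static void ice_service_task_complete(struct ice_pf *pf)
	{
		WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));

		/* force memory (pf->state) to sync before next service task */
		smp_mb__before_atomic();
		clear_bit(ICE_SERVICE_SCHED, pf->state);
	}

Clearing ICE_SERVICE_SCHED only after the barrier lets a schedule request that races with the end of a pass queue a fresh pass, instead of being absorbed by the test_and_set_bit() in ice_service_task_schedule() into the pass that just finished.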
1688 * @pf: board private structure
1693 static int ice_service_task_stop(struct ice_pf *pf) in ice_service_task_stop() argument
1697 ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state); in ice_service_task_stop()
1699 if (pf->serv_tmr.function) in ice_service_task_stop()
1700 timer_delete_sync(&pf->serv_tmr); in ice_service_task_stop()
1701 if (pf->serv_task.func) in ice_service_task_stop()
1702 cancel_work_sync(&pf->serv_task); in ice_service_task_stop()
1704 clear_bit(ICE_SERVICE_SCHED, pf->state); in ice_service_task_stop()
1710 * @pf: board private structure
1714 static void ice_service_task_restart(struct ice_pf *pf) in ice_service_task_restart() argument
1716 clear_bit(ICE_SERVICE_DIS, pf->state); in ice_service_task_restart()
1717 ice_service_task_schedule(pf); in ice_service_task_restart()
1726 struct ice_pf *pf = timer_container_of(pf, t, serv_tmr); in ice_service_timer() local
1728 mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies)); in ice_service_timer()
1729 ice_service_task_schedule(pf); in ice_service_timer()
1734 * @pf: pointer to the PF structure
1739 * Since the queue can get stuck on VF MDD events, the PF can be configured to
1743 static void ice_mdd_maybe_reset_vf(struct ice_pf *pf, struct ice_vf *vf, in ice_mdd_maybe_reset_vf() argument
1746 struct device *dev = ice_pf_to_dev(pf); in ice_mdd_maybe_reset_vf()
1748 if (!test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) in ice_mdd_maybe_reset_vf()
1760 dev_info(dev, "PF-to-VF reset on PF %d VF %d due to MDD event\n", in ice_mdd_maybe_reset_vf()
1761 pf->hw.pf_id, vf->vf_id); in ice_mdd_maybe_reset_vf()
1767 * @pf: pointer to the PF structure
1770 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1772 * disable the queue, the PF can be configured to reset the VF using ethtool
1775 static void ice_handle_mdd_event(struct ice_pf *pf) in ice_handle_mdd_event() argument
1777 struct device *dev = ice_pf_to_dev(pf); in ice_handle_mdd_event()
1778 struct ice_hw *hw = &pf->hw; in ice_handle_mdd_event()
1783 if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) { in ice_handle_mdd_event()
1787 ice_print_vfs_mdd_events(pf); in ice_handle_mdd_event()
1799 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1800 dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", in ice_handle_mdd_event()
1802 ice_report_mdd_event(pf, ICE_MDD_SRC_TX_PQM, pf_num, vf_num, in ice_handle_mdd_event()
1814 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1815 dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", in ice_handle_mdd_event()
1817 ice_report_mdd_event(pf, ICE_MDD_SRC_TX_TCLAN, pf_num, vf_num, in ice_handle_mdd_event()
1829 if (netif_msg_rx_err(pf)) in ice_handle_mdd_event()
1830 dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n", in ice_handle_mdd_event()
1832 ice_report_mdd_event(pf, ICE_MDD_SRC_RX, pf_num, vf_num, event, in ice_handle_mdd_event()
1837 /* check to see if this PF caused an MDD event */ in ice_handle_mdd_event()
1841 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1842 dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n"); in ice_handle_mdd_event()
1848 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1849 dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n"); in ice_handle_mdd_event()
1855 if (netif_msg_rx_err(pf)) in ice_handle_mdd_event()
1856 dev_info(dev, "Malicious Driver Detection event RX detected on PF\n"); in ice_handle_mdd_event()
1862 mutex_lock(&pf->vfs.table_lock); in ice_handle_mdd_event()
1863 ice_for_each_vf(pf, bkt, vf) { in ice_handle_mdd_event()
1870 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); in ice_handle_mdd_event()
1871 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1882 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); in ice_handle_mdd_event()
1883 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1894 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); in ice_handle_mdd_event()
1895 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1906 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); in ice_handle_mdd_event()
1907 if (netif_msg_rx_err(pf)) in ice_handle_mdd_event()
1915 ice_mdd_maybe_reset_vf(pf, vf, reset_vf_tx, in ice_handle_mdd_event()
1918 mutex_unlock(&pf->vfs.table_lock); in ice_handle_mdd_event()
1920 ice_print_vfs_mdd_events(pf); in ice_handle_mdd_event()
2008 struct ice_pf *pf = pi->hw->back; in ice_init_nvm_phy_type() local
2019 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); in ice_init_nvm_phy_type()
2023 pf->nvm_phy_type_hi = pcaps->phy_type_high; in ice_init_nvm_phy_type()
2024 pf->nvm_phy_type_lo = pcaps->phy_type_low; in ice_init_nvm_phy_type()
2040 struct ice_pf *pf = pi->hw->back; in ice_init_link_dflt_override() local
2042 ldo = &pf->link_dflt_override; in ice_init_link_dflt_override()
2052 set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags); in ice_init_link_dflt_override()
2053 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); in ice_init_link_dflt_override()
2078 struct ice_pf *pf = pi->hw->back; in ice_init_phy_cfg_dflt_override() local
2080 ldo = &pf->link_dflt_override; in ice_init_phy_cfg_dflt_override()
2088 cfg->phy_type_low = pf->nvm_phy_type_lo & in ice_init_phy_cfg_dflt_override()
2090 cfg->phy_type_high = pf->nvm_phy_type_hi & in ice_init_phy_cfg_dflt_override()
2096 set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state); in ice_init_phy_cfg_dflt_override()
2117 struct ice_pf *pf = pi->hw->back; in ice_init_phy_user_cfg() local
2134 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); in ice_init_phy_user_cfg()
2144 set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags); in ice_init_phy_user_cfg()
2151 (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) { in ice_init_phy_user_cfg()
2166 set_bit(ICE_PHY_INIT_COMPLETE, pf->state); in ice_init_phy_user_cfg()
2187 struct ice_pf *pf = vsi->back; in ice_configure_phy() local
2196 if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) && in ice_configure_phy()
2200 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) in ice_configure_phy()
2286 err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL); in ice_configure_phy()
2299 * @pf: pointer to PF struct
2304 static void ice_check_media_subtask(struct ice_pf *pf) in ice_check_media_subtask() argument
2311 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags)) in ice_check_media_subtask()
2314 vsi = ice_get_main_vsi(pf); in ice_check_media_subtask()
2324 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); in ice_check_media_subtask()
2327 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) in ice_check_media_subtask()
2339 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_check_media_subtask()
2349 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task); in ice_service_task_recovery_mode() local
2351 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); in ice_service_task_recovery_mode()
2352 ice_clean_adminq_subtask(pf); in ice_service_task_recovery_mode()
2354 ice_service_task_complete(pf); in ice_service_task_recovery_mode()
2356 mod_timer(&pf->serv_tmr, jiffies + msecs_to_jiffies(100)); in ice_service_task_recovery_mode()
2361 * @work: pointer to work_struct contained by the PF struct
2365 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task); in ice_service_task() local
2368 if (pf->health_reporters.tx_hang_buf.tx_ring) { in ice_service_task()
2369 ice_report_tx_hang(pf); in ice_service_task()
2370 pf->health_reporters.tx_hang_buf.tx_ring = NULL; in ice_service_task()
2373 ice_reset_subtask(pf); in ice_service_task()
2376 if (ice_is_reset_in_progress(pf->state) || in ice_service_task()
2377 test_bit(ICE_SUSPENDED, pf->state) || in ice_service_task()
2378 test_bit(ICE_NEEDS_RESTART, pf->state)) { in ice_service_task()
2379 ice_service_task_complete(pf); in ice_service_task()
2383 if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) { in ice_service_task()
2390 swap(event->reg, pf->oicr_err_reg); in ice_service_task()
2391 ice_send_event_to_aux(pf, event); in ice_service_task()
2399 if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags)) in ice_service_task()
2400 ice_unplug_aux_dev(pf); in ice_service_task()
2403 if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) in ice_service_task()
2404 ice_plug_aux_dev(pf); in ice_service_task()
2406 if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) { in ice_service_task()
2412 ice_send_event_to_aux(pf, event); in ice_service_task()
2417 ice_clean_adminq_subtask(pf); in ice_service_task()
2418 ice_check_media_subtask(pf); in ice_service_task()
2419 ice_check_for_hang_subtask(pf); in ice_service_task()
2420 ice_sync_fltr_subtask(pf); in ice_service_task()
2421 ice_handle_mdd_event(pf); in ice_service_task()
2422 ice_watchdog_subtask(pf); in ice_service_task()
2424 if (ice_is_safe_mode(pf)) { in ice_service_task()
2425 ice_service_task_complete(pf); in ice_service_task()
2429 ice_process_vflr_event(pf); in ice_service_task()
2430 ice_clean_mailboxq_subtask(pf); in ice_service_task()
2431 ice_clean_sbq_subtask(pf); in ice_service_task()
2432 ice_sync_arfs_fltrs(pf); in ice_service_task()
2433 ice_flush_fdir_ctx(pf); in ice_service_task()
2436 ice_service_task_complete(pf); in ice_service_task()
2442 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || in ice_service_task()
2443 test_bit(ICE_MDD_EVENT_PENDING, pf->state) || in ice_service_task()
2444 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) || in ice_service_task()
2445 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) || in ice_service_task()
2446 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) || in ice_service_task()
2447 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) || in ice_service_task()
2448 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) in ice_service_task()
2449 mod_timer(&pf->serv_tmr, jiffies); in ice_service_task()
2474 * @pf: board private structure
2477 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) in ice_schedule_reset() argument
2479 struct device *dev = ice_pf_to_dev(pf); in ice_schedule_reset()
2482 if (test_bit(ICE_RESET_FAILED, pf->state)) { in ice_schedule_reset()
2487 if (ice_is_reset_in_progress(pf->state)) { in ice_schedule_reset()
2494 set_bit(ICE_PFR_REQ, pf->state); in ice_schedule_reset()
2497 set_bit(ICE_CORER_REQ, pf->state); in ice_schedule_reset()
2500 set_bit(ICE_GLOBR_REQ, pf->state); in ice_schedule_reset()
2506 ice_service_task_schedule(pf); in ice_schedule_reset()
2534 struct ice_pf *pf = vsi->back; in ice_vsi_req_irq_msix() local
2541 dev = ice_pf_to_dev(pf); in ice_vsi_req_irq_msix()
2756 struct ice_pf *pf = vsi->back; in ice_prepare_xdp_rings() local
2758 .qs_mutex = &pf->avail_q_mutex, in ice_prepare_xdp_rings()
2759 .pf_map = pf->avail_txqs, in ice_prepare_xdp_rings()
2760 .pf_map_size = pf->max_pf_txqs, in ice_prepare_xdp_rings()
2770 dev = ice_pf_to_dev(pf); in ice_prepare_xdp_rings()
2833 mutex_lock(&pf->avail_q_mutex); in ice_prepare_xdp_rings()
2835 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); in ice_prepare_xdp_rings()
2838 mutex_unlock(&pf->avail_q_mutex); in ice_prepare_xdp_rings()
2851 * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2857 struct ice_pf *pf = vsi->back; in ice_destroy_xdp_rings() local
2869 mutex_lock(&pf->avail_q_mutex); in ice_destroy_xdp_rings()
2871 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); in ice_destroy_xdp_rings()
2874 mutex_unlock(&pf->avail_q_mutex); in ice_destroy_xdp_rings()
2888 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings); in ice_destroy_xdp_rings()
3072 NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF or SF VSI"); in ice_xdp()
3095 * @pf: board private structure
3097 static void ice_ena_misc_vector(struct ice_pf *pf) in ice_ena_misc_vector() argument
3099 struct ice_hw *hw = &pf->hw; in ice_ena_misc_vector()
3127 wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index), in ice_ena_misc_vector()
3130 if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) in ice_ena_misc_vector()
3133 wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset), in ice_ena_misc_vector()
3144 struct ice_pf *pf = data; in ice_ll_ts_intr() local
3152 hw = &pf->hw; in ice_ll_ts_intr()
3153 tx = &pf->ptp.port.tx; in ice_ll_ts_intr()
3168 wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset), in ice_ll_ts_intr()
3181 struct ice_pf *pf = (struct ice_pf *)data; in ice_misc_intr() local
3183 struct ice_hw *hw = &pf->hw; in ice_misc_intr()
3187 dev = ice_pf_to_dev(pf); in ice_misc_intr()
3188 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); in ice_misc_intr()
3189 set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); in ice_misc_intr()
3190 set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); in ice_misc_intr()
3197 pf->sw_int_count++; in ice_misc_intr()
3202 set_bit(ICE_MDD_EVENT_PENDING, pf->state); in ice_misc_intr()
3206 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { in ice_misc_intr()
3213 set_bit(ICE_VFLR_EVENT_PENDING, pf->state); in ice_misc_intr()
3226 pf->corer_count++; in ice_misc_intr()
3228 pf->globr_count++; in ice_misc_intr()
3230 pf->empr_count++; in ice_misc_intr()
3235 * pf->state so that the service task can start a reset/rebuild. in ice_misc_intr()
3237 if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) { in ice_misc_intr()
3239 set_bit(ICE_CORER_RECV, pf->state); in ice_misc_intr()
3241 set_bit(ICE_GLOBR_RECV, pf->state); in ice_misc_intr()
3243 set_bit(ICE_EMPR_RECV, pf->state); in ice_misc_intr()
3251 * ICE_RESET_OICR_RECV in pf->state indicates in ice_misc_intr()
3265 ret = ice_ptp_ts_irq(pf); in ice_misc_intr()
3274 if (ice_pf_src_tmr_owned(pf)) { in ice_misc_intr()
3276 pf->ptp.ext_ts_irq |= gltsyn_stat & in ice_misc_intr()
3281 ice_ptp_extts_event(pf); in ice_misc_intr()
3287 pf->oicr_err_reg |= oicr; in ice_misc_intr()
3288 set_bit(ICE_AUX_ERR_PENDING, pf->state); in ice_misc_intr()
3301 set_bit(ICE_PFR_REQ, pf->state); in ice_misc_intr()
3304 ice_service_task_schedule(pf); in ice_misc_intr()
3318 struct ice_pf *pf = data; in ice_misc_intr_thread_fn() local
3321 hw = &pf->hw; in ice_misc_intr_thread_fn()
3323 if (ice_is_reset_in_progress(pf->state)) in ice_misc_intr_thread_fn()
3326 if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) { in ice_misc_intr_thread_fn()
3330 if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) { in ice_misc_intr_thread_fn()
3368 * @pf: board private structure
3370 static void ice_free_irq_msix_ll_ts(struct ice_pf *pf) in ice_free_irq_msix_ll_ts() argument
3372 int irq_num = pf->ll_ts_irq.virq; in ice_free_irq_msix_ll_ts()
3375 devm_free_irq(ice_pf_to_dev(pf), irq_num, pf); in ice_free_irq_msix_ll_ts()
3377 ice_free_irq(pf, pf->ll_ts_irq); in ice_free_irq_msix_ll_ts()
3382 * @pf: board private structure
3384 static void ice_free_irq_msix_misc(struct ice_pf *pf) in ice_free_irq_msix_misc() argument
3386 int misc_irq_num = pf->oicr_irq.virq; in ice_free_irq_msix_misc()
3387 struct ice_hw *hw = &pf->hw; in ice_free_irq_msix_misc()
3396 devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf); in ice_free_irq_msix_misc()
3398 ice_free_irq(pf, pf->oicr_irq); in ice_free_irq_msix_misc()
3399 if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) in ice_free_irq_msix_misc()
3400 ice_free_irq_msix_ll_ts(pf); in ice_free_irq_msix_misc()
3438 * @pf: board private structure
3444 static int ice_req_irq_msix_misc(struct ice_pf *pf) in ice_req_irq_msix_misc() argument
3446 struct device *dev = ice_pf_to_dev(pf); in ice_req_irq_msix_misc()
3447 struct ice_hw *hw = &pf->hw; in ice_req_irq_msix_misc()
3452 if (!pf->int_name[0]) in ice_req_irq_msix_misc()
3453 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", in ice_req_irq_msix_misc()
3456 if (!pf->int_name_ll_ts[0]) in ice_req_irq_msix_misc()
3457 snprintf(pf->int_name_ll_ts, sizeof(pf->int_name_ll_ts) - 1, in ice_req_irq_msix_misc()
3463 if (ice_is_reset_in_progress(pf->state)) in ice_req_irq_msix_misc()
3467 irq = ice_alloc_irq(pf, false); in ice_req_irq_msix_misc()
3471 pf->oicr_irq = irq; in ice_req_irq_msix_misc()
3472 err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr, in ice_req_irq_msix_misc()
3474 pf->int_name, pf); in ice_req_irq_msix_misc()
3477 pf->int_name, err); in ice_req_irq_msix_misc()
3478 ice_free_irq(pf, pf->oicr_irq); in ice_req_irq_msix_misc()
3483 if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) in ice_req_irq_msix_misc()
3486 irq = ice_alloc_irq(pf, false); in ice_req_irq_msix_misc()
3490 pf->ll_ts_irq = irq; in ice_req_irq_msix_misc()
3491 err = devm_request_irq(dev, pf->ll_ts_irq.virq, ice_ll_ts_intr, 0, in ice_req_irq_msix_misc()
3492 pf->int_name_ll_ts, pf); in ice_req_irq_msix_misc()
3495 pf->int_name_ll_ts, err); in ice_req_irq_msix_misc()
3496 ice_free_irq(pf, pf->ll_ts_irq); in ice_req_irq_msix_misc()
3501 ice_ena_misc_vector(pf); in ice_req_irq_msix_misc()
3503 ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index); in ice_req_irq_msix_misc()
3506 if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) in ice_req_irq_msix_misc()
3508 ((pf->ll_ts_irq.index + pf_intr_start_offset) & in ice_req_irq_msix_misc()
3510 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index), in ice_req_irq_msix_misc()
3526 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_ops() local
3528 if (ice_is_safe_mode(pf)) { in ice_set_ops()
3535 netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic; in ice_set_ops()
3554 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_netdev_features() local
3555 bool is_dvm_ena = ice_is_dvm_ena(&pf->hw); in ice_set_netdev_features()
3561 if (ice_is_safe_mode(pf)) { in ice_set_netdev_features()
3641 if (ice_is_feature_supported(pf, ICE_F_GCS)) in ice_set_netdev_features()
3662 * ice_pf_vsi_setup - Set up a PF VSI
3663 * @pf: board private structure
3670 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) in ice_pf_vsi_setup() argument
3678 return ice_vsi_setup(pf, &params); in ice_pf_vsi_setup()
3682 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, in ice_chnl_vsi_setup() argument
3692 return ice_vsi_setup(pf, &params); in ice_chnl_vsi_setup()
3697 * @pf: board private structure
3704 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) in ice_ctrl_vsi_setup() argument
3712 return ice_vsi_setup(pf, &params); in ice_ctrl_vsi_setup()
3717 * @pf: board private structure
3724 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) in ice_lb_vsi_setup() argument
3732 return ice_vsi_setup(pf, &params); in ice_lb_vsi_setup()
3930 * @pf: pointer to an ice_pf instance
3932 u16 ice_get_avail_txq_count(struct ice_pf *pf) in ice_get_avail_txq_count() argument
3934 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex, in ice_get_avail_txq_count()
3935 pf->max_pf_txqs); in ice_get_avail_txq_count()
3940 * @pf: pointer to an ice_pf instance
3942 u16 ice_get_avail_rxq_count(struct ice_pf *pf) in ice_get_avail_rxq_count() argument
3944 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex, in ice_get_avail_rxq_count()
3945 pf->max_pf_rxqs); in ice_get_avail_rxq_count()
3950 * @pf: board private structure to initialize
3952 static void ice_deinit_pf(struct ice_pf *pf) in ice_deinit_pf() argument
3954 ice_service_task_stop(pf); in ice_deinit_pf()
3955 mutex_destroy(&pf->lag_mutex); in ice_deinit_pf()
3956 mutex_destroy(&pf->adev_mutex); in ice_deinit_pf()
3957 mutex_destroy(&pf->sw_mutex); in ice_deinit_pf()
3958 mutex_destroy(&pf->tc_mutex); in ice_deinit_pf()
3959 mutex_destroy(&pf->avail_q_mutex); in ice_deinit_pf()
3960 mutex_destroy(&pf->vfs.table_lock); in ice_deinit_pf()
3962 if (pf->avail_txqs) { in ice_deinit_pf()
3963 bitmap_free(pf->avail_txqs); in ice_deinit_pf()
3964 pf->avail_txqs = NULL; in ice_deinit_pf()
3967 if (pf->avail_rxqs) { in ice_deinit_pf()
3968 bitmap_free(pf->avail_rxqs); in ice_deinit_pf()
3969 pf->avail_rxqs = NULL; in ice_deinit_pf()
3972 if (pf->txtime_txqs) { in ice_deinit_pf()
3973 bitmap_free(pf->txtime_txqs); in ice_deinit_pf()
3974 pf->txtime_txqs = NULL; in ice_deinit_pf()
3977 if (pf->ptp.clock) in ice_deinit_pf()
3978 ptp_clock_unregister(pf->ptp.clock); in ice_deinit_pf()
3980 xa_destroy(&pf->dyn_ports); in ice_deinit_pf()
3981 xa_destroy(&pf->sf_nums); in ice_deinit_pf()
3986 * @pf: pointer to the PF instance
3988 static void ice_set_pf_caps(struct ice_pf *pf) in ice_set_pf_caps() argument
3990 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps; in ice_set_pf_caps()
3992 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); in ice_set_pf_caps()
3994 set_bit(ICE_FLAG_RDMA_ENA, pf->flags); in ice_set_pf_caps()
3995 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); in ice_set_pf_caps()
3997 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); in ice_set_pf_caps()
3998 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); in ice_set_pf_caps()
4000 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); in ice_set_pf_caps()
4001 pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs, in ice_set_pf_caps()
4004 clear_bit(ICE_FLAG_RSS_ENA, pf->flags); in ice_set_pf_caps()
4006 set_bit(ICE_FLAG_RSS_ENA, pf->flags); in ice_set_pf_caps()
4008 clear_bit(ICE_FLAG_FD_ENA, pf->flags); in ice_set_pf_caps()
4015 pf->ctrl_vsi_idx = ICE_NO_VSI; in ice_set_pf_caps()
4016 set_bit(ICE_FLAG_FD_ENA, pf->flags); in ice_set_pf_caps()
4017 /* force guaranteed filter pool for PF */ in ice_set_pf_caps()
4018 ice_alloc_fd_guar_item(&pf->hw, &unused, in ice_set_pf_caps()
4020 /* force shared filter pool for PF */ in ice_set_pf_caps()
4021 ice_alloc_fd_shrd_item(&pf->hw, &unused, in ice_set_pf_caps()
4025 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); in ice_set_pf_caps()
4027 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); in ice_set_pf_caps()
4029 pf->max_pf_txqs = func_caps->common_cap.num_txq; in ice_set_pf_caps()
4030 pf->max_pf_rxqs = func_caps->common_cap.num_rxq; in ice_set_pf_caps()
4035 * @pf: board private structure to initialize
4037 static int ice_init_pf(struct ice_pf *pf) in ice_init_pf() argument
4039 ice_set_pf_caps(pf); in ice_init_pf()
4041 mutex_init(&pf->sw_mutex); in ice_init_pf()
4042 mutex_init(&pf->tc_mutex); in ice_init_pf()
4043 mutex_init(&pf->adev_mutex); in ice_init_pf()
4044 mutex_init(&pf->lag_mutex); in ice_init_pf()
4046 INIT_HLIST_HEAD(&pf->aq_wait_list); in ice_init_pf()
4047 spin_lock_init(&pf->aq_wait_lock); in ice_init_pf()
4048 init_waitqueue_head(&pf->aq_wait_queue); in ice_init_pf()
4050 init_waitqueue_head(&pf->reset_wait_queue); in ice_init_pf()
4053 timer_setup(&pf->serv_tmr, ice_service_timer, 0); in ice_init_pf()
4054 pf->serv_tmr_period = HZ; in ice_init_pf()
4055 INIT_WORK(&pf->serv_task, ice_service_task); in ice_init_pf()
4056 clear_bit(ICE_SERVICE_SCHED, pf->state); in ice_init_pf()
4058 mutex_init(&pf->avail_q_mutex); in ice_init_pf()
4059 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); in ice_init_pf()
4060 if (!pf->avail_txqs) in ice_init_pf()
4063 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); in ice_init_pf()
4064 if (!pf->avail_rxqs) { in ice_init_pf()
4065 bitmap_free(pf->avail_txqs); in ice_init_pf()
4066 pf->avail_txqs = NULL; in ice_init_pf()
4070 pf->txtime_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); in ice_init_pf()
4071 if (!pf->txtime_txqs) { in ice_init_pf()
4072 bitmap_free(pf->avail_txqs); in ice_init_pf()
4073 pf->avail_txqs = NULL; in ice_init_pf()
4074 bitmap_free(pf->avail_rxqs); in ice_init_pf()
4075 pf->avail_rxqs = NULL; in ice_init_pf()
4079 mutex_init(&pf->vfs.table_lock); in ice_init_pf()
4080 hash_init(pf->vfs.table); in ice_init_pf()
4081 if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) in ice_init_pf()
4082 wr32(&pf->hw, E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH, in ice_init_pf()
4085 ice_mbx_init_snapshot(&pf->hw); in ice_init_pf()
4087 xa_init(&pf->dyn_ports); in ice_init_pf()
4088 xa_init(&pf->sf_nums); in ice_init_pf()
4105 * word) indicates WoL is not supported on the corresponding PF ID. in ice_is_wol_supported()
4126 struct ice_pf *pf = vsi->back; in ice_vsi_recfg_qs() local
4132 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { in ice_vsi_recfg_qs()
4149 dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n"); in ice_vsi_recfg_qs()
4165 ice_pf_dcb_recfg(pf, locked); in ice_vsi_recfg_qs()
4170 dev_err(ice_pf_to_dev(pf), "Error during VSI rebuild: %d. Unload and reload the driver.\n", in ice_vsi_recfg_qs()
4173 clear_bit(ICE_CFG_BUSY, pf->state); in ice_vsi_recfg_qs()
4178 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
4179 * @pf: PF to configure
4181 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
4184 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) in ice_set_safe_mode_vlan_cfg() argument
4186 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_set_safe_mode_vlan_cfg()
4198 hw = &pf->hw; in ice_set_safe_mode_vlan_cfg()
4237 struct ice_pf *pf = hw->back; in ice_log_pkg_init() local
4240 dev = ice_pf_to_dev(pf); in ice_log_pkg_init()
4314 * @pf: pointer to the PF instance
4320 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) in ice_load_pkg() argument
4323 struct device *dev = ice_pf_to_dev(pf); in ice_load_pkg()
4324 struct ice_hw *hw = &pf->hw; in ice_load_pkg()
4341 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); in ice_load_pkg()
4348 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags); in ice_load_pkg()
4353 * @pf: pointer to the PF structure
4359 static void ice_verify_cacheline_size(struct ice_pf *pf) in ice_verify_cacheline_size() argument
4361 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M) in ice_verify_cacheline_size()
4362 dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n", in ice_verify_cacheline_size()
4368 * @pf: PF struct
4372 static int ice_send_version(struct ice_pf *pf) in ice_send_version() argument
4382 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL); in ice_send_version()
4387 * @pf: pointer to the PF instance
4391 static int ice_init_fdir(struct ice_pf *pf) in ice_init_fdir() argument
4393 struct device *dev = ice_pf_to_dev(pf); in ice_init_fdir()
4398 * Allocate it and store it in the PF. in ice_init_fdir()
4400 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info); in ice_init_fdir()
4412 mutex_init(&pf->hw.fdir_fltr_lock); in ice_init_fdir()
4414 err = ice_fdir_create_dflt_rules(pf); in ice_init_fdir()
4421 ice_fdir_release_flows(&pf->hw); in ice_init_fdir()
4425 if (pf->ctrl_vsi_idx != ICE_NO_VSI) { in ice_init_fdir()
4426 pf->vsi[pf->ctrl_vsi_idx] = NULL; in ice_init_fdir()
4427 pf->ctrl_vsi_idx = ICE_NO_VSI; in ice_init_fdir()
4432 static void ice_deinit_fdir(struct ice_pf *pf) in ice_deinit_fdir() argument
4434 struct ice_vsi *vsi = ice_get_ctrl_vsi(pf); in ice_deinit_fdir()
4441 if (pf->ctrl_vsi_idx != ICE_NO_VSI) { in ice_deinit_fdir()
4442 pf->vsi[pf->ctrl_vsi_idx] = NULL; in ice_deinit_fdir()
4443 pf->ctrl_vsi_idx = ICE_NO_VSI; in ice_deinit_fdir()
4446 mutex_destroy(&(&pf->hw)->fdir_fltr_lock); in ice_deinit_fdir()
4451 * @pf: pointer to the PF instance
4453 static char *ice_get_opt_fw_name(struct ice_pf *pf) in ice_get_opt_fw_name() argument
4458 struct pci_dev *pdev = pf->pdev; in ice_get_opt_fw_name()
4481 * @pf: pointer to the PF instance
4486 static int ice_request_fw(struct ice_pf *pf, const struct firmware **firmware) in ice_request_fw() argument
4488 char *opt_fw_filename = ice_get_opt_fw_name(pf); in ice_request_fw()
4489 struct device *dev = ice_pf_to_dev(pf); in ice_request_fw()
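Only two fragments of ice_request_fw() match pf, which obscures its actual shape: probe for an optional, board-specific DDP package first, then fall back to the default one. A condensed reconstruction from the full source (the nowarn-then-fallback calls and ICE_DDP_PKG_FILE, which expands to "intel/ice/ddp/ice.pkg", are real; error logging is elided here):

	static int ice_request_fw(struct ice_pf *pf, const struct firmware **firmware)
	{
		char *opt_fw_filename = ice_get_opt_fw_name(pf);
		struct device *dev = ice_pf_to_dev(pf);
		int err;

		if (opt_fw_filename) {
			/* Silently probe for the DSN-derived package name */
			err = firmware_request_nowarn(firmware, opt_fw_filename, dev);
			kfree(opt_fw_filename);
			if (!err)
				return 0;
		}

		/* Fall back to the default DDP package */
		return request_firmware(firmware, ICE_DDP_PKG_FILE, dev);
	}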
4520 struct ice_pf *pf = hw->back; in ice_init_tx_topology() local
4524 dev = ice_pf_to_dev(pf); in ice_init_tx_topology()
4554 * @pf: pointer to pf structure
4556 * The pf->supported_rxdids bitmap is used to indicate to VFs which descriptor
4557 * formats the PF hardware supports. The exact list of supported RXDIDs
4565 static void ice_init_supported_rxdids(struct ice_hw *hw, struct ice_pf *pf) in ice_init_supported_rxdids() argument
4567 pf->supported_rxdids = BIT(ICE_RXDID_LEGACY_1); in ice_init_supported_rxdids()
4575 pf->supported_rxdids |= BIT(i); in ice_init_supported_rxdids()
4582 * @pf: pointer to pf structure
4589 static int ice_init_ddp_config(struct ice_hw *hw, struct ice_pf *pf) in ice_init_ddp_config() argument
4591 struct device *dev = ice_pf_to_dev(pf); in ice_init_ddp_config()
4595 err = ice_request_fw(pf, &firmware); in ice_init_ddp_config()
4610 ice_load_pkg(firmware, pf); in ice_init_ddp_config()
4614 ice_init_supported_rxdids(hw, pf); in ice_init_ddp_config()
4621 * @pf: pointer to the PF struct
4623 static void ice_print_wake_reason(struct ice_pf *pf) in ice_print_wake_reason() argument
4625 u32 wus = pf->wakeup_reason; in ice_print_wake_reason()
4643 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str); in ice_print_wake_reason()
4725 int ice_init_dev(struct ice_pf *pf) in ice_init_dev() argument
4727 struct device *dev = ice_pf_to_dev(pf); in ice_init_dev()
4728 struct ice_hw *hw = &pf->hw; in ice_init_dev()
4731 ice_init_feature_support(pf); in ice_init_dev()
4733 err = ice_init_ddp_config(hw, pf); in ice_init_dev()
4736 * set in pf->state, which will cause ice_is_safe_mode to return in ice_init_dev()
4739 if (err || ice_is_safe_mode(pf)) { in ice_init_dev()
4748 err = ice_init_pf(pf); in ice_init_dev()
4754 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port; in ice_init_dev()
4755 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port; in ice_init_dev()
4756 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared; in ice_init_dev()
4757 if (pf->hw.tnl.valid_count[TNL_VXLAN]) { in ice_init_dev()
4758 pf->hw.udp_tunnel_nic.tables[0].n_entries = in ice_init_dev()
4759 pf->hw.tnl.valid_count[TNL_VXLAN]; in ice_init_dev()
4760 pf->hw.udp_tunnel_nic.tables[0].tunnel_types = in ice_init_dev()
4763 if (pf->hw.tnl.valid_count[TNL_GENEVE]) { in ice_init_dev()
4764 pf->hw.udp_tunnel_nic.tables[1].n_entries = in ice_init_dev()
4765 pf->hw.tnl.valid_count[TNL_GENEVE]; in ice_init_dev()
4766 pf->hw.udp_tunnel_nic.tables[1].tunnel_types = in ice_init_dev()
4770 err = ice_init_interrupt_scheme(pf); in ice_init_dev()
4782 err = ice_req_irq_msix_misc(pf); in ice_init_dev()
4791 ice_clear_interrupt_scheme(pf); in ice_init_dev()
4793 ice_deinit_pf(pf); in ice_init_dev()
4797 void ice_deinit_dev(struct ice_pf *pf) in ice_deinit_dev() argument
4799 ice_free_irq_msix_misc(pf); in ice_deinit_dev()
4800 ice_deinit_pf(pf); in ice_deinit_dev()
4801 ice_deinit_hw(&pf->hw); in ice_deinit_dev()
4804 ice_reset(&pf->hw, ICE_RESET_PFR); in ice_deinit_dev()
4805 pci_wait_for_pending_transaction(pf->pdev); in ice_deinit_dev()
4806 ice_clear_interrupt_scheme(pf); in ice_deinit_dev()
4809 static void ice_init_features(struct ice_pf *pf) in ice_init_features() argument
4811 struct device *dev = ice_pf_to_dev(pf); in ice_init_features()
4813 if (ice_is_safe_mode(pf)) in ice_init_features()
4817 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_init_features()
4818 ice_ptp_init(pf); in ice_init_features()
4820 if (ice_is_feature_supported(pf, ICE_F_GNSS)) in ice_init_features()
4821 ice_gnss_init(pf); in ice_init_features()
4823 if (ice_is_feature_supported(pf, ICE_F_CGU) || in ice_init_features()
4824 ice_is_feature_supported(pf, ICE_F_PHY_RCLK)) in ice_init_features()
4825 ice_dpll_init(pf); in ice_init_features()
4828 if (ice_init_fdir(pf)) in ice_init_features()
4832 if (ice_init_pf_dcb(pf, false)) { in ice_init_features()
4833 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); in ice_init_features()
4834 clear_bit(ICE_FLAG_DCB_ENA, pf->flags); in ice_init_features()
4836 ice_cfg_lldp_mib_change(&pf->hw, true); in ice_init_features()
4839 if (ice_init_lag(pf)) in ice_init_features()
4842 ice_hwmon_init(pf); in ice_init_features()
4845 static void ice_deinit_features(struct ice_pf *pf) in ice_deinit_features() argument
4847 if (ice_is_safe_mode(pf)) in ice_deinit_features()
4850 ice_deinit_lag(pf); in ice_deinit_features()
4851 if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)) in ice_deinit_features()
4852 ice_cfg_lldp_mib_change(&pf->hw, false); in ice_deinit_features()
4853 ice_deinit_fdir(pf); in ice_deinit_features()
4854 if (ice_is_feature_supported(pf, ICE_F_GNSS)) in ice_deinit_features()
4855 ice_gnss_exit(pf); in ice_deinit_features()
4856 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_deinit_features()
4857 ice_ptp_release(pf); in ice_deinit_features()
4858 if (test_bit(ICE_FLAG_DPLL, pf->flags)) in ice_deinit_features()
4859 ice_dpll_deinit(pf); in ice_deinit_features()
4860 if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) in ice_deinit_features()
4861 xa_destroy(&pf->eswitch.reprs); in ice_deinit_features()
4864 static void ice_init_wakeup(struct ice_pf *pf) in ice_init_wakeup() argument
4867 pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS); in ice_init_wakeup()
4870 ice_print_wake_reason(pf); in ice_init_wakeup()
4873 wr32(&pf->hw, PFPM_WUS, U32_MAX); in ice_init_wakeup()
4876 device_set_wakeup_enable(ice_pf_to_dev(pf), false); in ice_init_wakeup()
4879 static int ice_init_link(struct ice_pf *pf) in ice_init_link() argument
4881 struct device *dev = ice_pf_to_dev(pf); in ice_init_link()
4884 err = ice_init_link_events(pf->hw.port_info); in ice_init_link()
4891 err = ice_init_nvm_phy_type(pf->hw.port_info); in ice_init_link()
4896 err = ice_update_link_info(pf->hw.port_info); in ice_init_link()
4900 ice_init_link_dflt_override(pf->hw.port_info); in ice_init_link()
4902 ice_check_link_cfg_err(pf, in ice_init_link()
4903 pf->hw.port_info->phy.link_info.link_cfg_err); in ice_init_link()
4906 if (pf->hw.port_info->phy.link_info.link_info & in ice_init_link()
4909 err = ice_init_phy_user_cfg(pf->hw.port_info); in ice_init_link()
4913 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { in ice_init_link()
4914 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_init_link()
4920 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_init_link()
4926 static int ice_init_pf_sw(struct ice_pf *pf) in ice_init_pf_sw() argument
4928 bool dvm = ice_is_dvm_ena(&pf->hw); in ice_init_pf_sw()
4933 pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL); in ice_init_pf_sw()
4934 if (!pf->first_sw) in ice_init_pf_sw()
4937 if (pf->hw.evb_veb) in ice_init_pf_sw()
4938 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; in ice_init_pf_sw()
4940 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; in ice_init_pf_sw()
4942 pf->first_sw->pf = pf; in ice_init_pf_sw()
4945 pf->first_sw->sw_id = pf->hw.port_info->sw_id; in ice_init_pf_sw()
4947 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); in ice_init_pf_sw()
4951 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); in ice_init_pf_sw()
4961 kfree(pf->first_sw); in ice_init_pf_sw()
4965 static void ice_deinit_pf_sw(struct ice_pf *pf) in ice_deinit_pf_sw() argument
4967 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_deinit_pf_sw()
4973 kfree(pf->first_sw); in ice_deinit_pf_sw()
4976 static int ice_alloc_vsis(struct ice_pf *pf) in ice_alloc_vsis() argument
4978 struct device *dev = ice_pf_to_dev(pf); in ice_alloc_vsis()
4980 pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi; in ice_alloc_vsis()
4981 if (!pf->num_alloc_vsi) in ice_alloc_vsis()
4984 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { in ice_alloc_vsis()
4987 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); in ice_alloc_vsis()
4988 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; in ice_alloc_vsis()
4991 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), in ice_alloc_vsis()
4993 if (!pf->vsi) in ice_alloc_vsis()
4996 pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi, in ice_alloc_vsis()
4997 sizeof(*pf->vsi_stats), GFP_KERNEL); in ice_alloc_vsis()
4998 if (!pf->vsi_stats) { in ice_alloc_vsis()
4999 devm_kfree(dev, pf->vsi); in ice_alloc_vsis()
5006 static void ice_dealloc_vsis(struct ice_pf *pf) in ice_dealloc_vsis() argument
5008 devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats); in ice_dealloc_vsis()
5009 pf->vsi_stats = NULL; in ice_dealloc_vsis()
5011 pf->num_alloc_vsi = 0; in ice_dealloc_vsis()
5012 devm_kfree(ice_pf_to_dev(pf), pf->vsi); in ice_dealloc_vsis()
5013 pf->vsi = NULL; in ice_dealloc_vsis()
5016 static int ice_init_devlink(struct ice_pf *pf) in ice_init_devlink() argument
5020 err = ice_devlink_register_params(pf); in ice_init_devlink()
5024 ice_devlink_init_regions(pf); in ice_init_devlink()
5025 ice_devlink_register(pf); in ice_init_devlink()
5026 ice_health_init(pf); in ice_init_devlink()
5031 static void ice_deinit_devlink(struct ice_pf *pf) in ice_deinit_devlink() argument
5033 ice_health_deinit(pf); in ice_deinit_devlink()
5034 ice_devlink_unregister(pf); in ice_deinit_devlink()
5035 ice_devlink_destroy_regions(pf); in ice_deinit_devlink()
5036 ice_devlink_unregister_params(pf); in ice_deinit_devlink()
5039 static int ice_init(struct ice_pf *pf) in ice_init() argument
5043 err = ice_init_dev(pf); in ice_init()
5047 if (pf->hw.mac_type == ICE_MAC_E830) { in ice_init()
5048 err = pci_enable_ptm(pf->pdev, NULL); in ice_init()
5050 dev_dbg(ice_pf_to_dev(pf), "PCIe PTM not supported by PCIe bus/controller\n"); in ice_init()
5053 err = ice_alloc_vsis(pf); in ice_init()
5057 err = ice_init_pf_sw(pf); in ice_init()
5061 ice_init_wakeup(pf); in ice_init()
5063 err = ice_init_link(pf); in ice_init()
5067 err = ice_send_version(pf); in ice_init()
5071 ice_verify_cacheline_size(pf); in ice_init()
5073 if (ice_is_safe_mode(pf)) in ice_init()
5074 ice_set_safe_mode_vlan_cfg(pf); in ice_init()
5077 pcie_print_link_status(pf->pdev); in ice_init()
5080 clear_bit(ICE_DOWN, pf->state); in ice_init()
5081 clear_bit(ICE_SERVICE_DIS, pf->state); in ice_init()
5084 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); in ice_init()
5089 ice_deinit_pf_sw(pf); in ice_init()
5091 ice_dealloc_vsis(pf); in ice_init()
5093 ice_deinit_dev(pf); in ice_init()
5097 static void ice_deinit(struct ice_pf *pf) in ice_deinit() argument
5099 set_bit(ICE_SERVICE_DIS, pf->state); in ice_deinit()
5100 set_bit(ICE_DOWN, pf->state); in ice_deinit()
5102 ice_deinit_pf_sw(pf); in ice_deinit()
5103 ice_dealloc_vsis(pf); in ice_deinit()
5104 ice_deinit_dev(pf); in ice_deinit()
5108 * ice_load - load pf by init hw and starting VSI
5109 * @pf: pointer to the pf instance
5113 int ice_load(struct ice_pf *pf) in ice_load() argument
5118 devl_assert_locked(priv_to_devlink(pf)); in ice_load()
5120 vsi = ice_get_main_vsi(pf); in ice_load()
5132 err = ice_init_mac_fltr(pf); in ice_load()
5136 err = ice_devlink_create_pf_port(pf); in ice_load()
5140 SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port); in ice_load()
5152 ice_init_features(pf); in ice_load()
5154 err = ice_init_rdma(pf); in ice_load()
5158 ice_service_task_restart(pf); in ice_load()
5160 clear_bit(ICE_DOWN, pf->state); in ice_load()
5165 ice_deinit_features(pf); in ice_load()
5170 ice_devlink_destroy_pf_port(pf); in ice_load()
5178 * ice_unload - unload pf by stopping VSI and deinit hw
5179 * @pf: pointer to the pf instance
5183 void ice_unload(struct ice_pf *pf) in ice_unload() argument
5185 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_unload()
5187 devl_assert_locked(priv_to_devlink(pf)); in ice_unload()
5189 ice_deinit_rdma(pf); in ice_unload()
5190 ice_deinit_features(pf); in ice_unload()
5193 ice_devlink_destroy_pf_port(pf); in ice_unload()
5197 static int ice_probe_recovery_mode(struct ice_pf *pf) in ice_probe_recovery_mode() argument
5199 struct device *dev = ice_pf_to_dev(pf); in ice_probe_recovery_mode()
5204 INIT_HLIST_HEAD(&pf->aq_wait_list); in ice_probe_recovery_mode()
5205 spin_lock_init(&pf->aq_wait_lock); in ice_probe_recovery_mode()
5206 init_waitqueue_head(&pf->aq_wait_queue); in ice_probe_recovery_mode()
5208 timer_setup(&pf->serv_tmr, ice_service_timer, 0); in ice_probe_recovery_mode()
5209 pf->serv_tmr_period = HZ; in ice_probe_recovery_mode()
5210 INIT_WORK(&pf->serv_task, ice_service_task_recovery_mode); in ice_probe_recovery_mode()
5211 clear_bit(ICE_SERVICE_SCHED, pf->state); in ice_probe_recovery_mode()
5212 err = ice_create_all_ctrlq(&pf->hw); in ice_probe_recovery_mode()
5216 scoped_guard(devl, priv_to_devlink(pf)) { in ice_probe_recovery_mode()
5217 err = ice_init_devlink(pf); in ice_probe_recovery_mode()
5222 ice_service_task_restart(pf); in ice_probe_recovery_mode()
5239 struct ice_pf *pf; in ice_probe() local
5275 pf = ice_allocate_pf(dev); in ice_probe()
5276 if (!pf) in ice_probe()
5280 pf->aux_idx = -1; in ice_probe()
5290 pf->pdev = pdev; in ice_probe()
5291 pci_set_drvdata(pdev, pf); in ice_probe()
5292 set_bit(ICE_DOWN, pf->state); in ice_probe()
5294 set_bit(ICE_SERVICE_DIS, pf->state); in ice_probe()
5296 hw = &pf->hw; in ice_probe()
5300 hw->back = pf; in ice_probe()
5311 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); in ice_probe()
5319 return ice_probe_recovery_mode(pf); in ice_probe()
5332 pf->adapter = adapter; in ice_probe()
5334 err = ice_init(pf); in ice_probe()
5338 devl_lock(priv_to_devlink(pf)); in ice_probe()
5339 err = ice_load(pf); in ice_probe()
5343 err = ice_init_devlink(pf); in ice_probe()
5346 devl_unlock(priv_to_devlink(pf)); in ice_probe()
5351 ice_unload(pf); in ice_probe()
5353 devl_unlock(priv_to_devlink(pf)); in ice_probe()
5354 ice_deinit(pf); in ice_probe()
5364 * @pf: pointer to the PF struct
5368 static void ice_set_wake(struct ice_pf *pf) in ice_set_wake() argument
5370 struct ice_hw *hw = &pf->hw; in ice_set_wake()
5371 bool wol = pf->wol_ena; in ice_set_wake()
5385 * @pf: pointer to the PF struct
5389 * wake, and that PF reset doesn't undo the LAA.
5391 static void ice_setup_mc_magic_wake(struct ice_pf *pf) in ice_setup_mc_magic_wake() argument
5393 struct device *dev = ice_pf_to_dev(pf); in ice_setup_mc_magic_wake()
5394 struct ice_hw *hw = &pf->hw; in ice_setup_mc_magic_wake()
5400 if (!pf->wol_ena) in ice_setup_mc_magic_wake()
5403 vsi = ice_get_main_vsi(pf); in ice_setup_mc_magic_wake()
5429 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_remove() local
5433 if (!ice_is_reset_in_progress(pf->state)) in ice_remove()
5438 if (ice_is_recovery_mode(&pf->hw)) { in ice_remove()
5439 ice_service_task_stop(pf); in ice_remove()
5440 scoped_guard(devl, priv_to_devlink(pf)) { in ice_remove()
5441 ice_deinit_devlink(pf); in ice_remove()
5446 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { in ice_remove()
5447 set_bit(ICE_VF_RESETS_DISABLED, pf->state); in ice_remove()
5448 ice_free_vfs(pf); in ice_remove()
5451 ice_hwmon_exit(pf); in ice_remove()
5453 ice_service_task_stop(pf); in ice_remove()
5454 ice_aq_cancel_waiting_tasks(pf); in ice_remove()
5455 set_bit(ICE_DOWN, pf->state); in ice_remove()
5457 if (!ice_is_safe_mode(pf)) in ice_remove()
5458 ice_remove_arfs(pf); in ice_remove()
5460 devl_lock(priv_to_devlink(pf)); in ice_remove()
5461 ice_dealloc_all_dynamic_ports(pf); in ice_remove()
5462 ice_deinit_devlink(pf); in ice_remove()
5464 ice_unload(pf); in ice_remove()
5465 devl_unlock(priv_to_devlink(pf)); in ice_remove()
5467 ice_deinit(pf); in ice_remove()
5468 ice_vsi_release_all(pf); in ice_remove()
5470 ice_setup_mc_magic_wake(pf); in ice_remove()
5471 ice_set_wake(pf); in ice_remove()
5482 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_shutdown() local
5487 pci_wake_from_d3(pdev, pf->wol_ena); in ice_shutdown()
5494 * @pf: board private structure
5498 static void ice_prepare_for_shutdown(struct ice_pf *pf) in ice_prepare_for_shutdown() argument
5500 struct ice_hw *hw = &pf->hw; in ice_prepare_for_shutdown()
5505 ice_vc_notify_reset(pf); in ice_prepare_for_shutdown()
5507 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n"); in ice_prepare_for_shutdown()
5510 ice_pf_dis_all_vsi(pf, false); in ice_prepare_for_shutdown()
5512 ice_for_each_vsi(pf, v) in ice_prepare_for_shutdown()
5513 if (pf->vsi[v]) in ice_prepare_for_shutdown()
5514 pf->vsi[v]->vsi_num = 0; in ice_prepare_for_shutdown()
5521 * @pf: board private structure to reinitialize
5529 static int ice_reinit_interrupt_scheme(struct ice_pf *pf) in ice_reinit_interrupt_scheme() argument
5531 struct device *dev = ice_pf_to_dev(pf); in ice_reinit_interrupt_scheme()
5538 ret = ice_init_interrupt_scheme(pf); in ice_reinit_interrupt_scheme()
5545 ice_for_each_vsi(pf, v) { in ice_reinit_interrupt_scheme()
5546 if (!pf->vsi[v]) in ice_reinit_interrupt_scheme()
5549 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5552 ice_vsi_map_rings_to_vectors(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5554 ice_vsi_set_napi_queues(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5558 ret = ice_req_irq_msix_misc(pf); in ice_reinit_interrupt_scheme()
5569 if (pf->vsi[v]) { in ice_reinit_interrupt_scheme()
5571 ice_vsi_clear_napi_queues(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5573 ice_vsi_free_q_vectors(pf->vsi[v]); in ice_reinit_interrupt_scheme()
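On partial failure, ice_reinit_interrupt_scheme() walks back over the VSIs it already touched and frees their queue vectors. A minimal sketch of that allocate-forward/unwind-backward idiom; the fail-at-VSI-2 trigger and the helper names are invented for the demo:

	#include <stdio.h>

	#define NUM_VSI 4

	static int alloc_vectors(int v)
	{
		if (v == 2)		/* pretend VSI 2 fails, to show the unwind */
			return -1;
		printf("alloc vectors for VSI %d\n", v);
		return 0;
	}

	static void free_vectors(int v)
	{
		printf("free vectors for VSI %d\n", v);
	}

	int main(void)
	{
		int v, err = 0;

		for (v = 0; v < NUM_VSI; v++) {
			err = alloc_vectors(v);
			if (err)
				goto err_unroll;
		}
		return 0;

	err_unroll:
		while (v--)		/* walk back over the VSIs already set up */
			free_vectors(v);
		fprintf(stderr, "reinit failed: %d\n", err);
		return 1;
	}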
5589 struct ice_pf *pf; in ice_suspend() local
5592 pf = pci_get_drvdata(pdev); in ice_suspend()
5594 if (!ice_pf_state_is_nominal(pf)) { in ice_suspend()
5605 disabled = ice_service_task_stop(pf); in ice_suspend()
5607 ice_deinit_rdma(pf); in ice_suspend()
5610 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) { in ice_suspend()
5612 ice_service_task_restart(pf); in ice_suspend()
5616 if (test_bit(ICE_DOWN, pf->state) || in ice_suspend()
5617 ice_is_reset_in_progress(pf->state)) { in ice_suspend()
5620 ice_service_task_restart(pf); in ice_suspend()
5624 ice_setup_mc_magic_wake(pf); in ice_suspend()
5626 ice_prepare_for_shutdown(pf); in ice_suspend()
5628 ice_set_wake(pf); in ice_suspend()
5635 ice_free_irq_msix_misc(pf); in ice_suspend()
5636 ice_for_each_vsi(pf, v) { in ice_suspend()
5637 if (!pf->vsi[v]) in ice_suspend()
5640 ice_vsi_clear_napi_queues(pf->vsi[v]); in ice_suspend()
5642 ice_vsi_free_q_vectors(pf->vsi[v]); in ice_suspend()
5644 ice_clear_interrupt_scheme(pf); in ice_suspend()
5647 pci_wake_from_d3(pdev, pf->wol_ena); in ice_suspend()
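The ICE_SUSPENDED guard in ice_suspend() uses test_and_set_bit() so a second suspend attempt backs off instead of tearing the device down twice. A minimal userspace sketch of the same idempotence idiom using a C11 atomic flag; the real state word is a kernel bitmap, not an atomic_flag:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_flag suspended = ATOMIC_FLAG_INIT;

	static int do_suspend(void)
	{
		/* like test_and_set_bit(ICE_SUSPENDED, pf->state): returns the
		 * previous value, so only the first caller does the teardown
		 */
		if (atomic_flag_test_and_set(&suspended)) {
			puts("already suspended, nothing to do");
			return 0;
		}
		puts("tearing down for suspend");
		return 0;
	}

	int main(void)
	{
		do_suspend();
		do_suspend();	/* idempotent: the guard trips this time */
		return 0;
	}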
5660 struct ice_pf *pf; in ice_resume() local
5677 pf = pci_get_drvdata(pdev); in ice_resume()
5678 hw = &pf->hw; in ice_resume()
5680 pf->wakeup_reason = rd32(hw, PFPM_WUS); in ice_resume()
5681 ice_print_wake_reason(pf); in ice_resume()
5686 ret = ice_reinit_interrupt_scheme(pf); in ice_resume()
5690 ret = ice_init_rdma(pf); in ice_resume()
5695 clear_bit(ICE_DOWN, pf->state); in ice_resume()
5696 /* Now perform PF reset and rebuild */ in ice_resume()
5699 clear_bit(ICE_SERVICE_DIS, pf->state); in ice_resume()
5701 if (ice_schedule_reset(pf, reset_type)) in ice_resume()
5704 clear_bit(ICE_SUSPENDED, pf->state); in ice_resume()
5705 ice_service_task_restart(pf); in ice_resume()
5708 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); in ice_resume()
5724 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_pci_err_detected() local
5726 if (!pf) { in ice_pci_err_detected()
5732 if (!test_bit(ICE_SUSPENDED, pf->state)) { in ice_pci_err_detected()
5733 ice_service_task_stop(pf); in ice_pci_err_detected()
5735 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { in ice_pci_err_detected()
5736 set_bit(ICE_PFR_REQ, pf->state); in ice_pci_err_detected()
5737 ice_prepare_for_reset(pf, ICE_RESET_PFR); in ice_pci_err_detected()
5753 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_pci_err_slot_reset() local
5770 reg = rd32(&pf->hw, GLGEN_RTRIG); in ice_pci_err_slot_reset()
5789 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_pci_err_resume() local
5791 if (!pf) { in ice_pci_err_resume()
5797 if (test_bit(ICE_SUSPENDED, pf->state)) { in ice_pci_err_resume()
5803 ice_restore_all_vfs_msi_state(pf); in ice_pci_err_resume()
5805 ice_do_reset(pf, ICE_RESET_PFR); in ice_pci_err_resume()
5806 ice_service_task_restart(pf); in ice_pci_err_resume()
5807 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); in ice_pci_err_resume()
5816 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_pci_err_reset_prepare() local
5818 if (!test_bit(ICE_SUSPENDED, pf->state)) { in ice_pci_err_reset_prepare()
5819 ice_service_task_stop(pf); in ice_pci_err_reset_prepare()
5821 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { in ice_pci_err_reset_prepare()
5822 set_bit(ICE_PFR_REQ, pf->state); in ice_pci_err_reset_prepare()
5823 ice_prepare_for_reset(pf, ICE_RESET_PFR); in ice_pci_err_reset_prepare()
6005 struct ice_pf *pf = vsi->back; in ice_set_mac_address() local
6006 struct ice_hw *hw = &pf->hw; in ice_set_mac_address()
6018 if (test_bit(ICE_DOWN, pf->state) || in ice_set_mac_address()
6019 ice_is_reset_in_progress(pf->state)) { in ice_set_mac_address()
6025 if (ice_chnl_dmac_fltr_cnt(pf)) { in ice_set_mac_address()
6337 * @vsi: PF's VSI
6353 * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
6354 * @vsi: PF's VSI
6401 * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
6402 * @vsi: PF's VSI
6472 * ice_set_loopback - turn on/off loopback mode on underlying PF
6508 struct ice_pf *pf = vsi->back; in ice_set_features() local
6512 if (ice_is_safe_mode(pf)) { in ice_set_features()
6513 dev_err(ice_pf_to_dev(pf), in ice_set_features()
6519 if (ice_is_reset_in_progress(pf->state)) { in ice_set_features()
6520 dev_err(ice_pf_to_dev(pf), in ice_set_features()
6560 if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) { in ice_set_features()
6561 dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n"); in ice_set_features()
6568 assign_bit(ICE_FLAG_CLS_FLOWER, pf->flags, ena); in ice_set_features()
6577 if (ice_is_feature_supported(pf, ICE_F_GCS) && in ice_set_features()
6580 dev_err(ice_pf_to_dev(pf), "To enable TSO, you must first disable HW checksum.\n"); in ice_set_features()
6582 dev_err(ice_pf_to_dev(pf), "To enable HW checksum, you must first disable TSO.\n"); in ice_set_features()
6590 * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
6785 struct ice_pf *pf = vsi->back; in ice_up_complete() local
6809 ice_ptp_link_change(pf, true); in ice_up_complete()
6818 ice_service_task_schedule(pf); in ice_up_complete()
6901 struct ice_pf *pf = vsi->back; in ice_update_vsi_ring_stats() local
6948 * random value after PF reset. And as we increase the reported stat by in ice_update_vsi_ring_stats()
6952 if (likely(pf->stat_prev_loaded)) { in ice_update_vsi_ring_stats()
6975 struct ice_pf *pf = vsi->back; in ice_update_vsi_stats() local
6978 test_bit(ICE_CFG_BUSY, pf->state)) in ice_update_vsi_stats()
6994 cur_ns->rx_crc_errors = pf->stats.crc_errors; in ice_update_vsi_stats()
6995 cur_ns->rx_errors = pf->stats.crc_errors + in ice_update_vsi_stats()
6996 pf->stats.illegal_bytes + in ice_update_vsi_stats()
6997 pf->stats.rx_undersize + in ice_update_vsi_stats()
6998 pf->hw_csum_rx_error + in ice_update_vsi_stats()
6999 pf->stats.rx_jabber + in ice_update_vsi_stats()
7000 pf->stats.rx_fragments + in ice_update_vsi_stats()
7001 pf->stats.rx_oversize; in ice_update_vsi_stats()
7003 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; in ice_update_vsi_stats()
7008 * ice_update_pf_stats - Update PF port stats counters
7009 * @pf: PF whose stats need to be updated
7011 void ice_update_pf_stats(struct ice_pf *pf) in ice_update_pf_stats() argument
7014 struct ice_hw *hw = &pf->hw; in ice_update_pf_stats()
7019 prev_ps = &pf->stats_prev; in ice_update_pf_stats()
7020 cur_ps = &pf->stats; in ice_update_pf_stats()
7022 if (ice_is_reset_in_progress(pf->state)) in ice_update_pf_stats()
7023 pf->stat_prev_loaded = false; in ice_update_pf_stats()
7025 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7029 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7033 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7037 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7041 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded, in ice_update_pf_stats()
7045 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7049 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7053 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7057 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7061 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7065 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7068 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7071 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7074 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7077 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7080 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7083 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7086 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7089 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7092 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7095 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7098 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7101 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7104 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7111 pf->stat_prev_loaded, &prev_ps->fd_sb_match, in ice_update_pf_stats()
7113 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7116 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7119 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7122 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7125 ice_update_dcb_stats(pf); in ice_update_pf_stats()
7127 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7130 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7133 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7137 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7141 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7144 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7147 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7150 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7153 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0; in ice_update_pf_stats()
7155 pf->stat_prev_loaded = true; in ice_update_pf_stats()
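Every counter above goes through ice_stat_update40()/ice_stat_update32() against a prev/cur snapshot pair, with pf->stat_prev_loaded forcing a re-baseline after reset because the hardware counters are not zeroed. A minimal sketch of the rollover-safe accumulation such a 40-bit helper performs, with the MMIO register read replaced by a plain parameter; the exact register plumbing is elided:

	#include <inttypes.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define BIT40 (1ULL << 40)

	static void stat_update40(bool prev_loaded, uint64_t hw_count,
				  uint64_t *prev, uint64_t *cur)
	{
		uint64_t new_data = hw_count & (BIT40 - 1);	/* counter is 40 bits */

		if (!prev_loaded) {	/* first read after reset: baseline only */
			*prev = new_data;
			return;
		}
		if (new_data >= *prev)
			*cur += new_data - *prev;
		else			/* counter wrapped between reads */
			*cur += (new_data + BIT40) - *prev;
		*prev = new_data;
	}

	int main(void)
	{
		uint64_t prev = 0, cur = 0;

		stat_update40(false, BIT40 - 5, &prev, &cur);	/* baseline near wrap */
		stat_update40(true, 10, &prev, &cur);		/* wrapped: delta 15 */
		printf("accumulated: %" PRIu64 "\n", cur);	/* prints 15 */
		return 0;
	}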
7227 struct ice_pf *pf = vsi->back; in ice_vsi_dis_irq() local
7228 struct ice_hw *hw = &pf->hw; in ice_vsi_dis_irq()
7423 struct ice_pf *pf = vsi->back; in ice_vsi_open_ctrl() local
7427 dev = ice_pf_to_dev(pf); in ice_vsi_open_ctrl()
7479 struct ice_pf *pf = vsi->back; in ice_vsi_open() local
7496 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name); in ice_vsi_open()
7501 if (bitmap_empty(pf->txtime_txqs, pf->max_pf_txqs)) in ice_vsi_open()
7537 * @pf: PF from which all VSIs are being removed
7539 static void ice_vsi_release_all(struct ice_pf *pf) in ice_vsi_release_all() argument
7543 if (!pf->vsi) in ice_vsi_release_all()
7546 ice_for_each_vsi(pf, i) { in ice_vsi_release_all()
7547 if (!pf->vsi[i]) in ice_vsi_release_all()
7550 if (pf->vsi[i]->type == ICE_VSI_CHNL) in ice_vsi_release_all()
7553 err = ice_vsi_release(pf->vsi[i]); in ice_vsi_release_all()
7555 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", in ice_vsi_release_all()
7556 i, err, pf->vsi[i]->vsi_num); in ice_vsi_release_all()
7562 * @pf: pointer to the PF instance
7565 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
7567 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) in ice_vsi_rebuild_by_type() argument
7569 struct device *dev = ice_pf_to_dev(pf); in ice_vsi_rebuild_by_type()
7572 ice_for_each_vsi(pf, i) { in ice_vsi_rebuild_by_type()
7573 struct ice_vsi *vsi = pf->vsi[i]; in ice_vsi_rebuild_by_type()
7587 err = ice_replay_vsi(&pf->hw, vsi->idx); in ice_vsi_rebuild_by_type()
7597 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); in ice_vsi_rebuild_by_type()
7615 * ice_update_pf_netdev_link - Update PF netdev link status
7616 * @pf: pointer to the PF instance
7618 static void ice_update_pf_netdev_link(struct ice_pf *pf) in ice_update_pf_netdev_link() argument
7623 ice_for_each_vsi(pf, i) { in ice_update_pf_netdev_link()
7624 struct ice_vsi *vsi = pf->vsi[i]; in ice_update_pf_netdev_link()
7629 ice_get_link_status(pf->vsi[i]->port_info, &link_up); in ice_update_pf_netdev_link()
7631 netif_carrier_on(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7632 netif_tx_wake_all_queues(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7634 netif_carrier_off(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7635 netif_tx_stop_all_queues(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7642 * @pf: PF to rebuild
7650 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) in ice_rebuild() argument
7652 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_rebuild()
7653 struct device *dev = ice_pf_to_dev(pf); in ice_rebuild()
7654 struct ice_hw *hw = &pf->hw; in ice_rebuild()
7658 if (test_bit(ICE_DOWN, pf->state)) in ice_rebuild()
7661 dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type); in ice_rebuild()
7669 pf->fw_emp_reset_disabled = false; in ice_rebuild()
7681 if (!ice_is_safe_mode(pf)) { in ice_rebuild()
7687 ice_load_pkg(NULL, pf); in ice_rebuild()
7692 dev_err(dev, "clear PF configuration failed %d\n", err); in ice_rebuild()
7718 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); in ice_rebuild()
7727 err = ice_req_irq_msix_misc(pf); in ice_rebuild()
7733 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { in ice_rebuild()
7741 /* force guaranteed filter pool for PF */ in ice_rebuild()
7743 /* force shared filter pool for PF */ in ice_rebuild()
7748 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) in ice_rebuild()
7749 ice_dcb_rebuild(pf); in ice_rebuild()
7751 /* If the PF previously had enabled PTP, PTP init needs to happen before in ice_rebuild()
7755 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_rebuild()
7756 ice_ptp_rebuild(pf, reset_type); in ice_rebuild()
7758 if (ice_is_feature_supported(pf, ICE_F_GNSS)) in ice_rebuild()
7759 ice_gnss_init(pf); in ice_rebuild()
7761 /* rebuild PF VSI */ in ice_rebuild()
7762 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF); in ice_rebuild()
7764 dev_err(dev, "PF VSI rebuild failed: %d\n", err); in ice_rebuild()
7769 err = ice_rebuild_channels(pf); in ice_rebuild()
7778 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { in ice_rebuild()
7779 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL); in ice_rebuild()
7790 ice_fdir_replay_fltrs(pf); in ice_rebuild()
7792 ice_rebuild_arfs(pf); in ice_rebuild()
7798 ice_update_pf_netdev_link(pf); in ice_rebuild()
7801 err = ice_send_version(pf); in ice_rebuild()
7811 clear_bit(ICE_RESET_FAILED, pf->state); in ice_rebuild()
7813 ice_health_clear(pf); in ice_rebuild()
7815 ice_plug_aux_dev(pf); in ice_rebuild()
7816 if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) in ice_rebuild()
7817 ice_lag_rebuild(pf); in ice_rebuild()
7820 ice_ptp_restore_timestamp_mode(pf); in ice_rebuild()
7828 set_bit(ICE_RESET_FAILED, pf->state); in ice_rebuild()
7830 /* set this bit in PF state to control service task scheduling */ in ice_rebuild()
7831 set_bit(ICE_NEEDS_RESTART, pf->state); in ice_rebuild()
7846 struct ice_pf *pf = vsi->back; in ice_change_mtu() local
7865 } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) { in ice_change_mtu()
7875 if (ice_is_reset_in_progress(pf->state)) { in ice_change_mtu()
7895 set_bit(ICE_FLAG_MTU_CHANGED, pf->flags); in ice_change_mtu()
8076 struct ice_pf *pf = vsi->back; in ice_bridge_getlink() local
8079 bmode = pf->first_sw->bridge_mode; in ice_bridge_getlink()
8137 * hooked up to. Iterates through the PF VSI list and sets the loopback mode (if
8147 struct ice_pf *pf = np->vsi->back; in ice_bridge_setlink() local
8149 struct ice_hw *hw = &pf->hw; in ice_bridge_setlink()
8153 pf_sw = pf->first_sw; in ice_bridge_setlink()
8167 /* Iterates through the PF VSI list and updates the loopback in ice_bridge_setlink()
8170 ice_for_each_vsi(pf, v) { in ice_bridge_setlink()
8171 if (!pf->vsi[v]) in ice_bridge_setlink()
8173 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); in ice_bridge_setlink()
8208 struct ice_pf *pf = vsi->back; in ice_tx_timeout() local
8211 pf->tx_timeout_count++; in ice_tx_timeout()
8217 if (ice_is_pfc_causing_hung_q(pf, txqueue)) { in ice_tx_timeout()
8218 dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n", in ice_tx_timeout()
8234 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) in ice_tx_timeout()
8235 pf->tx_timeout_recovery_level = 1; in ice_tx_timeout()
8236 else if (time_before(jiffies, (pf->tx_timeout_last_recovery + in ice_tx_timeout()
8241 struct ice_hw *hw = &pf->hw; in ice_tx_timeout()
8253 ice_prep_tx_hang_report(pf, tx_ring, vsi->vsi_num, head, intr); in ice_tx_timeout()
8256 pf->tx_timeout_last_recovery = jiffies; in ice_tx_timeout()
8258 pf->tx_timeout_recovery_level, txqueue); in ice_tx_timeout()
8260 switch (pf->tx_timeout_recovery_level) { in ice_tx_timeout()
8262 set_bit(ICE_PFR_REQ, pf->state); in ice_tx_timeout()
8265 set_bit(ICE_CORER_REQ, pf->state); in ice_tx_timeout()
8268 set_bit(ICE_GLOBR_REQ, pf->state); in ice_tx_timeout()
8272 set_bit(ICE_DOWN, pf->state); in ice_tx_timeout()
8274 set_bit(ICE_SERVICE_DIS, pf->state); in ice_tx_timeout()
8278 ice_service_task_schedule(pf); in ice_tx_timeout()
8279 pf->tx_timeout_recovery_level++; in ice_tx_timeout()
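ice_tx_timeout() escalates through PF, core, and global resets, restarting at level 1 when the last recovery was more than 20 seconds ago; the age check uses the kernel's wrap-safe time_after(). A minimal userspace sketch of both ideas, with plain ticks standing in for jiffies and the reset requests reduced to prints:

	#include <stdio.h>

	/* kernel-style wrap-safe comparison: true if a is after b */
	#define time_after(a, b) ((long)((b) - (a)) < 0)

	static void request_reset(unsigned long now, unsigned long *last, int *level)
	{
		if (time_after(now, *last + 20))	/* quiet spell: start over */
			*level = 1;

		switch (*level) {
		case 1:
			puts("request PFR (PF reset)");
			break;
		case 2:
			puts("request CORER (core reset)");
			break;
		case 3:
			puts("request GLOBR (global reset)");
			break;
		default:
			puts("recovery exhausted: take the port down");
			return;
		}
		*last = now;
		(*level)++;
	}

	int main(void)
	{
		unsigned long last = 0;
		int level = 1;

		request_reset(1, &last, &level);	/* -> PFR */
		request_reset(2, &last, &level);	/* inside window -> CORER */
		request_reset(50, &last, &level);	/* long quiet -> back to PFR */
		return 0;
	}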
8373 struct ice_pf *pf = vsi->back; in ice_validate_mqprio_qopt() local
8388 dev = ice_pf_to_dev(pf); in ice_validate_mqprio_qopt()
8500 * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
8501 * @pf: ptr to PF device
8504 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi) in ice_add_vsi_to_fdir() argument
8506 struct device *dev = ice_pf_to_dev(pf); in ice_add_vsi_to_fdir()
8514 hw = &pf->hw; in ice_add_vsi_to_fdir()
8561 * @pf: ptr to PF device
8567 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch) in ice_add_channel() argument
8569 struct device *dev = ice_pf_to_dev(pf); in ice_add_channel()
8577 vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch); in ice_add_channel()
8583 ice_add_vsi_to_fdir(pf, vsi); in ice_add_channel()
8675 * @pf: ptr to PF device
8685 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi, in ice_setup_hw_channel() argument
8688 struct device *dev = ice_pf_to_dev(pf); in ice_setup_hw_channel()
8694 ret = ice_add_channel(pf, sw_id, ch); in ice_setup_hw_channel()
8715 * @pf: ptr to PF device
8723 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi, in ice_setup_channel() argument
8726 struct device *dev = ice_pf_to_dev(pf); in ice_setup_channel()
8735 sw_id = pf->first_sw->sw_id; in ice_setup_channel()
8738 ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL); in ice_setup_channel()
8776 struct ice_pf *pf = vsi->back; in ice_create_q_channel() local
8782 dev = ice_pf_to_dev(pf); in ice_create_q_channel()
8794 if (!ice_setup_channel(pf, vsi, ch)) { in ice_create_q_channel()
8819 * @pf: ptr to PF, TC-flower based filters are tracked at PF level
8824 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf) in ice_rem_all_chnl_fltrs() argument
8831 &pf->tc_flower_fltr_list, in ice_rem_all_chnl_fltrs()
8843 status = ice_rem_adv_rule_by_id(&pf->hw, &rule); in ice_rem_all_chnl_fltrs()
8846 dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n", in ice_rem_all_chnl_fltrs()
8849 dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n", in ice_rem_all_chnl_fltrs()
8859 pf->num_dmac_chnl_fltrs--; in ice_rem_all_chnl_fltrs()
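ice_rem_all_chnl_fltrs() deletes entries while walking the filter list, which needs the _safe list-walk idiom: capture the next pointer before freeing the current node. A minimal userspace sketch with a singly linked list standing in for the kernel hlist:

	#include <stdio.h>
	#include <stdlib.h>

	struct fltr {
		int id;
		struct fltr *next;
	};

	int main(void)
	{
		struct fltr *head = NULL, *f, *tmp;

		for (int i = 0; i < 3; i++) {	/* build a short list */
			f = malloc(sizeof(*f));
			if (!f)
				return 1;
			f->id = i;
			f->next = head;
			head = f;
		}

		/* grab ->next before freeing, like hlist_for_each_entry_safe() */
		for (f = head; f; f = tmp) {
			tmp = f->next;
			printf("removing filter %d\n", f->id);
			free(f);
		}
		return 0;
	}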
8878 struct ice_pf *pf = vsi->back; in ice_remove_q_channels() local
8883 ice_rem_all_chnl_fltrs(pf); in ice_remove_q_channels()
8887 struct ice_hw *hw = &pf->hw; in ice_remove_q_channels()
8925 ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx); in ice_remove_q_channels()
8930 /* Delete VSI from FW, PF and HW VSI arrays */ in ice_remove_q_channels()
8948 * @pf: ptr to PF
8952 static int ice_rebuild_channels(struct ice_pf *pf) in ice_rebuild_channels() argument
8954 struct device *dev = ice_pf_to_dev(pf); in ice_rebuild_channels()
8962 main_vsi = ice_get_main_vsi(pf); in ice_rebuild_channels()
8966 if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) || in ice_rebuild_channels()
8981 ice_for_each_vsi(pf, i) { in ice_rebuild_channels()
8984 vsi = pf->vsi[i]; in ice_rebuild_channels()
9001 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); in ice_rebuild_channels()
9004 err = ice_replay_vsi(&pf->hw, vsi->idx); in ice_rebuild_channels()
9068 struct ice_pf *pf = vsi->back; in ice_create_q_channels() local
9098 dev_err(ice_pf_to_dev(pf), in ice_create_q_channels()
9105 dev_dbg(ice_pf_to_dev(pf), in ice_create_q_channels()
9126 struct ice_pf *pf = vsi->back; in ice_setup_tc_mqprio_qdisc() local
9133 dev = ice_pf_to_dev(pf); in ice_setup_tc_mqprio_qdisc()
9138 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags); in ice_setup_tc_mqprio_qdisc()
9151 if (pf->hw.port_info->is_custom_tx_enabled) { in ice_setup_tc_mqprio_qdisc()
9155 ice_tear_down_devlink_rate_tree(pf); in ice_setup_tc_mqprio_qdisc()
9164 set_bit(ICE_FLAG_TC_MQPRIO, pf->flags); in ice_setup_tc_mqprio_qdisc()
9170 set_bit(ICE_FLAG_CLS_FLOWER, pf->flags); in ice_setup_tc_mqprio_qdisc()
9186 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) in ice_setup_tc_mqprio_qdisc()
9189 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { in ice_setup_tc_mqprio_qdisc()
9190 vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf), in ice_setup_tc_mqprio_qdisc()
9192 vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf), in ice_setup_tc_mqprio_qdisc()
9229 clear_bit(ICE_RESET_FAILED, pf->state); in ice_setup_tc_mqprio_qdisc()
9245 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { in ice_setup_tc_mqprio_qdisc()
9302 struct ice_pf *pf; in ice_cfg_txtime() local
9309 pf = vsi->back; in ice_cfg_txtime()
9310 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { in ice_cfg_txtime()
9318 dev = ice_pf_to_dev(pf); in ice_cfg_txtime()
9328 clear_bit(ICE_CFG_BUSY, pf->state); in ice_cfg_txtime()
9343 struct ice_pf *pf = np->vsi->back; in ice_offload_txtime() local
9349 if (!ice_is_feature_supported(pf, ICE_F_TXTIME)) in ice_offload_txtime()
9357 set_bit(qopt->queue, pf->txtime_txqs); in ice_offload_txtime()
9359 clear_bit(qopt->queue, pf->txtime_txqs); in ice_offload_txtime()
9377 clear_bit(qopt->queue, pf->txtime_txqs); in ice_offload_txtime()
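ice_offload_txtime() tracks per-queue Tx-time enablement in the pf->txtime_txqs bitmap: set the bit, try to configure the queue, and clear it again on failure or qdisc removal, while ice_vsi_open() checks bitmap_empty() to skip the feature entirely. A minimal userspace sketch of such a bitmap, assuming 64 queues; all helper names here are invented:

	#include <limits.h>
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	#define MAX_TXQS	64
	#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
	#define BITMAP_LONGS	((MAX_TXQS + BITS_PER_LONG - 1) / BITS_PER_LONG)

	static unsigned long txtime_txqs[BITMAP_LONGS];

	static void set_q(unsigned int q)
	{
		txtime_txqs[q / BITS_PER_LONG] |= 1UL << (q % BITS_PER_LONG);
	}

	static void clear_q(unsigned int q)
	{
		txtime_txqs[q / BITS_PER_LONG] &= ~(1UL << (q % BITS_PER_LONG));
	}

	static bool bitmap_empty_txqs(void)
	{
		for (size_t i = 0; i < BITMAP_LONGS; i++)
			if (txtime_txqs[i])
				return false;
		return true;
	}

	int main(void)
	{
		set_q(3);					/* enable Tx time on queue 3 */
		printf("empty: %d\n", bitmap_empty_txqs());	/* 0 */
		clear_q(3);					/* config failed: roll back */
		printf("empty: %d\n", bitmap_empty_txqs());	/* 1 */
		return 0;
	}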
9390 struct ice_pf *pf = np->vsi->back; in ice_setup_tc() local
9416 if (ice_is_eswitch_mode_switchdev(pf)) { in ice_setup_tc()
9421 cdev = pf->cdev_info; in ice_setup_tc()
9423 mutex_lock(&pf->adev_mutex); in ice_setup_tc()
9434 mutex_lock(&pf->tc_mutex); in ice_setup_tc()
9436 mutex_unlock(&pf->tc_mutex); in ice_setup_tc()
9441 mutex_unlock(&pf->adev_mutex); in ice_setup_tc()
9582 struct ice_pf *pf = np->vsi->back; in ice_open() local
9584 if (ice_is_reset_in_progress(pf->state)) { in ice_open()
9605 struct ice_pf *pf = vsi->back; in ice_open_internal() local
9609 if (test_bit(ICE_NEEDS_RESTART, pf->state)) { in ice_open_internal()
9623 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); in ice_open_internal()
9627 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_open_internal()
9628 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) { in ice_open_internal()
9644 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_open_internal()
9673 struct ice_pf *pf = vsi->back; in ice_stop() local
9675 if (ice_is_reset_in_progress(pf->state)) { in ice_stop()