Lines matching refs:pf — cross-reference hits for the identifier pf in the Intel ice Ethernet driver (ice_main.c). Each entry gives the source line number, the matching code, and the enclosing function; "local" and "argument" mark declaration sites.

65 	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);  in ice_hw_to_dev()  local
67 return &pf->pdev->dev; in ice_hw_to_dev()
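
The ice_hw_to_dev() hit at line 65 walks from an embedded struct ice_hw back to its enclosing struct ice_pf via container_of(). A minimal user-space sketch of that pointer arithmetic, with illustrative stand-in types rather than the real driver structures:

    #include <stddef.h>
    #include <stdio.h>

    /* Same idea as the kernel macro: subtract the member's offset from the
     * member pointer to recover the enclosing object. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct hw { int id; };
    struct pf { const char *name; struct hw hw; };

    int main(void)
    {
        struct pf my_pf = { .name = "pf0", .hw = { .id = 7 } };
        struct hw *hw = &my_pf.hw;                       /* only the member pointer */
        struct pf *pf = container_of(hw, struct pf, hw); /* parent recovered */

        printf("%s id=%d\n", pf->name, pf->hw.id);
        return 0;
    }
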
75 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
77 static void ice_vsi_release_all(struct ice_pf *pf);
79 static int ice_rebuild_channels(struct ice_pf *pf);
115 static void ice_check_for_hang_subtask(struct ice_pf *pf) in ice_check_for_hang_subtask() argument
123 ice_for_each_vsi(pf, v) in ice_check_for_hang_subtask()
124 if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) { in ice_check_for_hang_subtask()
125 vsi = pf->vsi[v]; in ice_check_for_hang_subtask()
183 static int ice_init_mac_fltr(struct ice_pf *pf) in ice_init_mac_fltr() argument
188 vsi = ice_get_main_vsi(pf); in ice_init_mac_fltr()
328 struct ice_pf *pf = vsi->back; in ice_vsi_sync_fltr() local
329 struct ice_hw *hw = &pf->hw; in ice_vsi_sync_fltr()
485 static void ice_sync_fltr_subtask(struct ice_pf *pf) in ice_sync_fltr_subtask() argument
489 if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags))) in ice_sync_fltr_subtask()
492 clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags); in ice_sync_fltr_subtask()
494 ice_for_each_vsi(pf, v) in ice_sync_fltr_subtask()
495 if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) && in ice_sync_fltr_subtask()
496 ice_vsi_sync_fltr(pf->vsi[v])) { in ice_sync_fltr_subtask()
498 set_bit(ICE_FLAG_FLTR_SYNC, pf->flags); in ice_sync_fltr_subtask()
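
Note the ordering in ice_sync_fltr_subtask() above: the ICE_FLAG_FLTR_SYNC bit is cleared before the per-VSI sync and re-set on failure, so a request racing in mid-sync is not lost and a failed VSI is retried on the next service pass. A user-space sketch of that clear-then-re-arm shape, using C11 atomics and made-up names:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool fltr_sync_requested = true;

    static bool sync_one_vsi(int v) { return v != 2; /* pretend VSI 2 fails */ }

    static void sync_fltr_subtask(void)
    {
        /* test-and-clear in one step, like test_bit() + clear_bit() */
        if (!atomic_exchange(&fltr_sync_requested, false))
            return;                                  /* nothing requested */

        for (int v = 0; v < 4; v++)
            if (!sync_one_vsi(v)) {
                atomic_store(&fltr_sync_requested, true); /* retry later */
                printf("VSI %d failed, re-arming sync\n", v);
                break;
            }
    }

    int main(void) { sync_fltr_subtask(); return 0; }
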
508 static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked) in ice_pf_dis_all_vsi() argument
513 ice_for_each_vsi(pf, v) in ice_pf_dis_all_vsi()
514 if (pf->vsi[v]) in ice_pf_dis_all_vsi()
515 ice_dis_vsi(pf->vsi[v], locked); in ice_pf_dis_all_vsi()
518 pf->pf_agg_node[node].num_vsis = 0; in ice_pf_dis_all_vsi()
521 pf->vf_agg_node[node].num_vsis = 0; in ice_pf_dis_all_vsi()
532 ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) in ice_prepare_for_reset() argument
534 struct ice_hw *hw = &pf->hw; in ice_prepare_for_reset()
539 dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type); in ice_prepare_for_reset()
542 if (test_bit(ICE_PREPARED_FOR_RESET, pf->state)) in ice_prepare_for_reset()
545 synchronize_irq(pf->oicr_irq.virq); in ice_prepare_for_reset()
547 ice_unplug_aux_dev(pf); in ice_prepare_for_reset()
551 ice_vc_notify_reset(pf); in ice_prepare_for_reset()
554 mutex_lock(&pf->vfs.table_lock); in ice_prepare_for_reset()
555 ice_for_each_vf(pf, bkt, vf) in ice_prepare_for_reset()
557 mutex_unlock(&pf->vfs.table_lock); in ice_prepare_for_reset()
559 if (ice_is_eswitch_mode_switchdev(pf)) { in ice_prepare_for_reset()
561 ice_eswitch_br_fdb_flush(pf->eswitch.br_offloads->bridge); in ice_prepare_for_reset()
566 vsi = ice_get_main_vsi(pf); in ice_prepare_for_reset()
575 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { in ice_prepare_for_reset()
591 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags); in ice_prepare_for_reset()
603 set_bit(ICE_VSI_REBUILD_PENDING, ice_get_main_vsi(pf)->state); in ice_prepare_for_reset()
604 ice_pf_dis_all_vsi(pf, false); in ice_prepare_for_reset()
606 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_prepare_for_reset()
607 ice_ptp_prepare_for_reset(pf, reset_type); in ice_prepare_for_reset()
609 if (ice_is_feature_supported(pf, ICE_F_GNSS)) in ice_prepare_for_reset()
610 ice_gnss_exit(pf); in ice_prepare_for_reset()
617 set_bit(ICE_PREPARED_FOR_RESET, pf->state); in ice_prepare_for_reset()
625 static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) in ice_do_reset() argument
627 struct device *dev = ice_pf_to_dev(pf); in ice_do_reset()
628 struct ice_hw *hw = &pf->hw; in ice_do_reset()
632 if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) { in ice_do_reset()
637 ice_prepare_for_reset(pf, reset_type); in ice_do_reset()
642 set_bit(ICE_RESET_FAILED, pf->state); in ice_do_reset()
643 clear_bit(ICE_RESET_OICR_RECV, pf->state); in ice_do_reset()
644 clear_bit(ICE_PREPARED_FOR_RESET, pf->state); in ice_do_reset()
645 clear_bit(ICE_PFR_REQ, pf->state); in ice_do_reset()
646 clear_bit(ICE_CORER_REQ, pf->state); in ice_do_reset()
647 clear_bit(ICE_GLOBR_REQ, pf->state); in ice_do_reset()
648 wake_up(&pf->reset_wait_queue); in ice_do_reset()
657 pf->pfr_count++; in ice_do_reset()
658 ice_rebuild(pf, reset_type); in ice_do_reset()
659 clear_bit(ICE_PREPARED_FOR_RESET, pf->state); in ice_do_reset()
660 clear_bit(ICE_PFR_REQ, pf->state); in ice_do_reset()
661 wake_up(&pf->reset_wait_queue); in ice_do_reset()
662 ice_reset_all_vfs(pf); in ice_do_reset()
670 static void ice_reset_subtask(struct ice_pf *pf) in ice_reset_subtask() argument
684 if (test_bit(ICE_RESET_OICR_RECV, pf->state)) { in ice_reset_subtask()
686 if (test_and_clear_bit(ICE_CORER_RECV, pf->state)) in ice_reset_subtask()
688 if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state)) in ice_reset_subtask()
690 if (test_and_clear_bit(ICE_EMPR_RECV, pf->state)) in ice_reset_subtask()
695 ice_prepare_for_reset(pf, reset_type); in ice_reset_subtask()
698 if (ice_check_reset(&pf->hw)) { in ice_reset_subtask()
699 set_bit(ICE_RESET_FAILED, pf->state); in ice_reset_subtask()
702 pf->hw.reset_ongoing = false; in ice_reset_subtask()
703 ice_rebuild(pf, reset_type); in ice_reset_subtask()
707 clear_bit(ICE_RESET_OICR_RECV, pf->state); in ice_reset_subtask()
708 clear_bit(ICE_PREPARED_FOR_RESET, pf->state); in ice_reset_subtask()
709 clear_bit(ICE_PFR_REQ, pf->state); in ice_reset_subtask()
710 clear_bit(ICE_CORER_REQ, pf->state); in ice_reset_subtask()
711 clear_bit(ICE_GLOBR_REQ, pf->state); in ice_reset_subtask()
712 wake_up(&pf->reset_wait_queue); in ice_reset_subtask()
713 ice_reset_all_vfs(pf); in ice_reset_subtask()
720 if (test_bit(ICE_PFR_REQ, pf->state)) { in ice_reset_subtask()
722 if (pf->lag && pf->lag->bonded) { in ice_reset_subtask()
723 dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n"); in ice_reset_subtask()
727 if (test_bit(ICE_CORER_REQ, pf->state)) in ice_reset_subtask()
729 if (test_bit(ICE_GLOBR_REQ, pf->state)) in ice_reset_subtask()
736 if (!test_bit(ICE_DOWN, pf->state) && in ice_reset_subtask()
737 !test_bit(ICE_CFG_BUSY, pf->state)) { in ice_reset_subtask()
738 ice_do_reset(pf, reset_type); in ice_reset_subtask()
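
The request bits checked at lines 720-729 are collapsed into a single reset of the highest pending severity: a PFR request on a bonded interface is promoted to CORER, and GLOBR supersedes both. A small sketch of that arbitration, with illustrative bit values:

    #include <stdio.h>

    enum reset_req { RESET_NONE, RESET_PFR, RESET_CORER, RESET_GLOBR };

    #define PFR_REQ   (1u << 0)
    #define CORER_REQ (1u << 1)
    #define GLOBR_REQ (1u << 2)

    static enum reset_req pick_reset(unsigned int state)
    {
        enum reset_req r = RESET_NONE;

        if (state & PFR_REQ)
            r = RESET_PFR;
        if (state & CORER_REQ)
            r = RESET_CORER;      /* CORER supersedes PFR */
        if (state & GLOBR_REQ)
            r = RESET_GLOBR;      /* GLOBR supersedes everything */
        return r;
    }

    int main(void)
    {
        printf("%d\n", pick_reset(PFR_REQ | CORER_REQ)); /* -> RESET_CORER */
        return 0;
    }
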
943 static void ice_set_dflt_mib(struct ice_pf *pf) in ice_set_dflt_mib() argument
945 struct device *dev = ice_pf_to_dev(pf); in ice_set_dflt_mib()
949 struct ice_hw *hw = &pf->hw; in ice_set_dflt_mib()
1027 static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err) in ice_check_phy_fw_load() argument
1030 clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags); in ice_check_phy_fw_load()
1034 if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags)) in ice_check_phy_fw_load()
1038 …dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and… in ice_check_phy_fw_load()
1039 set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags); in ice_check_phy_fw_load()
1051 static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err) in ice_check_module_power() argument
1056 clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); in ice_check_module_power()
1063 if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags)) in ice_check_module_power()
1067 …dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cann… in ice_check_module_power()
1068 set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); in ice_check_module_power()
1070 …dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cann… in ice_check_module_power()
1071 set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); in ice_check_module_power()
1083 static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err) in ice_check_link_cfg_err() argument
1085 ice_check_module_power(pf, link_cfg_err); in ice_check_link_cfg_err()
1086 ice_check_phy_fw_load(pf, link_cfg_err); in ice_check_link_cfg_err()
1099 ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, in ice_link_event() argument
1102 struct device *dev = ice_pf_to_dev(pf); in ice_link_event()
1124 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); in ice_link_event()
1132 vsi = ice_get_main_vsi(pf); in ice_link_event()
1137 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) && in ice_link_event()
1139 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_link_event()
1147 ice_ptp_link_change(pf, pf->hw.pf_id, link_up); in ice_link_event()
1149 if (ice_is_dcb_active(pf)) { in ice_link_event()
1150 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) in ice_link_event()
1151 ice_dcb_rebuild(pf); in ice_link_event()
1154 ice_set_dflt_mib(pf); in ice_link_event()
1159 ice_vc_notify_link_state(pf); in ice_link_event()
1168 static void ice_watchdog_subtask(struct ice_pf *pf) in ice_watchdog_subtask() argument
1173 if (test_bit(ICE_DOWN, pf->state) || in ice_watchdog_subtask()
1174 test_bit(ICE_CFG_BUSY, pf->state)) in ice_watchdog_subtask()
1179 pf->serv_tmr_prev + pf->serv_tmr_period)) in ice_watchdog_subtask()
1182 pf->serv_tmr_prev = jiffies; in ice_watchdog_subtask()
1187 ice_update_pf_stats(pf); in ice_watchdog_subtask()
1188 ice_for_each_vsi(pf, i) in ice_watchdog_subtask()
1189 if (pf->vsi[i] && pf->vsi[i]->netdev) in ice_watchdog_subtask()
1190 ice_update_vsi_stats(pf->vsi[i]); in ice_watchdog_subtask()
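
The guard at line 1179 rate-limits the stats refresh to once per serv_tmr_period, presumably via the kernel's wraparound-safe time_before() comparison (the condition name is truncated in the listing). A compilable sketch of the same test:

    #include <stdio.h>

    /* Kernel-style wraparound-safe tick comparison. */
    #define time_before(a, b) ((long)((a) - (b)) < 0)

    static unsigned long jiffies;          /* stand-in for the kernel tick count */
    static unsigned long serv_tmr_prev;
    static const unsigned long serv_tmr_period = 100; /* illustrative: 1 s at HZ=100 */

    static void watchdog_subtask(void)
    {
        if (time_before(jiffies, serv_tmr_prev + serv_tmr_period))
            return;                        /* too soon since the last pass */
        serv_tmr_prev = jiffies;
        printf("refreshing stats at tick %lu\n", jiffies);
    }

    int main(void)
    {
        for (jiffies = 0; jiffies < 350; jiffies += 50)
            watchdog_subtask();
        return 0;
    }
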
1228 ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event) in ice_handle_link_event() argument
1235 port_info = pf->hw.port_info; in ice_handle_link_event()
1239 status = ice_link_event(pf, port_info, in ice_handle_link_event()
1243 dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n", in ice_handle_link_event()
1255 ice_get_fwlog_data(struct ice_pf *pf, struct ice_rq_event_info *event) in ice_get_fwlog_data() argument
1258 struct ice_hw *hw = &pf->hw; in ice_get_fwlog_data()
1291 void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task, in ice_aq_prep_for_event() argument
1298 spin_lock_bh(&pf->aq_wait_lock); in ice_aq_prep_for_event()
1299 hlist_add_head(&task->entry, &pf->aq_wait_list); in ice_aq_prep_for_event()
1300 spin_unlock_bh(&pf->aq_wait_lock); in ice_aq_prep_for_event()
1315 int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task, in ice_aq_wait_for_event() argument
1319 struct device *dev = ice_pf_to_dev(pf); in ice_aq_wait_for_event()
1324 ret = wait_event_interruptible_timeout(pf->aq_wait_queue, in ice_aq_wait_for_event()
1352 spin_lock_bh(&pf->aq_wait_lock); in ice_aq_wait_for_event()
1354 spin_unlock_bh(&pf->aq_wait_lock); in ice_aq_wait_for_event()
1377 static void ice_aq_check_events(struct ice_pf *pf, u16 opcode, in ice_aq_check_events() argument
1384 spin_lock_bh(&pf->aq_wait_lock); in ice_aq_check_events()
1385 hlist_for_each_entry(task, &pf->aq_wait_list, entry) { in ice_aq_check_events()
1405 spin_unlock_bh(&pf->aq_wait_lock); in ice_aq_check_events()
1408 wake_up(&pf->aq_wait_queue); in ice_aq_check_events()
1418 static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf) in ice_aq_cancel_waiting_tasks() argument
1422 spin_lock_bh(&pf->aq_wait_lock); in ice_aq_cancel_waiting_tasks()
1423 hlist_for_each_entry(task, &pf->aq_wait_list, entry) in ice_aq_cancel_waiting_tasks()
1425 spin_unlock_bh(&pf->aq_wait_lock); in ice_aq_cancel_waiting_tasks()
1427 wake_up(&pf->aq_wait_queue); in ice_aq_cancel_waiting_tasks()
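
Taken together, the ice_aq_* helpers above implement a classic wait-list: a task is registered under aq_wait_lock before the AdminQ command is sent (so its completion cannot be missed), the receive path marks matching tasks and wakes aq_wait_queue, and cancellation marks everything. A user-space pthreads sketch of the same shape, with illustrative names throughout:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    struct aq_task {
        unsigned short opcode;
        bool completed;
        struct aq_task *next;
    };

    static pthread_mutex_t aq_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t aq_wake = PTHREAD_COND_INITIALIZER;
    static struct aq_task *aq_wait_list;

    /* Register the task *before* sending the command, so the completion
     * cannot slip past an unregistered waiter. */
    static void aq_prep_for_event(struct aq_task *task, unsigned short opcode)
    {
        task->opcode = opcode;
        task->completed = false;
        pthread_mutex_lock(&aq_lock);
        task->next = aq_wait_list;
        aq_wait_list = task;
        pthread_mutex_unlock(&aq_lock);
    }

    /* Receive path: mark matching tasks and wake every waiter. */
    static void aq_check_events(unsigned short opcode)
    {
        pthread_mutex_lock(&aq_lock);
        for (struct aq_task *t = aq_wait_list; t; t = t->next)
            if (t->opcode == opcode)
                t->completed = true;
        pthread_mutex_unlock(&aq_lock);
        pthread_cond_broadcast(&aq_wake);
    }

    /* Waiter: sleep until completed or the absolute deadline passes. */
    static bool aq_wait_for_event(struct aq_task *task,
                                  const struct timespec *deadline)
    {
        int err = 0;

        pthread_mutex_lock(&aq_lock);
        while (!task->completed && err == 0)
            err = pthread_cond_timedwait(&aq_wake, &aq_lock, deadline);
        pthread_mutex_unlock(&aq_lock);
        return task->completed;
    }

    int main(void)
    {
        struct aq_task t;
        struct timespec deadline = { .tv_sec = 1, .tv_nsec = 0 };

        aq_prep_for_event(&t, 0x0603);
        aq_check_events(0x0603);         /* the "firmware event" arrives */
        printf("completed=%d\n", aq_wait_for_event(&t, &deadline));
        return 0;
    }

Registering before sending is the load-bearing detail: it closes the window in which the completion could arrive before the waiter is on the list.
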
1437 static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) in __ice_clean_ctrlq() argument
1439 struct device *dev = ice_pf_to_dev(pf); in __ice_clean_ctrlq()
1441 struct ice_hw *hw = &pf->hw; in __ice_clean_ctrlq()
1448 if (test_bit(ICE_RESET_FAILED, pf->state)) in __ice_clean_ctrlq()
1538 ice_aq_check_events(pf, opcode, &event); in __ice_clean_ctrlq()
1542 if (ice_handle_link_event(pf, &event)) in __ice_clean_ctrlq()
1546 ice_vf_lan_overflow_event(pf, &event); in __ice_clean_ctrlq()
1554 ice_vc_process_vf_msg(pf, &event, &data); in __ice_clean_ctrlq()
1557 ice_get_fwlog_data(pf, &event); in __ice_clean_ctrlq()
1560 ice_dcb_process_lldp_set_mib_change(pf, &event); in __ice_clean_ctrlq()
1593 static void ice_clean_adminq_subtask(struct ice_pf *pf) in ice_clean_adminq_subtask() argument
1595 struct ice_hw *hw = &pf->hw; in ice_clean_adminq_subtask()
1597 if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) in ice_clean_adminq_subtask()
1600 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN)) in ice_clean_adminq_subtask()
1603 clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); in ice_clean_adminq_subtask()
1611 __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN); in ice_clean_adminq_subtask()
1620 static void ice_clean_mailboxq_subtask(struct ice_pf *pf) in ice_clean_mailboxq_subtask() argument
1622 struct ice_hw *hw = &pf->hw; in ice_clean_mailboxq_subtask()
1624 if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state)) in ice_clean_mailboxq_subtask()
1627 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX)) in ice_clean_mailboxq_subtask()
1630 clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); in ice_clean_mailboxq_subtask()
1633 __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX); in ice_clean_mailboxq_subtask()
1642 static void ice_clean_sbq_subtask(struct ice_pf *pf) in ice_clean_sbq_subtask() argument
1644 struct ice_hw *hw = &pf->hw; in ice_clean_sbq_subtask()
1650 clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); in ice_clean_sbq_subtask()
1654 if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state)) in ice_clean_sbq_subtask()
1657 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB)) in ice_clean_sbq_subtask()
1660 clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); in ice_clean_sbq_subtask()
1663 __ice_clean_ctrlq(pf, ICE_CTL_Q_SB); in ice_clean_sbq_subtask()
1674 void ice_service_task_schedule(struct ice_pf *pf) in ice_service_task_schedule() argument
1676 if (!test_bit(ICE_SERVICE_DIS, pf->state) && in ice_service_task_schedule()
1677 !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) && in ice_service_task_schedule()
1678 !test_bit(ICE_NEEDS_RESTART, pf->state)) in ice_service_task_schedule()
1679 queue_work(ice_wq, &pf->serv_task); in ice_service_task_schedule()
1686 static void ice_service_task_complete(struct ice_pf *pf) in ice_service_task_complete() argument
1688 WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state)); in ice_service_task_complete()
1692 clear_bit(ICE_SERVICE_SCHED, pf->state); in ice_service_task_complete()
1702 static int ice_service_task_stop(struct ice_pf *pf) in ice_service_task_stop() argument
1706 ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state); in ice_service_task_stop()
1708 if (pf->serv_tmr.function) in ice_service_task_stop()
1709 del_timer_sync(&pf->serv_tmr); in ice_service_task_stop()
1710 if (pf->serv_task.func) in ice_service_task_stop()
1711 cancel_work_sync(&pf->serv_task); in ice_service_task_stop()
1713 clear_bit(ICE_SERVICE_SCHED, pf->state); in ice_service_task_stop()
1723 static void ice_service_task_restart(struct ice_pf *pf) in ice_service_task_restart() argument
1725 clear_bit(ICE_SERVICE_DIS, pf->state); in ice_service_task_restart()
1726 ice_service_task_schedule(pf); in ice_service_task_restart()
1735 struct ice_pf *pf = from_timer(pf, t, serv_tmr); in ice_service_timer() local
1737 mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies)); in ice_service_timer()
1738 ice_service_task_schedule(pf); in ice_service_timer()
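
ice_service_timer() re-arms itself with mod_timer() and calls ice_service_task_schedule(), which queues the work only when the ICE_SERVICE_SCHED bit was not already set (test_and_set_bit) and service is not disabled; ice_service_task_complete() clears the bit so the next request re-queues. A minimal C11 sketch of that schedule-once gate:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool service_dis;      /* ICE_SERVICE_DIS analogue */
    static atomic_bool service_sched;    /* ICE_SERVICE_SCHED analogue */

    static void queue_service_work(void) { printf("work queued\n"); }

    static void service_task_schedule(void)
    {
        if (!atomic_load(&service_dis) &&
            !atomic_exchange(&service_sched, true)) /* test_and_set_bit() */
            queue_service_work();
    }

    static void service_task_complete(void)
    {
        atomic_store(&service_sched, false); /* allow the next schedule */
    }

    int main(void)
    {
        service_task_schedule();  /* queues */
        service_task_schedule();  /* no-op: already scheduled */
        service_task_complete();
        service_task_schedule();  /* queues again */
        return 0;
    }
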
1752 static void ice_mdd_maybe_reset_vf(struct ice_pf *pf, struct ice_vf *vf, in ice_mdd_maybe_reset_vf() argument
1755 struct device *dev = ice_pf_to_dev(pf); in ice_mdd_maybe_reset_vf()
1757 if (!test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) in ice_mdd_maybe_reset_vf()
1770 pf->hw.pf_id, vf->vf_id); in ice_mdd_maybe_reset_vf()
1784 static void ice_handle_mdd_event(struct ice_pf *pf) in ice_handle_mdd_event() argument
1786 struct device *dev = ice_pf_to_dev(pf); in ice_handle_mdd_event()
1787 struct ice_hw *hw = &pf->hw; in ice_handle_mdd_event()
1792 if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) { in ice_handle_mdd_event()
1796 ice_print_vfs_mdd_events(pf); in ice_handle_mdd_event()
1808 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1821 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1834 if (netif_msg_rx_err(pf)) in ice_handle_mdd_event()
1844 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1851 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1858 if (netif_msg_rx_err(pf)) in ice_handle_mdd_event()
1865 mutex_lock(&pf->vfs.table_lock); in ice_handle_mdd_event()
1866 ice_for_each_vf(pf, bkt, vf) { in ice_handle_mdd_event()
1873 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); in ice_handle_mdd_event()
1874 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1885 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); in ice_handle_mdd_event()
1886 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1897 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); in ice_handle_mdd_event()
1898 if (netif_msg_tx_err(pf)) in ice_handle_mdd_event()
1909 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); in ice_handle_mdd_event()
1910 if (netif_msg_rx_err(pf)) in ice_handle_mdd_event()
1918 ice_mdd_maybe_reset_vf(pf, vf, reset_vf_tx, in ice_handle_mdd_event()
1921 mutex_unlock(&pf->vfs.table_lock); in ice_handle_mdd_event()
1923 ice_print_vfs_mdd_events(pf); in ice_handle_mdd_event()
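
The netif_msg_tx_err()/netif_msg_rx_err() checks sprinkled through ice_handle_mdd_event() gate each log site on one bit of pf->msg_enable, which is seeded at probe time from the module debug parameter via netif_msg_init() (line 5299). A sketch of that bitmask gating, with illustrative bit values:

    #include <stdio.h>

    #define MSG_TX_ERR (1u << 0)
    #define MSG_RX_ERR (1u << 1)

    static unsigned int msg_enable = MSG_TX_ERR; /* e.g. from a debug parameter */

    static void report_mdd(unsigned int tx_events, unsigned int rx_events)
    {
        if (tx_events && (msg_enable & MSG_TX_ERR))
            printf("%u Tx MDD events\n", tx_events);
        if (rx_events && (msg_enable & MSG_RX_ERR))
            printf("%u Rx MDD events\n", rx_events); /* suppressed here */
    }

    int main(void) { report_mdd(3, 1); return 0; }
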
2011 struct ice_pf *pf = pi->hw->back; in ice_init_nvm_phy_type() local
2022 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); in ice_init_nvm_phy_type()
2026 pf->nvm_phy_type_hi = pcaps->phy_type_high; in ice_init_nvm_phy_type()
2027 pf->nvm_phy_type_lo = pcaps->phy_type_low; in ice_init_nvm_phy_type()
2043 struct ice_pf *pf = pi->hw->back; in ice_init_link_dflt_override() local
2045 ldo = &pf->link_dflt_override; in ice_init_link_dflt_override()
2055 set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags); in ice_init_link_dflt_override()
2056 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); in ice_init_link_dflt_override()
2081 struct ice_pf *pf = pi->hw->back; in ice_init_phy_cfg_dflt_override() local
2083 ldo = &pf->link_dflt_override; in ice_init_phy_cfg_dflt_override()
2091 cfg->phy_type_low = pf->nvm_phy_type_lo & in ice_init_phy_cfg_dflt_override()
2093 cfg->phy_type_high = pf->nvm_phy_type_hi & in ice_init_phy_cfg_dflt_override()
2099 set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state); in ice_init_phy_cfg_dflt_override()
2120 struct ice_pf *pf = pi->hw->back; in ice_init_phy_user_cfg() local
2137 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); in ice_init_phy_user_cfg()
2147 set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags); in ice_init_phy_user_cfg()
2154 (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) { in ice_init_phy_user_cfg()
2169 set_bit(ICE_PHY_INIT_COMPLETE, pf->state); in ice_init_phy_user_cfg()
2190 struct ice_pf *pf = vsi->back; in ice_configure_phy() local
2199 if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) && in ice_configure_phy()
2203 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) in ice_configure_phy()
2289 err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL); in ice_configure_phy()
2307 static void ice_check_media_subtask(struct ice_pf *pf) in ice_check_media_subtask() argument
2314 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags)) in ice_check_media_subtask()
2317 vsi = ice_get_main_vsi(pf); in ice_check_media_subtask()
2327 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); in ice_check_media_subtask()
2330 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) in ice_check_media_subtask()
2342 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_check_media_subtask()
2356 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task); in ice_service_task() local
2362 ice_reset_subtask(pf); in ice_service_task()
2365 if (ice_is_reset_in_progress(pf->state) || in ice_service_task()
2366 test_bit(ICE_SUSPENDED, pf->state) || in ice_service_task()
2367 test_bit(ICE_NEEDS_RESTART, pf->state)) { in ice_service_task()
2368 ice_service_task_complete(pf); in ice_service_task()
2372 if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) { in ice_service_task()
2379 swap(event->reg, pf->oicr_err_reg); in ice_service_task()
2380 ice_send_event_to_aux(pf, event); in ice_service_task()
2388 if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags)) in ice_service_task()
2389 ice_unplug_aux_dev(pf); in ice_service_task()
2392 if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) in ice_service_task()
2393 ice_plug_aux_dev(pf); in ice_service_task()
2395 if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) { in ice_service_task()
2401 ice_send_event_to_aux(pf, event); in ice_service_task()
2406 ice_clean_adminq_subtask(pf); in ice_service_task()
2407 ice_check_media_subtask(pf); in ice_service_task()
2408 ice_check_for_hang_subtask(pf); in ice_service_task()
2409 ice_sync_fltr_subtask(pf); in ice_service_task()
2410 ice_handle_mdd_event(pf); in ice_service_task()
2411 ice_watchdog_subtask(pf); in ice_service_task()
2413 if (ice_is_safe_mode(pf)) { in ice_service_task()
2414 ice_service_task_complete(pf); in ice_service_task()
2418 ice_process_vflr_event(pf); in ice_service_task()
2419 ice_clean_mailboxq_subtask(pf); in ice_service_task()
2420 ice_clean_sbq_subtask(pf); in ice_service_task()
2421 ice_sync_arfs_fltrs(pf); in ice_service_task()
2422 ice_flush_fdir_ctx(pf); in ice_service_task()
2425 ice_service_task_complete(pf); in ice_service_task()
2431 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || in ice_service_task()
2432 test_bit(ICE_MDD_EVENT_PENDING, pf->state) || in ice_service_task()
2433 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) || in ice_service_task()
2434 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) || in ice_service_task()
2435 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) || in ice_service_task()
2436 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) || in ice_service_task()
2437 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) in ice_service_task()
2438 mod_timer(&pf->serv_tmr, jiffies); in ice_service_task()
2466 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) in ice_schedule_reset() argument
2468 struct device *dev = ice_pf_to_dev(pf); in ice_schedule_reset()
2471 if (test_bit(ICE_RESET_FAILED, pf->state)) { in ice_schedule_reset()
2476 if (ice_is_reset_in_progress(pf->state)) { in ice_schedule_reset()
2483 set_bit(ICE_PFR_REQ, pf->state); in ice_schedule_reset()
2486 set_bit(ICE_CORER_REQ, pf->state); in ice_schedule_reset()
2489 set_bit(ICE_GLOBR_REQ, pf->state); in ice_schedule_reset()
2495 ice_service_task_schedule(pf); in ice_schedule_reset()
2551 struct ice_pf *pf = vsi->back; in ice_vsi_req_irq_msix() local
2558 dev = ice_pf_to_dev(pf); in ice_vsi_req_irq_msix()
2768 struct ice_pf *pf = vsi->back; in ice_prepare_xdp_rings() local
2770 .qs_mutex = &pf->avail_q_mutex, in ice_prepare_xdp_rings()
2771 .pf_map = pf->avail_txqs, in ice_prepare_xdp_rings()
2772 .pf_map_size = pf->max_pf_txqs, in ice_prepare_xdp_rings()
2782 dev = ice_pf_to_dev(pf); in ice_prepare_xdp_rings()
2843 mutex_lock(&pf->avail_q_mutex); in ice_prepare_xdp_rings()
2845 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); in ice_prepare_xdp_rings()
2848 mutex_unlock(&pf->avail_q_mutex); in ice_prepare_xdp_rings()
2865 struct ice_pf *pf = vsi->back; in ice_destroy_xdp_rings() local
2887 mutex_lock(&pf->avail_q_mutex); in ice_destroy_xdp_rings()
2889 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); in ice_destroy_xdp_rings()
2892 mutex_unlock(&pf->avail_q_mutex); in ice_destroy_xdp_rings()
2906 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings); in ice_destroy_xdp_rings()
3111 static void ice_ena_misc_vector(struct ice_pf *pf) in ice_ena_misc_vector() argument
3113 struct ice_hw *hw = &pf->hw; in ice_ena_misc_vector()
3141 wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index), in ice_ena_misc_vector()
3144 if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) in ice_ena_misc_vector()
3147 wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset), in ice_ena_misc_vector()
3158 struct ice_pf *pf = data; in ice_ll_ts_intr() local
3166 hw = &pf->hw; in ice_ll_ts_intr()
3167 tx = &pf->ptp.port.tx; in ice_ll_ts_intr()
3180 wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset), in ice_ll_ts_intr()
3193 struct ice_pf *pf = (struct ice_pf *)data; in ice_misc_intr() local
3195 struct ice_hw *hw = &pf->hw; in ice_misc_intr()
3199 dev = ice_pf_to_dev(pf); in ice_misc_intr()
3200 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); in ice_misc_intr()
3201 set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); in ice_misc_intr()
3202 set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); in ice_misc_intr()
3209 pf->sw_int_count++; in ice_misc_intr()
3214 set_bit(ICE_MDD_EVENT_PENDING, pf->state); in ice_misc_intr()
3218 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { in ice_misc_intr()
3225 set_bit(ICE_VFLR_EVENT_PENDING, pf->state); in ice_misc_intr()
3238 pf->corer_count++; in ice_misc_intr()
3240 pf->globr_count++; in ice_misc_intr()
3242 pf->empr_count++; in ice_misc_intr()
3249 if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) { in ice_misc_intr()
3251 set_bit(ICE_CORER_RECV, pf->state); in ice_misc_intr()
3253 set_bit(ICE_GLOBR_RECV, pf->state); in ice_misc_intr()
3255 set_bit(ICE_EMPR_RECV, pf->state); in ice_misc_intr()
3276 if (ice_pf_state_is_nominal(pf) && in ice_misc_intr()
3277 pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) { in ice_misc_intr()
3278 struct ice_ptp_tx *tx = &pf->ptp.port.tx; in ice_misc_intr()
3288 } else if (ice_ptp_pf_handles_tx_interrupt(pf)) { in ice_misc_intr()
3289 set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread); in ice_misc_intr()
3300 if (ice_pf_src_tmr_owned(pf)) { in ice_misc_intr()
3302 pf->ptp.ext_ts_irq |= gltsyn_stat & in ice_misc_intr()
3307 ice_ptp_extts_event(pf); in ice_misc_intr()
3313 pf->oicr_err_reg |= oicr; in ice_misc_intr()
3314 set_bit(ICE_AUX_ERR_PENDING, pf->state); in ice_misc_intr()
3327 set_bit(ICE_PFR_REQ, pf->state); in ice_misc_intr()
3330 ice_service_task_schedule(pf); in ice_misc_intr()
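
ice_misc_intr() shows the usual top-half/bottom-half split: the hard IRQ handler only decodes the OICR cause register into *_EVENT_PENDING state bits and schedules the service task (or hands off to the IRQ thread), deferring the real work to process context. A compact sketch of that split, with made-up cause bits:

    #include <stdatomic.h>
    #include <stdio.h>

    #define OICR_MDD  (1u << 0)
    #define OICR_VFLR (1u << 1)

    static atomic_uint pending;          /* pf->state analogue */

    static void service_task_schedule(void) { printf("service task scheduled\n"); }

    static void misc_intr(unsigned int oicr)  /* "hard IRQ": fast, no sleeping */
    {
        if (oicr & OICR_MDD)
            atomic_fetch_or(&pending, OICR_MDD);
        if (oicr & OICR_VFLR)
            atomic_fetch_or(&pending, OICR_VFLR);
        service_task_schedule();
    }

    static void service_task(void)            /* process context: do the work */
    {
        unsigned int ev = atomic_exchange(&pending, 0);

        if (ev & OICR_MDD)
            printf("handling MDD event\n");
        if (ev & OICR_VFLR)
            printf("handling VFLR event\n");
    }

    int main(void) { misc_intr(OICR_MDD | OICR_VFLR); service_task(); return 0; }
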
3344 struct ice_pf *pf = data; in ice_misc_intr_thread_fn() local
3347 hw = &pf->hw; in ice_misc_intr_thread_fn()
3349 if (ice_is_reset_in_progress(pf->state)) in ice_misc_intr_thread_fn()
3352 if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) { in ice_misc_intr_thread_fn()
3356 if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) { in ice_misc_intr_thread_fn()
3396 static void ice_free_irq_msix_ll_ts(struct ice_pf *pf) in ice_free_irq_msix_ll_ts() argument
3398 int irq_num = pf->ll_ts_irq.virq; in ice_free_irq_msix_ll_ts()
3401 devm_free_irq(ice_pf_to_dev(pf), irq_num, pf); in ice_free_irq_msix_ll_ts()
3403 ice_free_irq(pf, pf->ll_ts_irq); in ice_free_irq_msix_ll_ts()
3410 static void ice_free_irq_msix_misc(struct ice_pf *pf) in ice_free_irq_msix_misc() argument
3412 int misc_irq_num = pf->oicr_irq.virq; in ice_free_irq_msix_misc()
3413 struct ice_hw *hw = &pf->hw; in ice_free_irq_msix_misc()
3422 devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf); in ice_free_irq_msix_misc()
3424 ice_free_irq(pf, pf->oicr_irq); in ice_free_irq_msix_misc()
3425 if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) in ice_free_irq_msix_misc()
3426 ice_free_irq_msix_ll_ts(pf); in ice_free_irq_msix_misc()
3470 static int ice_req_irq_msix_misc(struct ice_pf *pf) in ice_req_irq_msix_misc() argument
3472 struct device *dev = ice_pf_to_dev(pf); in ice_req_irq_msix_misc()
3473 struct ice_hw *hw = &pf->hw; in ice_req_irq_msix_misc()
3478 if (!pf->int_name[0]) in ice_req_irq_msix_misc()
3479 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", in ice_req_irq_msix_misc()
3482 if (!pf->int_name_ll_ts[0]) in ice_req_irq_msix_misc()
3483 snprintf(pf->int_name_ll_ts, sizeof(pf->int_name_ll_ts) - 1, in ice_req_irq_msix_misc()
3489 if (ice_is_reset_in_progress(pf->state)) in ice_req_irq_msix_misc()
3493 irq = ice_alloc_irq(pf, false); in ice_req_irq_msix_misc()
3497 pf->oicr_irq = irq; in ice_req_irq_msix_misc()
3498 err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr, in ice_req_irq_msix_misc()
3500 pf->int_name, pf); in ice_req_irq_msix_misc()
3503 pf->int_name, err); in ice_req_irq_msix_misc()
3504 ice_free_irq(pf, pf->oicr_irq); in ice_req_irq_msix_misc()
3509 if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) in ice_req_irq_msix_misc()
3512 irq = ice_alloc_irq(pf, false); in ice_req_irq_msix_misc()
3516 pf->ll_ts_irq = irq; in ice_req_irq_msix_misc()
3517 err = devm_request_irq(dev, pf->ll_ts_irq.virq, ice_ll_ts_intr, 0, in ice_req_irq_msix_misc()
3518 pf->int_name_ll_ts, pf); in ice_req_irq_msix_misc()
3521 pf->int_name_ll_ts, err); in ice_req_irq_msix_misc()
3522 ice_free_irq(pf, pf->ll_ts_irq); in ice_req_irq_msix_misc()
3527 ice_ena_misc_vector(pf); in ice_req_irq_msix_misc()
3529 ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index); in ice_req_irq_msix_misc()
3532 if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) in ice_req_irq_msix_misc()
3534 ((pf->ll_ts_irq.index + pf_intr_start_offset) & in ice_req_irq_msix_misc()
3536 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index), in ice_req_irq_msix_misc()
3552 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_ops() local
3554 if (ice_is_safe_mode(pf)) { in ice_set_ops()
3561 netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic; in ice_set_ops()
3580 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_netdev_features() local
3581 bool is_dvm_ena = ice_is_dvm_ena(&pf->hw); in ice_set_netdev_features()
3587 if (ice_is_safe_mode(pf)) { in ice_set_netdev_features()
3687 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) in ice_pf_vsi_setup() argument
3695 return ice_vsi_setup(pf, &params); in ice_pf_vsi_setup()
3699 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, in ice_chnl_vsi_setup() argument
3709 return ice_vsi_setup(pf, &params); in ice_chnl_vsi_setup()
3721 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) in ice_ctrl_vsi_setup() argument
3729 return ice_vsi_setup(pf, &params); in ice_ctrl_vsi_setup()
3741 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) in ice_lb_vsi_setup() argument
3749 return ice_vsi_setup(pf, &params); in ice_lb_vsi_setup()
3949 u16 ice_get_avail_txq_count(struct ice_pf *pf) in ice_get_avail_txq_count() argument
3951 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex, in ice_get_avail_txq_count()
3952 pf->max_pf_txqs); in ice_get_avail_txq_count()
3959 u16 ice_get_avail_rxq_count(struct ice_pf *pf) in ice_get_avail_rxq_count() argument
3961 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex, in ice_get_avail_rxq_count()
3962 pf->max_pf_rxqs); in ice_get_avail_rxq_count()
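
ice_get_avail_txq_count()/ice_get_avail_rxq_count() delegate to a helper that, under pf->avail_q_mutex, counts the clear bits of the avail_txqs/avail_rxqs bitmaps; a set bit marks a queue in use, which is why the XDP error paths at lines 2845 and 2889 return queues by clearing bits. A plain-C sketch of that accounting over a single 64-bit word:

    #include <stdio.h>

    #define MAX_Q 64
    static unsigned long long qmap;      /* bit set => queue in use */

    static unsigned int avail_q_count(unsigned int size)
    {
        unsigned int count = 0;

        /* would run under pf->avail_q_mutex in the driver */
        for (unsigned int q = 0; q < size; q++)
            if (!(qmap & (1ULL << q)))
                count++;
        return count;
    }

    int main(void)
    {
        qmap = 0x0FULL;                  /* queues 0-3 taken */
        printf("%u of %u queues free\n", avail_q_count(MAX_Q), MAX_Q);
        return 0;
    }
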
3969 static void ice_deinit_pf(struct ice_pf *pf) in ice_deinit_pf() argument
3971 ice_service_task_stop(pf); in ice_deinit_pf()
3972 mutex_destroy(&pf->lag_mutex); in ice_deinit_pf()
3973 mutex_destroy(&pf->adev_mutex); in ice_deinit_pf()
3974 mutex_destroy(&pf->sw_mutex); in ice_deinit_pf()
3975 mutex_destroy(&pf->tc_mutex); in ice_deinit_pf()
3976 mutex_destroy(&pf->avail_q_mutex); in ice_deinit_pf()
3977 mutex_destroy(&pf->vfs.table_lock); in ice_deinit_pf()
3979 if (pf->avail_txqs) { in ice_deinit_pf()
3980 bitmap_free(pf->avail_txqs); in ice_deinit_pf()
3981 pf->avail_txqs = NULL; in ice_deinit_pf()
3984 if (pf->avail_rxqs) { in ice_deinit_pf()
3985 bitmap_free(pf->avail_rxqs); in ice_deinit_pf()
3986 pf->avail_rxqs = NULL; in ice_deinit_pf()
3989 if (pf->ptp.clock) in ice_deinit_pf()
3990 ptp_clock_unregister(pf->ptp.clock); in ice_deinit_pf()
3992 xa_destroy(&pf->dyn_ports); in ice_deinit_pf()
3993 xa_destroy(&pf->sf_nums); in ice_deinit_pf()
4000 static void ice_set_pf_caps(struct ice_pf *pf) in ice_set_pf_caps() argument
4002 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps; in ice_set_pf_caps()
4004 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); in ice_set_pf_caps()
4006 set_bit(ICE_FLAG_RDMA_ENA, pf->flags); in ice_set_pf_caps()
4007 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); in ice_set_pf_caps()
4009 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); in ice_set_pf_caps()
4010 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); in ice_set_pf_caps()
4012 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); in ice_set_pf_caps()
4013 pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs, in ice_set_pf_caps()
4016 clear_bit(ICE_FLAG_RSS_ENA, pf->flags); in ice_set_pf_caps()
4018 set_bit(ICE_FLAG_RSS_ENA, pf->flags); in ice_set_pf_caps()
4020 clear_bit(ICE_FLAG_FD_ENA, pf->flags); in ice_set_pf_caps()
4027 pf->ctrl_vsi_idx = ICE_NO_VSI; in ice_set_pf_caps()
4028 set_bit(ICE_FLAG_FD_ENA, pf->flags); in ice_set_pf_caps()
4030 ice_alloc_fd_guar_item(&pf->hw, &unused, in ice_set_pf_caps()
4033 ice_alloc_fd_shrd_item(&pf->hw, &unused, in ice_set_pf_caps()
4037 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); in ice_set_pf_caps()
4039 !(pf->hw.mac_type == ICE_MAC_E830)) in ice_set_pf_caps()
4040 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); in ice_set_pf_caps()
4042 pf->max_pf_txqs = func_caps->common_cap.num_txq; in ice_set_pf_caps()
4043 pf->max_pf_rxqs = func_caps->common_cap.num_rxq; in ice_set_pf_caps()
4050 static int ice_init_pf(struct ice_pf *pf) in ice_init_pf() argument
4052 ice_set_pf_caps(pf); in ice_init_pf()
4054 mutex_init(&pf->sw_mutex); in ice_init_pf()
4055 mutex_init(&pf->tc_mutex); in ice_init_pf()
4056 mutex_init(&pf->adev_mutex); in ice_init_pf()
4057 mutex_init(&pf->lag_mutex); in ice_init_pf()
4059 INIT_HLIST_HEAD(&pf->aq_wait_list); in ice_init_pf()
4060 spin_lock_init(&pf->aq_wait_lock); in ice_init_pf()
4061 init_waitqueue_head(&pf->aq_wait_queue); in ice_init_pf()
4063 init_waitqueue_head(&pf->reset_wait_queue); in ice_init_pf()
4066 timer_setup(&pf->serv_tmr, ice_service_timer, 0); in ice_init_pf()
4067 pf->serv_tmr_period = HZ; in ice_init_pf()
4068 INIT_WORK(&pf->serv_task, ice_service_task); in ice_init_pf()
4069 clear_bit(ICE_SERVICE_SCHED, pf->state); in ice_init_pf()
4071 mutex_init(&pf->avail_q_mutex); in ice_init_pf()
4072 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); in ice_init_pf()
4073 if (!pf->avail_txqs) in ice_init_pf()
4076 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); in ice_init_pf()
4077 if (!pf->avail_rxqs) { in ice_init_pf()
4078 bitmap_free(pf->avail_txqs); in ice_init_pf()
4079 pf->avail_txqs = NULL; in ice_init_pf()
4083 mutex_init(&pf->vfs.table_lock); in ice_init_pf()
4084 hash_init(pf->vfs.table); in ice_init_pf()
4085 ice_mbx_init_snapshot(&pf->hw); in ice_init_pf()
4087 xa_init(&pf->dyn_ports); in ice_init_pf()
4088 xa_init(&pf->sf_nums); in ice_init_pf()
4126 struct ice_pf *pf = vsi->back; in ice_vsi_recfg_qs() local
4132 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { in ice_vsi_recfg_qs()
4149 dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n"); in ice_vsi_recfg_qs()
4165 ice_pf_dcb_recfg(pf, locked); in ice_vsi_recfg_qs()
4170 dev_err(ice_pf_to_dev(pf), "Error during VSI rebuild: %d. Unload and reload the driver.\n", in ice_vsi_recfg_qs()
4173 clear_bit(ICE_CFG_BUSY, pf->state); in ice_vsi_recfg_qs()
4184 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) in ice_set_safe_mode_vlan_cfg() argument
4186 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_set_safe_mode_vlan_cfg()
4198 hw = &pf->hw; in ice_set_safe_mode_vlan_cfg()
4237 struct ice_pf *pf = hw->back; in ice_log_pkg_init() local
4240 dev = ice_pf_to_dev(pf); in ice_log_pkg_init()
4320 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) in ice_load_pkg() argument
4323 struct device *dev = ice_pf_to_dev(pf); in ice_load_pkg()
4324 struct ice_hw *hw = &pf->hw; in ice_load_pkg()
4341 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); in ice_load_pkg()
4348 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags); in ice_load_pkg()
4359 static void ice_verify_cacheline_size(struct ice_pf *pf) in ice_verify_cacheline_size() argument
4361 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M) in ice_verify_cacheline_size()
4362 …dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts… in ice_verify_cacheline_size()
4372 static int ice_send_version(struct ice_pf *pf) in ice_send_version() argument
4382 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL); in ice_send_version()
4391 static int ice_init_fdir(struct ice_pf *pf) in ice_init_fdir() argument
4393 struct device *dev = ice_pf_to_dev(pf); in ice_init_fdir()
4400 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info); in ice_init_fdir()
4412 mutex_init(&pf->hw.fdir_fltr_lock); in ice_init_fdir()
4414 err = ice_fdir_create_dflt_rules(pf); in ice_init_fdir()
4421 ice_fdir_release_flows(&pf->hw); in ice_init_fdir()
4425 if (pf->ctrl_vsi_idx != ICE_NO_VSI) { in ice_init_fdir()
4426 pf->vsi[pf->ctrl_vsi_idx] = NULL; in ice_init_fdir()
4427 pf->ctrl_vsi_idx = ICE_NO_VSI; in ice_init_fdir()
4432 static void ice_deinit_fdir(struct ice_pf *pf) in ice_deinit_fdir() argument
4434 struct ice_vsi *vsi = ice_get_ctrl_vsi(pf); in ice_deinit_fdir()
4441 if (pf->ctrl_vsi_idx != ICE_NO_VSI) { in ice_deinit_fdir()
4442 pf->vsi[pf->ctrl_vsi_idx] = NULL; in ice_deinit_fdir()
4443 pf->ctrl_vsi_idx = ICE_NO_VSI; in ice_deinit_fdir()
4446 mutex_destroy(&(&pf->hw)->fdir_fltr_lock); in ice_deinit_fdir()
4453 static char *ice_get_opt_fw_name(struct ice_pf *pf) in ice_get_opt_fw_name() argument
4458 struct pci_dev *pdev = pf->pdev; in ice_get_opt_fw_name()
4486 static int ice_request_fw(struct ice_pf *pf, const struct firmware **firmware) in ice_request_fw() argument
4488 char *opt_fw_filename = ice_get_opt_fw_name(pf); in ice_request_fw()
4489 struct device *dev = ice_pf_to_dev(pf); in ice_request_fw()
4520 struct ice_pf *pf = hw->back; in ice_init_tx_topology() local
4524 dev = ice_pf_to_dev(pf); in ice_init_tx_topology()
4555 static int ice_init_ddp_config(struct ice_hw *hw, struct ice_pf *pf) in ice_init_ddp_config() argument
4557 struct device *dev = ice_pf_to_dev(pf); in ice_init_ddp_config()
4561 err = ice_request_fw(pf, &firmware); in ice_init_ddp_config()
4576 ice_load_pkg(firmware, pf); in ice_init_ddp_config()
4586 static void ice_print_wake_reason(struct ice_pf *pf) in ice_print_wake_reason() argument
4588 u32 wus = pf->wakeup_reason; in ice_print_wake_reason()
4606 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str); in ice_print_wake_reason()
4615 void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module) in ice_pf_fwlog_update_module() argument
4617 struct ice_hw *hw = &pf->hw; in ice_pf_fwlog_update_module()
4726 int ice_init_dev(struct ice_pf *pf) in ice_init_dev() argument
4728 struct device *dev = ice_pf_to_dev(pf); in ice_init_dev()
4729 struct ice_hw *hw = &pf->hw; in ice_init_dev()
4750 ice_init_feature_support(pf); in ice_init_dev()
4752 err = ice_init_ddp_config(hw, pf); in ice_init_dev()
4758 if (err || ice_is_safe_mode(pf)) { in ice_init_dev()
4767 err = ice_init_pf(pf); in ice_init_dev()
4773 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port; in ice_init_dev()
4774 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port; in ice_init_dev()
4775 pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; in ice_init_dev()
4776 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared; in ice_init_dev()
4777 if (pf->hw.tnl.valid_count[TNL_VXLAN]) { in ice_init_dev()
4778 pf->hw.udp_tunnel_nic.tables[0].n_entries = in ice_init_dev()
4779 pf->hw.tnl.valid_count[TNL_VXLAN]; in ice_init_dev()
4780 pf->hw.udp_tunnel_nic.tables[0].tunnel_types = in ice_init_dev()
4783 if (pf->hw.tnl.valid_count[TNL_GENEVE]) { in ice_init_dev()
4784 pf->hw.udp_tunnel_nic.tables[1].n_entries = in ice_init_dev()
4785 pf->hw.tnl.valid_count[TNL_GENEVE]; in ice_init_dev()
4786 pf->hw.udp_tunnel_nic.tables[1].tunnel_types = in ice_init_dev()
4790 err = ice_init_interrupt_scheme(pf); in ice_init_dev()
4802 err = ice_req_irq_msix_misc(pf); in ice_init_dev()
4811 ice_clear_interrupt_scheme(pf); in ice_init_dev()
4813 ice_deinit_pf(pf); in ice_init_dev()
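
The tail of ice_init_dev() (lines 4811-4813) is the standard goto-unwind ladder: each failed init step jumps to a label that tears down, in reverse order, only what already succeeded. A schematic, compilable rendering with stand-in step names:

    #include <stdio.h>

    static int  init_pf(void)                { printf("init_pf\n"); return 0; }
    static int  init_interrupt_scheme(void)  { printf("init_irq_scheme\n"); return 0; }
    static int  req_irq_misc(void)           { printf("req_irq_misc\n"); return -1; }
    static void clear_interrupt_scheme(void) { printf("clear_irq_scheme\n"); }
    static void deinit_pf(void)              { printf("deinit_pf\n"); }

    static int init_dev(void)
    {
        int err;

        err = init_pf();
        if (err)
            return err;

        err = init_interrupt_scheme();
        if (err)
            goto err_init_pf;

        err = req_irq_misc();
        if (err)
            goto err_init_irq;       /* undo interrupt scheme, then pf */

        return 0;

    err_init_irq:
        clear_interrupt_scheme();
    err_init_pf:
        deinit_pf();
        return err;
    }

    int main(void) { return init_dev() ? 1 : 0; }
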
4819 void ice_deinit_dev(struct ice_pf *pf) in ice_deinit_dev() argument
4821 ice_free_irq_msix_misc(pf); in ice_deinit_dev()
4822 ice_deinit_pf(pf); in ice_deinit_dev()
4823 ice_deinit_hw(&pf->hw); in ice_deinit_dev()
4826 ice_reset(&pf->hw, ICE_RESET_PFR); in ice_deinit_dev()
4827 pci_wait_for_pending_transaction(pf->pdev); in ice_deinit_dev()
4828 ice_clear_interrupt_scheme(pf); in ice_deinit_dev()
4831 static void ice_init_features(struct ice_pf *pf) in ice_init_features() argument
4833 struct device *dev = ice_pf_to_dev(pf); in ice_init_features()
4835 if (ice_is_safe_mode(pf)) in ice_init_features()
4839 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_init_features()
4840 ice_ptp_init(pf); in ice_init_features()
4842 if (ice_is_feature_supported(pf, ICE_F_GNSS)) in ice_init_features()
4843 ice_gnss_init(pf); in ice_init_features()
4845 if (ice_is_feature_supported(pf, ICE_F_CGU) || in ice_init_features()
4846 ice_is_feature_supported(pf, ICE_F_PHY_RCLK)) in ice_init_features()
4847 ice_dpll_init(pf); in ice_init_features()
4850 if (ice_init_fdir(pf)) in ice_init_features()
4854 if (ice_init_pf_dcb(pf, false)) { in ice_init_features()
4855 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); in ice_init_features()
4856 clear_bit(ICE_FLAG_DCB_ENA, pf->flags); in ice_init_features()
4858 ice_cfg_lldp_mib_change(&pf->hw, true); in ice_init_features()
4861 if (ice_init_lag(pf)) in ice_init_features()
4864 ice_hwmon_init(pf); in ice_init_features()
4867 static void ice_deinit_features(struct ice_pf *pf) in ice_deinit_features() argument
4869 if (ice_is_safe_mode(pf)) in ice_deinit_features()
4872 ice_deinit_lag(pf); in ice_deinit_features()
4873 if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)) in ice_deinit_features()
4874 ice_cfg_lldp_mib_change(&pf->hw, false); in ice_deinit_features()
4875 ice_deinit_fdir(pf); in ice_deinit_features()
4876 if (ice_is_feature_supported(pf, ICE_F_GNSS)) in ice_deinit_features()
4877 ice_gnss_exit(pf); in ice_deinit_features()
4878 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_deinit_features()
4879 ice_ptp_release(pf); in ice_deinit_features()
4880 if (test_bit(ICE_FLAG_DPLL, pf->flags)) in ice_deinit_features()
4881 ice_dpll_deinit(pf); in ice_deinit_features()
4882 if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) in ice_deinit_features()
4883 xa_destroy(&pf->eswitch.reprs); in ice_deinit_features()
4886 static void ice_init_wakeup(struct ice_pf *pf) in ice_init_wakeup() argument
4889 pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS); in ice_init_wakeup()
4892 ice_print_wake_reason(pf); in ice_init_wakeup()
4895 wr32(&pf->hw, PFPM_WUS, U32_MAX); in ice_init_wakeup()
4898 device_set_wakeup_enable(ice_pf_to_dev(pf), false); in ice_init_wakeup()
4901 static int ice_init_link(struct ice_pf *pf) in ice_init_link() argument
4903 struct device *dev = ice_pf_to_dev(pf); in ice_init_link()
4906 err = ice_init_link_events(pf->hw.port_info); in ice_init_link()
4913 err = ice_init_nvm_phy_type(pf->hw.port_info); in ice_init_link()
4918 err = ice_update_link_info(pf->hw.port_info); in ice_init_link()
4922 ice_init_link_dflt_override(pf->hw.port_info); in ice_init_link()
4924 ice_check_link_cfg_err(pf, in ice_init_link()
4925 pf->hw.port_info->phy.link_info.link_cfg_err); in ice_init_link()
4928 if (pf->hw.port_info->phy.link_info.link_info & in ice_init_link()
4931 err = ice_init_phy_user_cfg(pf->hw.port_info); in ice_init_link()
4935 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { in ice_init_link()
4936 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_init_link()
4942 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_init_link()
4948 static int ice_init_pf_sw(struct ice_pf *pf) in ice_init_pf_sw() argument
4950 bool dvm = ice_is_dvm_ena(&pf->hw); in ice_init_pf_sw()
4955 pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL); in ice_init_pf_sw()
4956 if (!pf->first_sw) in ice_init_pf_sw()
4959 if (pf->hw.evb_veb) in ice_init_pf_sw()
4960 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; in ice_init_pf_sw()
4962 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; in ice_init_pf_sw()
4964 pf->first_sw->pf = pf; in ice_init_pf_sw()
4967 pf->first_sw->sw_id = pf->hw.port_info->sw_id; in ice_init_pf_sw()
4969 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); in ice_init_pf_sw()
4973 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); in ice_init_pf_sw()
4983 kfree(pf->first_sw); in ice_init_pf_sw()
4987 static void ice_deinit_pf_sw(struct ice_pf *pf) in ice_deinit_pf_sw() argument
4989 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_deinit_pf_sw()
4995 kfree(pf->first_sw); in ice_deinit_pf_sw()
4998 static int ice_alloc_vsis(struct ice_pf *pf) in ice_alloc_vsis() argument
5000 struct device *dev = ice_pf_to_dev(pf); in ice_alloc_vsis()
5002 pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi; in ice_alloc_vsis()
5003 if (!pf->num_alloc_vsi) in ice_alloc_vsis()
5006 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { in ice_alloc_vsis()
5009 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); in ice_alloc_vsis()
5010 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; in ice_alloc_vsis()
5013 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), in ice_alloc_vsis()
5015 if (!pf->vsi) in ice_alloc_vsis()
5018 pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi, in ice_alloc_vsis()
5019 sizeof(*pf->vsi_stats), GFP_KERNEL); in ice_alloc_vsis()
5020 if (!pf->vsi_stats) { in ice_alloc_vsis()
5021 devm_kfree(dev, pf->vsi); in ice_alloc_vsis()
5028 static void ice_dealloc_vsis(struct ice_pf *pf) in ice_dealloc_vsis() argument
5030 devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats); in ice_dealloc_vsis()
5031 pf->vsi_stats = NULL; in ice_dealloc_vsis()
5033 pf->num_alloc_vsi = 0; in ice_dealloc_vsis()
5034 devm_kfree(ice_pf_to_dev(pf), pf->vsi); in ice_dealloc_vsis()
5035 pf->vsi = NULL; in ice_dealloc_vsis()
5038 static int ice_init_devlink(struct ice_pf *pf) in ice_init_devlink() argument
5042 err = ice_devlink_register_params(pf); in ice_init_devlink()
5046 ice_devlink_init_regions(pf); in ice_init_devlink()
5047 ice_devlink_register(pf); in ice_init_devlink()
5052 static void ice_deinit_devlink(struct ice_pf *pf) in ice_deinit_devlink() argument
5054 ice_devlink_unregister(pf); in ice_deinit_devlink()
5055 ice_devlink_destroy_regions(pf); in ice_deinit_devlink()
5056 ice_devlink_unregister_params(pf); in ice_deinit_devlink()
5059 static int ice_init(struct ice_pf *pf) in ice_init() argument
5063 err = ice_init_dev(pf); in ice_init()
5067 err = ice_alloc_vsis(pf); in ice_init()
5071 err = ice_init_pf_sw(pf); in ice_init()
5075 ice_init_wakeup(pf); in ice_init()
5077 err = ice_init_link(pf); in ice_init()
5081 err = ice_send_version(pf); in ice_init()
5085 ice_verify_cacheline_size(pf); in ice_init()
5087 if (ice_is_safe_mode(pf)) in ice_init()
5088 ice_set_safe_mode_vlan_cfg(pf); in ice_init()
5091 pcie_print_link_status(pf->pdev); in ice_init()
5094 clear_bit(ICE_DOWN, pf->state); in ice_init()
5095 clear_bit(ICE_SERVICE_DIS, pf->state); in ice_init()
5098 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); in ice_init()
5103 ice_deinit_pf_sw(pf); in ice_init()
5105 ice_dealloc_vsis(pf); in ice_init()
5107 ice_deinit_dev(pf); in ice_init()
5111 static void ice_deinit(struct ice_pf *pf) in ice_deinit() argument
5113 set_bit(ICE_SERVICE_DIS, pf->state); in ice_deinit()
5114 set_bit(ICE_DOWN, pf->state); in ice_deinit()
5116 ice_deinit_pf_sw(pf); in ice_deinit()
5117 ice_dealloc_vsis(pf); in ice_deinit()
5118 ice_deinit_dev(pf); in ice_deinit()
5127 int ice_load(struct ice_pf *pf) in ice_load() argument
5132 devl_assert_locked(priv_to_devlink(pf)); in ice_load()
5134 vsi = ice_get_main_vsi(pf); in ice_load()
5146 err = ice_init_mac_fltr(pf); in ice_load()
5150 err = ice_devlink_create_pf_port(pf); in ice_load()
5154 SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port); in ice_load()
5166 err = ice_init_rdma(pf); in ice_load()
5170 ice_init_features(pf); in ice_load()
5171 ice_service_task_restart(pf); in ice_load()
5173 clear_bit(ICE_DOWN, pf->state); in ice_load()
5182 ice_devlink_destroy_pf_port(pf); in ice_load()
5195 void ice_unload(struct ice_pf *pf) in ice_unload() argument
5197 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_unload()
5199 devl_assert_locked(priv_to_devlink(pf)); in ice_unload()
5201 ice_deinit_features(pf); in ice_unload()
5202 ice_deinit_rdma(pf); in ice_unload()
5205 ice_devlink_destroy_pf_port(pf); in ice_unload()
5221 struct ice_pf *pf; in ice_probe() local
5257 pf = ice_allocate_pf(dev); in ice_probe()
5258 if (!pf) in ice_probe()
5262 pf->aux_idx = -1; in ice_probe()
5277 pf->pdev = pdev; in ice_probe()
5278 pf->adapter = adapter; in ice_probe()
5279 pci_set_drvdata(pdev, pf); in ice_probe()
5280 set_bit(ICE_DOWN, pf->state); in ice_probe()
5282 set_bit(ICE_SERVICE_DIS, pf->state); in ice_probe()
5284 hw = &pf->hw; in ice_probe()
5288 hw->back = pf; in ice_probe()
5299 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); in ice_probe()
5306 err = ice_init(pf); in ice_probe()
5310 devl_lock(priv_to_devlink(pf)); in ice_probe()
5311 err = ice_load(pf); in ice_probe()
5315 err = ice_init_devlink(pf); in ice_probe()
5318 devl_unlock(priv_to_devlink(pf)); in ice_probe()
5323 ice_unload(pf); in ice_probe()
5325 devl_unlock(priv_to_devlink(pf)); in ice_probe()
5326 ice_deinit(pf); in ice_probe()
5338 static void ice_set_wake(struct ice_pf *pf) in ice_set_wake() argument
5340 struct ice_hw *hw = &pf->hw; in ice_set_wake()
5341 bool wol = pf->wol_ena; in ice_set_wake()
5361 static void ice_setup_mc_magic_wake(struct ice_pf *pf) in ice_setup_mc_magic_wake() argument
5363 struct device *dev = ice_pf_to_dev(pf); in ice_setup_mc_magic_wake()
5364 struct ice_hw *hw = &pf->hw; in ice_setup_mc_magic_wake()
5370 if (!pf->wol_ena) in ice_setup_mc_magic_wake()
5373 vsi = ice_get_main_vsi(pf); in ice_setup_mc_magic_wake()
5399 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_remove() local
5403 if (!ice_is_reset_in_progress(pf->state)) in ice_remove()
5408 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { in ice_remove()
5409 set_bit(ICE_VF_RESETS_DISABLED, pf->state); in ice_remove()
5410 ice_free_vfs(pf); in ice_remove()
5413 ice_hwmon_exit(pf); in ice_remove()
5415 ice_service_task_stop(pf); in ice_remove()
5416 ice_aq_cancel_waiting_tasks(pf); in ice_remove()
5417 set_bit(ICE_DOWN, pf->state); in ice_remove()
5419 if (!ice_is_safe_mode(pf)) in ice_remove()
5420 ice_remove_arfs(pf); in ice_remove()
5422 devl_lock(priv_to_devlink(pf)); in ice_remove()
5423 ice_dealloc_all_dynamic_ports(pf); in ice_remove()
5424 ice_deinit_devlink(pf); in ice_remove()
5426 ice_unload(pf); in ice_remove()
5427 devl_unlock(priv_to_devlink(pf)); in ice_remove()
5429 ice_deinit(pf); in ice_remove()
5430 ice_vsi_release_all(pf); in ice_remove()
5432 ice_setup_mc_magic_wake(pf); in ice_remove()
5433 ice_set_wake(pf); in ice_remove()
5444 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_shutdown() local
5449 pci_wake_from_d3(pdev, pf->wol_ena); in ice_shutdown()
5460 static void ice_prepare_for_shutdown(struct ice_pf *pf) in ice_prepare_for_shutdown() argument
5462 struct ice_hw *hw = &pf->hw; in ice_prepare_for_shutdown()
5467 ice_vc_notify_reset(pf); in ice_prepare_for_shutdown()
5469 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n"); in ice_prepare_for_shutdown()
5472 ice_pf_dis_all_vsi(pf, false); in ice_prepare_for_shutdown()
5474 ice_for_each_vsi(pf, v) in ice_prepare_for_shutdown()
5475 if (pf->vsi[v]) in ice_prepare_for_shutdown()
5476 pf->vsi[v]->vsi_num = 0; in ice_prepare_for_shutdown()
5491 static int ice_reinit_interrupt_scheme(struct ice_pf *pf) in ice_reinit_interrupt_scheme() argument
5493 struct device *dev = ice_pf_to_dev(pf); in ice_reinit_interrupt_scheme()
5500 ret = ice_init_interrupt_scheme(pf); in ice_reinit_interrupt_scheme()
5507 ice_for_each_vsi(pf, v) { in ice_reinit_interrupt_scheme()
5508 if (!pf->vsi[v]) in ice_reinit_interrupt_scheme()
5511 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5514 ice_vsi_map_rings_to_vectors(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5516 ice_vsi_set_napi_queues(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5520 ret = ice_req_irq_msix_misc(pf); in ice_reinit_interrupt_scheme()
5531 if (pf->vsi[v]) { in ice_reinit_interrupt_scheme()
5533 ice_vsi_clear_napi_queues(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5535 ice_vsi_free_q_vectors(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5551 struct ice_pf *pf; in ice_suspend() local
5554 pf = pci_get_drvdata(pdev); in ice_suspend()
5556 if (!ice_pf_state_is_nominal(pf)) { in ice_suspend()
5567 disabled = ice_service_task_stop(pf); in ice_suspend()
5569 ice_deinit_rdma(pf); in ice_suspend()
5572 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) { in ice_suspend()
5574 ice_service_task_restart(pf); in ice_suspend()
5578 if (test_bit(ICE_DOWN, pf->state) || in ice_suspend()
5579 ice_is_reset_in_progress(pf->state)) { in ice_suspend()
5582 ice_service_task_restart(pf); in ice_suspend()
5586 ice_setup_mc_magic_wake(pf); in ice_suspend()
5588 ice_prepare_for_shutdown(pf); in ice_suspend()
5590 ice_set_wake(pf); in ice_suspend()
5597 ice_free_irq_msix_misc(pf); in ice_suspend()
5598 ice_for_each_vsi(pf, v) { in ice_suspend()
5599 if (!pf->vsi[v]) in ice_suspend()
5602 ice_vsi_clear_napi_queues(pf->vsi[v]); in ice_suspend()
5604 ice_vsi_free_q_vectors(pf->vsi[v]); in ice_suspend()
5606 ice_clear_interrupt_scheme(pf); in ice_suspend()
5609 pci_wake_from_d3(pdev, pf->wol_ena); in ice_suspend()
5622 struct ice_pf *pf; in ice_resume() local
5639 pf = pci_get_drvdata(pdev); in ice_resume()
5640 hw = &pf->hw; in ice_resume()
5642 pf->wakeup_reason = rd32(hw, PFPM_WUS); in ice_resume()
5643 ice_print_wake_reason(pf); in ice_resume()
5648 ret = ice_reinit_interrupt_scheme(pf); in ice_resume()
5652 ret = ice_init_rdma(pf); in ice_resume()
5657 clear_bit(ICE_DOWN, pf->state); in ice_resume()
5661 clear_bit(ICE_SERVICE_DIS, pf->state); in ice_resume()
5663 if (ice_schedule_reset(pf, reset_type)) in ice_resume()
5666 clear_bit(ICE_SUSPENDED, pf->state); in ice_resume()
5667 ice_service_task_restart(pf); in ice_resume()
5670 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); in ice_resume()
5686 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_pci_err_detected() local
5688 if (!pf) { in ice_pci_err_detected()
5694 if (!test_bit(ICE_SUSPENDED, pf->state)) { in ice_pci_err_detected()
5695 ice_service_task_stop(pf); in ice_pci_err_detected()
5697 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { in ice_pci_err_detected()
5698 set_bit(ICE_PFR_REQ, pf->state); in ice_pci_err_detected()
5699 ice_prepare_for_reset(pf, ICE_RESET_PFR); in ice_pci_err_detected()
5715 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_pci_err_slot_reset() local
5732 reg = rd32(&pf->hw, GLGEN_RTRIG); in ice_pci_err_slot_reset()
5751 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_pci_err_resume() local
5753 if (!pf) { in ice_pci_err_resume()
5759 if (test_bit(ICE_SUSPENDED, pf->state)) { in ice_pci_err_resume()
5765 ice_restore_all_vfs_msi_state(pf); in ice_pci_err_resume()
5767 ice_do_reset(pf, ICE_RESET_PFR); in ice_pci_err_resume()
5768 ice_service_task_restart(pf); in ice_pci_err_resume()
5769 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); in ice_pci_err_resume()
5778 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_pci_err_reset_prepare() local
5780 if (!test_bit(ICE_SUSPENDED, pf->state)) { in ice_pci_err_reset_prepare()
5781 ice_service_task_stop(pf); in ice_pci_err_reset_prepare()
5783 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { in ice_pci_err_reset_prepare()
5784 set_bit(ICE_PFR_REQ, pf->state); in ice_pci_err_reset_prepare()
5785 ice_prepare_for_reset(pf, ICE_RESET_PFR); in ice_pci_err_reset_prepare()
5958 struct ice_pf *pf = vsi->back; in ice_set_mac_address() local
5959 struct ice_hw *hw = &pf->hw; in ice_set_mac_address()
5971 if (test_bit(ICE_DOWN, pf->state) || in ice_set_mac_address()
5972 ice_is_reset_in_progress(pf->state)) { in ice_set_mac_address()
5978 if (ice_chnl_dmac_fltr_cnt(pf)) { in ice_set_mac_address()
6455 struct ice_pf *pf = vsi->back; in ice_set_features() local
6459 if (ice_is_safe_mode(pf)) { in ice_set_features()
6460 dev_err(ice_pf_to_dev(pf), in ice_set_features()
6466 if (ice_is_reset_in_progress(pf->state)) { in ice_set_features()
6467 dev_err(ice_pf_to_dev(pf), in ice_set_features()
6507 if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) { in ice_set_features()
6508 dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n"); in ice_set_features()
6515 ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) : in ice_set_features()
6516 clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags); in ice_set_features()
6721 struct ice_pf *pf = vsi->back; in ice_up_complete() local
6745 ice_ptp_link_change(pf, pf->hw.pf_id, true); in ice_up_complete()
6754 ice_service_task_schedule(pf); in ice_up_complete()
6837 struct ice_pf *pf = vsi->back; in ice_update_vsi_ring_stats() local
6888 if (likely(pf->stat_prev_loaded)) { in ice_update_vsi_ring_stats()
6911 struct ice_pf *pf = vsi->back; in ice_update_vsi_stats() local
6914 test_bit(ICE_CFG_BUSY, pf->state)) in ice_update_vsi_stats()
6930 cur_ns->rx_crc_errors = pf->stats.crc_errors; in ice_update_vsi_stats()
6931 cur_ns->rx_errors = pf->stats.crc_errors + in ice_update_vsi_stats()
6932 pf->stats.illegal_bytes + in ice_update_vsi_stats()
6933 pf->stats.rx_undersize + in ice_update_vsi_stats()
6934 pf->hw_csum_rx_error + in ice_update_vsi_stats()
6935 pf->stats.rx_jabber + in ice_update_vsi_stats()
6936 pf->stats.rx_fragments + in ice_update_vsi_stats()
6937 pf->stats.rx_oversize; in ice_update_vsi_stats()
6939 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; in ice_update_vsi_stats()
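
Note: the rx_errors figure assembled above is an aggregate of several distinct port-level hardware counters, while rx_missed_errors maps one-to-one onto the rx_discards counter. A tiny sketch of the same aggregation (the struct and its sample values are made up; only the field names mirror the listing):

#include <stdint.h>
#include <stdio.h>

struct port_stats {
        uint64_t crc_errors;
        uint64_t illegal_bytes;
        uint64_t rx_undersize;
        uint64_t hw_csum_rx_error;
        uint64_t rx_jabber;
        uint64_t rx_fragments;
        uint64_t rx_oversize;
        uint64_t rx_discards;
};

int main(void)
{
        struct port_stats s = { 3, 1, 0, 2, 0, 4, 1, 7 };
        uint64_t rx_errors = s.crc_errors + s.illegal_bytes +
                             s.rx_undersize + s.hw_csum_rx_error +
                             s.rx_jabber + s.rx_fragments + s.rx_oversize;

        printf("rx_errors=%llu rx_missed_errors=%llu\n",
               (unsigned long long)rx_errors,
               (unsigned long long)s.rx_discards);
        return 0;
}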
6947 void ice_update_pf_stats(struct ice_pf *pf) in ice_update_pf_stats() argument
6950 struct ice_hw *hw = &pf->hw; in ice_update_pf_stats()
6955 prev_ps = &pf->stats_prev; in ice_update_pf_stats()
6956 cur_ps = &pf->stats; in ice_update_pf_stats()
6958 if (ice_is_reset_in_progress(pf->state)) in ice_update_pf_stats()
6959 pf->stat_prev_loaded = false; in ice_update_pf_stats()
6961 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6965 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6969 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6973 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6977 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded, in ice_update_pf_stats()
6981 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6985 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6989 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6993 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6997 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7001 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7004 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7007 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7010 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7013 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7016 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7019 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7022 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7025 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7028 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7031 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7034 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7037 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7040 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7047 pf->stat_prev_loaded, &prev_ps->fd_sb_match, in ice_update_pf_stats()
7049 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7052 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7055 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7058 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7061 ice_update_dcb_stats(pf); in ice_update_pf_stats()
7063 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7066 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7069 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7073 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7077 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7080 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7083 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7086 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7089 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0; in ice_update_pf_stats()
7091 pf->stat_prev_loaded = true; in ice_update_pf_stats()
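
Note: every ice_stat_update40()/ice_stat_update32() call above passes pf->stat_prev_loaded together with a previous/current counter pair. The flag is cleared while a reset is in progress (line 6959) and set back to true once all counters have been read (line 7091), so the first read after a reset re-baselines the previous value instead of producing a bogus delta. The sketch below shows that accumulation pattern for a wrapping 40-bit counter; the function name is borrowed from the driver, but the body (including the wrap handling) is an assumed reconstruction, and it takes the register value as a parameter rather than reading hardware:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WRAP40 (1ULL << 40)

static void stat_update40(uint64_t hw_count, bool prev_loaded,
                          uint64_t *prev, uint64_t *cur)
{
        if (!prev_loaded)
                *prev = hw_count;       /* re-baseline after a reset */
        if (hw_count >= *prev)
                *cur += hw_count - *prev;
        else                            /* the 40-bit register wrapped */
                *cur += (hw_count + WRAP40) - *prev;
        *prev = hw_count;
}

int main(void)
{
        uint64_t prev = 0, cur = 0;

        stat_update40(WRAP40 - 6, false, &prev, &cur);  /* first read: baseline only */
        stat_update40(10, true, &prev, &cur);           /* wrapped read: delta is 16 */
        printf("accumulated: %llu\n", (unsigned long long)cur);
        return 0;
}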
7163 struct ice_pf *pf = vsi->back; in ice_vsi_dis_irq() local
7164 struct ice_hw *hw = &pf->hw; in ice_vsi_dis_irq()
7359 struct ice_pf *pf = vsi->back; in ice_vsi_open_ctrl() local
7363 dev = ice_pf_to_dev(pf); in ice_vsi_open_ctrl()
7415 struct ice_pf *pf = vsi->back; in ice_vsi_open() local
7432 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name); in ice_vsi_open()
7474 static void ice_vsi_release_all(struct ice_pf *pf) in ice_vsi_release_all() argument
7478 if (!pf->vsi) in ice_vsi_release_all()
7481 ice_for_each_vsi(pf, i) { in ice_vsi_release_all()
7482 if (!pf->vsi[i]) in ice_vsi_release_all()
7485 if (pf->vsi[i]->type == ICE_VSI_CHNL) in ice_vsi_release_all()
7488 err = ice_vsi_release(pf->vsi[i]); in ice_vsi_release_all()
7490 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", in ice_vsi_release_all()
7491 i, err, pf->vsi[i]->vsi_num); in ice_vsi_release_all()
7502 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) in ice_vsi_rebuild_by_type() argument
7504 struct device *dev = ice_pf_to_dev(pf); in ice_vsi_rebuild_by_type()
7507 ice_for_each_vsi(pf, i) { in ice_vsi_rebuild_by_type()
7508 struct ice_vsi *vsi = pf->vsi[i]; in ice_vsi_rebuild_by_type()
7522 err = ice_replay_vsi(&pf->hw, vsi->idx); in ice_vsi_rebuild_by_type()
7532 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); in ice_vsi_rebuild_by_type()
7553 static void ice_update_pf_netdev_link(struct ice_pf *pf) in ice_update_pf_netdev_link() argument
7558 ice_for_each_vsi(pf, i) { in ice_update_pf_netdev_link()
7559 struct ice_vsi *vsi = pf->vsi[i]; in ice_update_pf_netdev_link()
7564 ice_get_link_status(pf->vsi[i]->port_info, &link_up); in ice_update_pf_netdev_link()
7566 netif_carrier_on(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7567 netif_tx_wake_all_queues(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7569 netif_carrier_off(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7570 netif_tx_stop_all_queues(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7585 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) in ice_rebuild() argument
7587 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_rebuild()
7588 struct device *dev = ice_pf_to_dev(pf); in ice_rebuild()
7589 struct ice_hw *hw = &pf->hw; in ice_rebuild()
7593 if (test_bit(ICE_DOWN, pf->state)) in ice_rebuild()
7604 pf->fw_emp_reset_disabled = false; in ice_rebuild()
7616 if (!ice_is_safe_mode(pf)) { in ice_rebuild()
7622 ice_load_pkg(NULL, pf); in ice_rebuild()
7653 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); in ice_rebuild()
7662 err = ice_req_irq_msix_misc(pf); in ice_rebuild()
7668 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { in ice_rebuild()
7683 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) in ice_rebuild()
7684 ice_dcb_rebuild(pf); in ice_rebuild()
7690 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_rebuild()
7691 ice_ptp_rebuild(pf, reset_type); in ice_rebuild()
7693 if (ice_is_feature_supported(pf, ICE_F_GNSS)) in ice_rebuild()
7694 ice_gnss_init(pf); in ice_rebuild()
7697 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF); in ice_rebuild()
7704 err = ice_rebuild_channels(pf); in ice_rebuild()
7713 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { in ice_rebuild()
7714 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL); in ice_rebuild()
7725 ice_fdir_replay_fltrs(pf); in ice_rebuild()
7727 ice_rebuild_arfs(pf); in ice_rebuild()
7733 ice_update_pf_netdev_link(pf); in ice_rebuild()
7736 err = ice_send_version(pf); in ice_rebuild()
7746 clear_bit(ICE_RESET_FAILED, pf->state); in ice_rebuild()
7748 ice_plug_aux_dev(pf); in ice_rebuild()
7749 if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) in ice_rebuild()
7750 ice_lag_rebuild(pf); in ice_rebuild()
7753 ice_ptp_restore_timestamp_mode(pf); in ice_rebuild()
7761 set_bit(ICE_RESET_FAILED, pf->state); in ice_rebuild()
7764 set_bit(ICE_NEEDS_RESTART, pf->state); in ice_rebuild()
7779 struct ice_pf *pf = vsi->back; in ice_change_mtu() local
7798 } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) { in ice_change_mtu()
7808 if (ice_is_reset_in_progress(pf->state)) { in ice_change_mtu()
7828 set_bit(ICE_FLAG_MTU_CHANGED, pf->flags); in ice_change_mtu()
7842 struct ice_pf *pf = np->vsi->back; in ice_eth_ioctl() local
7846 return ice_ptp_get_ts_config(pf, ifr); in ice_eth_ioctl()
7848 return ice_ptp_set_ts_config(pf, ifr); in ice_eth_ioctl()
8072 struct ice_pf *pf = vsi->back; in ice_bridge_getlink() local
8075 bmode = pf->first_sw->bridge_mode; in ice_bridge_getlink()
8143 struct ice_pf *pf = np->vsi->back; in ice_bridge_setlink() local
8145 struct ice_hw *hw = &pf->hw; in ice_bridge_setlink()
8149 pf_sw = pf->first_sw; in ice_bridge_setlink()
8166 ice_for_each_vsi(pf, v) { in ice_bridge_setlink()
8167 if (!pf->vsi[v]) in ice_bridge_setlink()
8169 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); in ice_bridge_setlink()
8204 struct ice_pf *pf = vsi->back; in ice_tx_timeout() local
8207 pf->tx_timeout_count++; in ice_tx_timeout()
8213 if (ice_is_pfc_causing_hung_q(pf, txqueue)) { in ice_tx_timeout()
8214 dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n", in ice_tx_timeout()
8230 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) in ice_tx_timeout()
8231 pf->tx_timeout_recovery_level = 1; in ice_tx_timeout()
8232 else if (time_before(jiffies, (pf->tx_timeout_last_recovery + in ice_tx_timeout()
8237 struct ice_hw *hw = &pf->hw; in ice_tx_timeout()
8250 pf->tx_timeout_last_recovery = jiffies; in ice_tx_timeout()
8252 pf->tx_timeout_recovery_level, txqueue); in ice_tx_timeout()
8254 switch (pf->tx_timeout_recovery_level) { in ice_tx_timeout()
8256 set_bit(ICE_PFR_REQ, pf->state); in ice_tx_timeout()
8259 set_bit(ICE_CORER_REQ, pf->state); in ice_tx_timeout()
8262 set_bit(ICE_GLOBR_REQ, pf->state); in ice_tx_timeout()
8266 set_bit(ICE_DOWN, pf->state); in ice_tx_timeout()
8268 set_bit(ICE_SERVICE_DIS, pf->state); in ice_tx_timeout()
8272 ice_service_task_schedule(pf); in ice_tx_timeout()
8273 pf->tx_timeout_recovery_level++; in ice_tx_timeout()
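
Note: ice_tx_timeout() above escalates through progressively heavier resets on repeated timeouts: level 1 requests a PF reset, level 2 a CORE reset, level 3 a GLOBAL reset, and anything beyond that takes the device down and disables the service task. Per lines 8230-8231, the level drops back to 1 when more than 20 seconds have passed since the last recovery attempt. A self-contained sketch of the escalation (the print statements stand in for the state-bit requests named in the comments):

#include <stdio.h>

static void tx_timeout_recover(unsigned int *level)
{
        switch (*level) {
        case 1:
                puts("requesting PF reset");            /* ICE_PFR_REQ */
                break;
        case 2:
                puts("requesting CORE reset");          /* ICE_CORER_REQ */
                break;
        case 3:
                puts("requesting GLOBAL reset");        /* ICE_GLOBR_REQ */
                break;
        default:
                /* ICE_DOWN + ICE_SERVICE_DIS in the driver */
                puts("recovery exhausted, taking device down");
                return;
        }
        (*level)++;
}

int main(void)
{
        unsigned int level = 1;

        for (int i = 0; i < 4; i++)
                tx_timeout_recover(&level);
        return 0;
}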
8336 struct ice_pf *pf = vsi->back; in ice_validate_mqprio_qopt() local
8351 dev = ice_pf_to_dev(pf); in ice_validate_mqprio_qopt()
8467 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi) in ice_add_vsi_to_fdir() argument
8469 struct device *dev = ice_pf_to_dev(pf); in ice_add_vsi_to_fdir()
8477 hw = &pf->hw; in ice_add_vsi_to_fdir()
8530 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch) in ice_add_channel() argument
8532 struct device *dev = ice_pf_to_dev(pf); in ice_add_channel()
8540 vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch); in ice_add_channel()
8546 ice_add_vsi_to_fdir(pf, vsi); in ice_add_channel()
8648 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi, in ice_setup_hw_channel() argument
8651 struct device *dev = ice_pf_to_dev(pf); in ice_setup_hw_channel()
8657 ret = ice_add_channel(pf, sw_id, ch); in ice_setup_hw_channel()
8686 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi, in ice_setup_channel() argument
8689 struct device *dev = ice_pf_to_dev(pf); in ice_setup_channel()
8698 sw_id = pf->first_sw->sw_id; in ice_setup_channel()
8701 ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL); in ice_setup_channel()
8739 struct ice_pf *pf = vsi->back; in ice_create_q_channel() local
8745 dev = ice_pf_to_dev(pf); in ice_create_q_channel()
8757 if (!ice_setup_channel(pf, vsi, ch)) { in ice_create_q_channel()
8787 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf) in ice_rem_all_chnl_fltrs() argument
8794 &pf->tc_flower_fltr_list, in ice_rem_all_chnl_fltrs()
8806 status = ice_rem_adv_rule_by_id(&pf->hw, &rule); in ice_rem_all_chnl_fltrs()
8809 dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n", in ice_rem_all_chnl_fltrs()
8812 dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n", in ice_rem_all_chnl_fltrs()
8822 pf->num_dmac_chnl_fltrs--; in ice_rem_all_chnl_fltrs()
8841 struct ice_pf *pf = vsi->back; in ice_remove_q_channels() local
8846 ice_rem_all_chnl_fltrs(pf); in ice_remove_q_channels()
8850 struct ice_hw *hw = &pf->hw; in ice_remove_q_channels()
8888 ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx); in ice_remove_q_channels()
8915 static int ice_rebuild_channels(struct ice_pf *pf) in ice_rebuild_channels() argument
8917 struct device *dev = ice_pf_to_dev(pf); in ice_rebuild_channels()
8925 main_vsi = ice_get_main_vsi(pf); in ice_rebuild_channels()
8929 if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) || in ice_rebuild_channels()
8944 ice_for_each_vsi(pf, i) { in ice_rebuild_channels()
8947 vsi = pf->vsi[i]; in ice_rebuild_channels()
8964 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); in ice_rebuild_channels()
8967 err = ice_replay_vsi(&pf->hw, vsi->idx); in ice_rebuild_channels()
9031 struct ice_pf *pf = vsi->back; in ice_create_q_channels() local
9061 dev_err(ice_pf_to_dev(pf), in ice_create_q_channels()
9068 dev_dbg(ice_pf_to_dev(pf), in ice_create_q_channels()
9089 struct ice_pf *pf = vsi->back; in ice_setup_tc_mqprio_qdisc() local
9096 dev = ice_pf_to_dev(pf); in ice_setup_tc_mqprio_qdisc()
9101 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags); in ice_setup_tc_mqprio_qdisc()
9114 if (pf->hw.port_info->is_custom_tx_enabled) { in ice_setup_tc_mqprio_qdisc()
9118 ice_tear_down_devlink_rate_tree(pf); in ice_setup_tc_mqprio_qdisc()
9127 set_bit(ICE_FLAG_TC_MQPRIO, pf->flags); in ice_setup_tc_mqprio_qdisc()
9133 set_bit(ICE_FLAG_CLS_FLOWER, pf->flags); in ice_setup_tc_mqprio_qdisc()
9149 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) in ice_setup_tc_mqprio_qdisc()
9152 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { in ice_setup_tc_mqprio_qdisc()
9153 vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf), in ice_setup_tc_mqprio_qdisc()
9155 vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf), in ice_setup_tc_mqprio_qdisc()
9192 clear_bit(ICE_RESET_FAILED, pf->state); in ice_setup_tc_mqprio_qdisc()
9208 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { in ice_setup_tc_mqprio_qdisc()
9261 struct ice_pf *pf = np->vsi->back; in ice_setup_tc() local
9272 if (ice_is_eswitch_mode_switchdev(pf)) { in ice_setup_tc()
9277 if (pf->adev) { in ice_setup_tc()
9278 mutex_lock(&pf->adev_mutex); in ice_setup_tc()
9279 device_lock(&pf->adev->dev); in ice_setup_tc()
9281 if (pf->adev->dev.driver) { in ice_setup_tc()
9289 mutex_lock(&pf->tc_mutex); in ice_setup_tc()
9291 mutex_unlock(&pf->tc_mutex); in ice_setup_tc()
9295 device_unlock(&pf->adev->dev); in ice_setup_tc()
9296 mutex_unlock(&pf->adev_mutex); in ice_setup_tc()
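
Note: ice_setup_tc() above nests three locks in a fixed order: adev_mutex guards the pf->adev pointer itself, device_lock() on the auxiliary device keeps its driver from unbinding mid-reconfiguration, and tc_mutex serializes TC changes; releases happen in the reverse order. A minimal pthread sketch of that consistent lock-ordering discipline, assuming nothing about the driver beyond the ordering visible in the listing (mutex names are stand-ins):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t adev_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t adev_dev_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t tc_mutex = PTHREAD_MUTEX_INITIALIZER;

static void setup_tc(void)
{
        /* always acquire in the same order... */
        pthread_mutex_lock(&adev_mutex);
        pthread_mutex_lock(&adev_dev_lock);
        pthread_mutex_lock(&tc_mutex);

        puts("reconfiguring traffic classes");

        /* ...and release in the reverse order */
        pthread_mutex_unlock(&tc_mutex);
        pthread_mutex_unlock(&adev_dev_lock);
        pthread_mutex_unlock(&adev_mutex);
}

int main(void)
{
        setup_tc();
        return 0;
}

Acquiring every path's locks in the same global order is what makes the nesting deadlock-free; the unlock order is conventional rather than required.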
9435 struct ice_pf *pf = np->vsi->back; in ice_open() local
9437 if (ice_is_reset_in_progress(pf->state)) { in ice_open()
9458 struct ice_pf *pf = vsi->back; in ice_open_internal() local
9462 if (test_bit(ICE_NEEDS_RESTART, pf->state)) { in ice_open_internal()
9476 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); in ice_open_internal()
9480 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_open_internal()
9481 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) { in ice_open_internal()
9497 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_open_internal()
9526 struct ice_pf *pf = vsi->back; in ice_stop() local
9528 if (ice_is_reset_in_progress(pf->state)) { in ice_stop()