Lines Matching refs:mvm (the leading number on each line is its line number in the matched source file). A short sketch of the deferred RX-handler pattern visible in these matches follows the listing.
22 #include "mvm.h"
97 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
100 u32 phy_config = iwl_mvm_get_phy_config(mvm);
109 IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
112 if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
116 reg_val = CSR_HW_REV_STEP_DASH(mvm->trans->hw_rev);
134 if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
137 if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt))
140 iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
155 if (!mvm->trans->cfg->apmg_not_supported)
156 iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
161 static void iwl_mvm_rx_esr_mode_notif(struct iwl_mvm *mvm,
166 struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm);
173 iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW);
175 iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW,
179 static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
191 vif = iwl_mvm_get_vif_by_macid(mvm, notif->mac_id);
204 if (mvm->cca_40mhz_workaround)
211 mvm->cca_40mhz_workaround = 2;
220 sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ];
246 struct iwl_mvm *mvm = mvmvif->mvm;
252 if (mvm->fw_static_smps_request &&
257 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_FW, mode,
275 static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
285 mvm->fw_static_smps_request =
287 ieee80211_iterate_interfaces(mvm->hw,
295 * which can't acquire mvm->mutex.
296 * @RX_HANDLER_ASYNC_LOCKED : If the handler needs to hold mvm->mutex
298 * it will be called from a worker with mvm->mutex held.
300 * mutex itself, it will be called from a worker without mvm->mutex held.
302 * and mvm->mutex. Will be handled with the wiphy_work queue infra
322 void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
754 static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
756 const struct iwl_pwr_tx_backoff *backoff = mvm->cfg->pwr_tx_backoffs;
762 iwl_bios_get_pwr_limit(&mvm->fwrt, &dflt_pwr_limit);
776 struct iwl_mvm *mvm =
781 guard(mvm)(mvm);
784 rcu_dereference_protected(mvm->csa_tx_blocked_vif,
785 lockdep_is_held(&mvm->mutex));
791 iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
792 RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
797 struct iwl_mvm *mvm = ctx;
799 mutex_lock(&mvm->mutex);
804 struct iwl_mvm *mvm = ctx;
806 mutex_unlock(&mvm->mutex);
811 struct iwl_mvm *mvm = (struct iwl_mvm *)ctx;
813 guard(mvm)(mvm);
814 return iwl_mvm_send_cmd(mvm, host_cmd);
829 static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
831 struct iwl_trans *trans = mvm->trans;
835 if (WARN(!mvm->mei_registered,
839 mvm->mei_nvm_data = iwl_mei_get_nvm();
840 if (mvm->mei_nvm_data) {
842 * mvm->mei_nvm_data is set and because of that,
846 mvm->nvm_data =
848 mvm->mei_nvm_data,
849 mvm->fw,
850 mvm->set_tx_ant,
851 mvm->set_rx_ant);
855 IWL_ERR(mvm,
861 wiphy_lock(mvm->hw->wiphy);
862 mutex_lock(&mvm->mutex);
864 ret = iwl_trans_start_hw(mvm->trans);
866 mutex_unlock(&mvm->mutex);
867 wiphy_unlock(mvm->hw->wiphy);
872 ret = iwl_run_init_mvm_ucode(mvm);
874 iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
875 if (!ret && iwl_mvm_is_lar_supported(mvm)) {
876 mvm->hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
877 ret = iwl_mvm_init_mcc(mvm);
880 iwl_mvm_stop_device(mvm);
882 mutex_unlock(&mvm->mutex);
883 wiphy_unlock(mvm->hw->wiphy);
887 IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
890 mvm->fw_product_reset = false;
895 static int iwl_mvm_start_post_nvm(struct iwl_mvm *mvm)
900 iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
902 ret = iwl_mvm_mac_setup_register(mvm);
906 mvm->hw_registered = true;
908 iwl_mvm_dbgfs_register(mvm);
910 wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy,
911 mvm->mei_rfkill_blocked,
914 iwl_mvm_mei_set_sw_rfkill_state(mvm);
990 struct iwl_mvm *mvm = ctx;
993 if (iwl_mvm_has_new_tx_api(mvm))
997 ieee80211_iter_keys_rcu(mvm->hw, NULL, iwl_mvm_frob_txf_key_iter, &txf);
1048 struct iwl_mvm *mvm = ctx;
1051 switch (mvm->fwrt.cur_fw_img) {
1058 excl = mvm->fw->dump_excl;
1061 excl = mvm->fw->dump_excl_wowlan;
1065 BUILD_BUG_ON(sizeof(mvm->fw->dump_excl) !=
1066 sizeof(mvm->fw->dump_excl_wowlan));
1068 for (i = 0; i < ARRAY_SIZE(mvm->fw->dump_excl); i++) {
1099 struct iwl_mvm *mvm = priv;
1106 prev_conn_info = rcu_dereference_protected(mvm->csme_conn_info, true);
1114 rcu_assign_pointer(mvm->csme_conn_info, curr_conn_info);
1123 struct iwl_mvm *mvm = priv;
1128 mvm->mei_rfkill_blocked = blocked;
1129 if (!mvm->hw_registered)
1132 wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy,
1133 mvm->mei_rfkill_blocked,
1139 struct iwl_mvm *mvm = priv;
1141 if (!mvm->hw_registered || !mvm->csme_vif)
1144 iwl_mvm_send_roaming_forbidden_event(mvm, mvm->csme_vif, forbidden);
1150 struct iwl_mvm *mvm =
1154 ret = iwl_mvm_start_get_nvm(mvm);
1158 ret = iwl_mvm_start_post_nvm(mvm);
1165 IWL_ERR(mvm, "Couldn't get started...\n");
1168 iwl_fw_flush_dumps(&mvm->fwrt);
1169 iwl_mvm_thermal_exit(mvm);
1170 iwl_fw_runtime_free(&mvm->fwrt);
1171 iwl_phy_db_free(mvm->phy_db);
1172 kfree(mvm->scan_cmd);
1173 iwl_trans_op_mode_leave(mvm->trans);
1174 kfree(mvm->nvm_data);
1175 kfree(mvm->mei_nvm_data);
1177 ieee80211_free_hw(mvm->hw);
1183 struct iwl_mvm *mvm = priv;
1185 if (!mvm->hw_registered)
1186 schedule_work(&mvm->sap_connected_wk);
1191 struct iwl_mvm *mvm = priv;
1194 cfg80211_shutdown_all_interfaces(mvm->hw->wiphy);
1213 iwl_mvm_select_links(mvmvif->mvm, vif);
1219 struct iwl_mvm *mvm =
1222 mutex_lock(&mvm->mutex);
1223 ieee80211_iterate_active_interfaces(mvm->hw,
1227 mutex_unlock(&mvm->mutex);
1236 struct iwl_mvm *mvm;
1251 BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) !=
1278 mvm = IWL_OP_MODE_GET_MVM(op_mode);
1279 mvm->dev = trans->dev;
1280 mvm->trans = trans;
1281 mvm->cfg = cfg;
1282 mvm->fw = fw;
1283 mvm->hw = hw;
1285 iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm,
1286 &iwl_mvm_sanitize_ops, mvm, dbgfs_dir);
1288 iwl_mvm_get_bios_tables(mvm);
1289 iwl_uefi_get_sgom_table(trans, &mvm->fwrt);
1292 mvm->init_status = 0;
1294 if (iwl_mvm_has_new_rx_api(mvm)) {
1310 mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;
1312 if (iwl_mvm_has_new_tx_api(mvm)) {
1322 mvm->aux_queue = IWL_MVM_INVALID_QUEUE;
1323 mvm->snif_queue = IWL_MVM_INVALID_QUEUE;
1324 mvm->probe_queue = IWL_MVM_INVALID_QUEUE;
1325 mvm->p2p_dev_queue = IWL_MVM_INVALID_QUEUE;
1327 mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
1328 mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
1329 mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
1330 mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
1333 mvm->sf_state = SF_UNINIT;
1334 if (iwl_mvm_has_unified_ucode(mvm))
1335 iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR);
1337 iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_INIT);
1338 mvm->drop_bcn_ap_mode = true;
1340 mutex_init(&mvm->mutex);
1341 spin_lock_init(&mvm->async_handlers_lock);
1342 INIT_LIST_HEAD(&mvm->time_event_list);
1343 INIT_LIST_HEAD(&mvm->aux_roc_te_list);
1344 INIT_LIST_HEAD(&mvm->async_handlers_list);
1345 spin_lock_init(&mvm->time_event_lock);
1346 INIT_LIST_HEAD(&mvm->ftm_initiator.loc_list);
1347 INIT_LIST_HEAD(&mvm->ftm_initiator.pasn_list);
1348 INIT_LIST_HEAD(&mvm->resp_pasn_list);
1350 INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
1351 INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
1352 INIT_WORK(&mvm->sap_connected_wk, iwl_mvm_sap_connected_wk);
1353 INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
1354 INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
1355 INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
1356 INIT_LIST_HEAD(&mvm->add_stream_txqs);
1357 spin_lock_init(&mvm->add_stream_lock);
1359 wiphy_work_init(&mvm->async_handlers_wiphy_wk,
1362 wiphy_work_init(&mvm->trig_link_selection_wk,
1365 init_waitqueue_head(&mvm->rx_sync_waitq);
1367 mvm->queue_sync_state = 0;
1369 SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
1371 spin_lock_init(&mvm->tcm.lock);
1372 INIT_DELAYED_WORK(&mvm->tcm.work, iwl_mvm_tcm_work);
1373 mvm->tcm.ts = jiffies;
1374 mvm->tcm.ll_ts = jiffies;
1375 mvm->tcm.uapsd_nonagg_ts = jiffies;
1377 INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);
1379 mvm->cmd_ver.range_resp =
1380 iwl_fw_lookup_notif_ver(mvm->fw, LOCATION_GROUP,
1383 if (WARN_ON_ONCE(mvm->cmd_ver.range_resp > 9))
1398 mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210;
1412 iwl_mvm_get_wd_timeout(mvm, NULL, false, true);
1414 snprintf(mvm->hw->wiphy->fw_version,
1415 sizeof(mvm->hw->wiphy->fw_version),
1418 trans_cfg.fw_reset_handshake = fw_has_capa(&mvm->fw->ucode_capa,
1422 iwl_fw_lookup_cmd_ver(mvm->fw,
1426 mvm->sta_remove_requires_queue_remove =
1429 mvm->mld_api_is_used = iwl_mvm_has_mld_api(mvm->fw);
1432 iwl_trans_configure(mvm->trans, &trans_cfg);
1435 trans->dbg.dest_tlv = mvm->fw->dbg.dest_tlv;
1436 trans->dbg.n_dest_reg = mvm->fw->dbg.n_dest_reg;
1437 memcpy(trans->dbg.conf_tlv, mvm->fw->dbg.conf_tlv,
1439 trans->dbg.trigger_tlv = mvm->fw->dbg.trigger_tlv;
1441 trans->iml = mvm->fw->iml;
1442 trans->iml_len = mvm->fw->iml_len;
1445 iwl_notification_wait_init(&mvm->notif_wait);
1448 mvm->phy_db = iwl_phy_db_init(trans);
1449 if (!mvm->phy_db) {
1450 IWL_ERR(mvm, "Cannot init phy_db\n");
1455 mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
1457 IWL_DEBUG_EEPROM(mvm->trans->dev,
1460 scan_size = iwl_mvm_scan_size(mvm);
1462 mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
1463 if (!mvm->scan_cmd)
1465 mvm->scan_cmd_size = scan_size;
1468 mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA;
1469 mvm->snif_sta.sta_id = IWL_MVM_INVALID_STA;
1472 mvm->last_ebs_successful = true;
1474 min_backoff = iwl_mvm_min_backoff(mvm);
1475 iwl_mvm_thermal_initialize(mvm, min_backoff);
1477 if (!iwl_mvm_has_new_rx_stats_api(mvm))
1478 memset(&mvm->rx_stats_v3, 0,
1481 memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
1483 iwl_mvm_ftm_initiator_smooth_config(mvm);
1485 iwl_mvm_init_time_sync(&mvm->time_sync);
1487 mvm->debugfs_dir = dbgfs_dir;
1490 mvm->mei_registered = !iwl_mei_register(mvm, &mei_ops);
1492 mvm->mei_registered = false;
1495 iwl_mvm_mei_scan_filter_init(&mvm->mei_scan_filter);
1497 if (iwl_mvm_start_get_nvm(mvm)) {
1503 if (trans->csme_own && mvm->mei_registered)
1510 if (iwl_mvm_start_post_nvm(mvm))
1516 iwl_mvm_thermal_exit(mvm);
1517 if (mvm->mei_registered) {
1522 iwl_fw_flush_dumps(&mvm->fwrt);
1523 iwl_fw_runtime_free(&mvm->fwrt);
1525 iwl_phy_db_free(mvm->phy_db);
1526 kfree(mvm->scan_cmd);
1529 ieee80211_free_hw(mvm->hw);
1533 void iwl_mvm_stop_device(struct iwl_mvm *mvm)
1535 lockdep_assert_held(&mvm->mutex);
1537 iwl_fw_cancel_timestamp(&mvm->fwrt);
1539 clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
1541 iwl_mvm_pause_tcm(mvm, false);
1543 iwl_fw_dbg_stop_sync(&mvm->fwrt);
1544 iwl_trans_stop_device(mvm->trans);
1545 iwl_free_fw_paging(&mvm->fwrt);
1546 iwl_fw_dump_conf_clear(&mvm->fwrt);
1547 iwl_mvm_mei_device_state(mvm, false);
1552 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1555 if (mvm->mei_registered) {
1566 cancel_work_sync(&mvm->sap_connected_wk);
1568 iwl_mvm_leds_exit(mvm);
1570 iwl_mvm_thermal_exit(mvm);
1580 if (mvm->hw_registered)
1581 ieee80211_unregister_hw(mvm->hw);
1583 kfree(mvm->scan_cmd);
1584 kfree(mvm->mcast_filter_cmd);
1585 mvm->mcast_filter_cmd = NULL;
1587 kfree(mvm->error_recovery_buf);
1588 mvm->error_recovery_buf = NULL;
1590 iwl_mvm_ptp_remove(mvm);
1592 iwl_trans_op_mode_leave(mvm->trans);
1594 iwl_phy_db_free(mvm->phy_db);
1595 mvm->phy_db = NULL;
1597 kfree(mvm->nvm_data);
1598 kfree(mvm->mei_nvm_data);
1599 kfree(rcu_access_pointer(mvm->csme_conn_info));
1600 kfree(mvm->temp_nvm_data);
1602 kfree(mvm->nvm_sections[i].data);
1603 kfree(mvm->acs_survey);
1605 cancel_delayed_work_sync(&mvm->tcm.work);
1607 iwl_fw_runtime_free(&mvm->fwrt);
1608 mutex_destroy(&mvm->mutex);
1610 if (mvm->mei_registered)
1613 ieee80211_free_hw(mvm->hw);
1620 void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
1623 void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
1627 spin_lock_bh(&mvm->async_handlers_lock);
1628 list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
1633 spin_unlock_bh(&mvm->async_handlers_lock);
1640 static void iwl_mvm_async_handlers_by_context(struct iwl_mvm *mvm,
1651 spin_lock_bh(&mvm->async_handlers_lock);
1652 list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
1658 spin_unlock_bh(&mvm->async_handlers_lock);
1662 mutex_lock(&mvm->mutex);
1663 entry->fn(mvm, &entry->rxb);
1667 mutex_unlock(&mvm->mutex);
1675 struct iwl_mvm *mvm =
1679 iwl_mvm_async_handlers_by_context(mvm, contexts);
1684 struct iwl_mvm *mvm =
1689 iwl_mvm_async_handlers_by_context(mvm, contexts);
1692 static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
1699 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
1715 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
1722 static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
1730 iwl_dbg_tlv_time_point(&mvm->fwrt,
1732 iwl_mvm_rx_check_trigger(mvm, pkt);
1739 iwl_notification_wait_notify(&mvm->notif_wait, pkt);
1748 if (IWL_FW_CHECK(mvm, pkt_len < rx_h->min_size,
1754 rx_h->fn(mvm, rxb);
1768 spin_lock(&mvm->async_handlers_lock);
1769 list_add_tail(&entry->list, &mvm->async_handlers_list);
1770 spin_unlock(&mvm->async_handlers_lock);
1772 wiphy_work_queue(mvm->hw->wiphy,
1773 &mvm->async_handlers_wiphy_wk);
1775 schedule_work(&mvm->async_handlers_wk);
1785 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1789 iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
1791 iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
1793 iwl_mvm_rx_common(mvm, rxb, pkt);
1801 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1805 iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
1808 iwl_mvm_rx_queue_notif(mvm, napi, rxb, 0);
1810 iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
1812 iwl_mvm_rx_bar_frame_release(mvm, napi, rxb, 0);
1814 iwl_mvm_rx_monitor_no_data(mvm, napi, rxb, 0);
1816 iwl_mvm_rx_common(mvm, rxb, pkt);
1819 static int iwl_mvm_is_static_queue(struct iwl_mvm *mvm, int queue)
1821 return queue == mvm->aux_queue || queue == mvm->probe_queue ||
1822 queue == mvm->p2p_dev_queue || queue == mvm->snif_queue;
1828 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1837 sta_id = iwl_mvm_has_new_tx_api(mvm) ?
1838 mvm->tvqm_info[hw_queue].sta_id :
1839 mvm->queue_info[hw_queue].ra_sta_id;
1841 if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
1846 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1851 if (iwl_mvm_is_static_queue(mvm, hw_queue)) {
1853 ieee80211_stop_queues(mvm->hw);
1855 ieee80211_wake_queues(mvm->hw);
1860 if (iwl_mvm_has_new_tx_api(mvm)) {
1861 int tid = mvm->tvqm_info[hw_queue].txq_tid;
1865 tid_bitmap = mvm->queue_info[hw_queue].tid_bitmap;
1883 iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
1902 static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
1904 wiphy_rfkill_set_hw_state(mvm->hw->wiphy,
1905 iwl_mvm_is_radio_killed(mvm));
1908 void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
1911 set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
1913 clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
1915 iwl_mvm_set_rfkill_state(mvm);
1918 struct iwl_mvm_csme_conn_info *iwl_mvm_get_csme_conn_info(struct iwl_mvm *mvm)
1920 return rcu_dereference_protected(mvm->csme_conn_info,
1921 lockdep_is_held(&mvm->mutex));
1926 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1927 bool rfkill_safe_init_done = READ_ONCE(mvm->rfkill_safe_init_done);
1928 bool unified = iwl_mvm_has_unified_ucode(mvm);
1931 set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
1933 clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
1935 iwl_mvm_set_rfkill_state(mvm);
1939 iwl_abort_notification_waits(&mvm->notif_wait);
1957 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1961 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
1962 ieee80211_free_txskb(mvm->hw, skb);
1982 void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
1984 iwl_abort_notification_waits(&mvm->notif_wait);
1985 iwl_dbg_tlv_del_timers(mvm->trans);
1997 iwl_mvm_report_scan_aborted(mvm);
2005 if (!mvm->fw_restart && fw_error) {
2006 iwl_fw_error_collect(&mvm->fwrt, false);
2008 &mvm->status)) {
2009 IWL_ERR(mvm, "Starting mac, retry will be triggered anyway\n");
2010 } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
2013 IWL_ERR(mvm,
2022 IWL_ERR(mvm, "Module is being unloaded - abort\n");
2031 reprobe->dev = get_device(mvm->trans->dev);
2035 &mvm->status)) {
2036 IWL_ERR(mvm, "HW restart already requested, but not started\n");
2037 } else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR &&
2038 mvm->hw_registered &&
2039 !test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) {
2044 set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
2046 if (mvm->fw->ucode_capa.error_log_size) {
2047 u32 src_size = mvm->fw->ucode_capa.error_log_size;
2048 u32 src_addr = mvm->fw->ucode_capa.error_log_addr;
2052 mvm->error_recovery_buf = recover_buf;
2053 iwl_trans_read_mem_bytes(mvm->trans,
2060 iwl_fw_error_collect(&mvm->fwrt, false);
2062 if (fw_error && mvm->fw_restart > 0) {
2063 mvm->fw_restart--;
2064 ieee80211_restart_hw(mvm->hw);
2065 } else if (mvm->fwrt.trans->dbg.restart_required) {
2066 IWL_DEBUG_INFO(mvm, "FW restart requested after debug collection\n");
2067 mvm->fwrt.trans->dbg.restart_required = false;
2068 ieee80211_restart_hw(mvm->hw);
2069 } else if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) {
2070 ieee80211_restart_hw(mvm->hw);
2077 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
2079 if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status) &&
2081 &mvm->status))
2082 iwl_mvm_dump_nic_error_log(mvm);
2085 iwl_fw_error_collect(&mvm->fwrt, true);
2099 if (!test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status))
2102 iwl_mvm_nic_restart(mvm, false);
2107 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
2110 iwl_mvm_nic_restart(mvm, true);
2117 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
2119 iwl_dbg_tlv_time_point(&mvm->fwrt, tp_id, tp_data);
2124 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
2126 mutex_lock(&mvm->mutex);
2127 clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
2128 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
2129 iwl_mvm_stop_device(mvm);
2131 mvm->fast_resume = false;
2133 mutex_unlock(&mvm->mutex);
2161 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
2165 if (unlikely(queue >= mvm->trans->num_rx_queues))
2169 iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
2172 iwl_mvm_rx_queue_notif(mvm, napi, rxb, queue);
2174 iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
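
The matched lines above (for example 1341-1357, 1627-1667 and 1768-1775, together with the handler-context comment around 295-302) outline a deferral pattern: RX handlers that cannot take mvm->mutex in the RX path are queued on async_handlers_list under async_handlers_lock and later executed from a worker that holds the mutex. The code below is a condensed, self-contained sketch of that pattern built from generic kernel primitives; the demo_* names and the entry layout are illustrative stand-ins, not the driver's actual definitions.

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* One deferred handler invocation (loosely analogous to the driver's
 * async handler entries, which carry an fn pointer and the RX buffer). */
struct demo_async_entry {
	struct list_head list;
	void (*fn)(void *ctx);
	void *ctx;
};

static LIST_HEAD(demo_async_list);
static DEFINE_SPINLOCK(demo_async_lock);	/* protects demo_async_list */
static DEFINE_MUTEX(demo_mutex);		/* the lock the handlers need */

/* Worker: drain the list, then run each handler with demo_mutex held,
 * mirroring the mutex_lock()/entry->fn()/mutex_unlock() sequence in the
 * matched lines 1662-1667. */
static void demo_async_wk_fn(struct work_struct *wk)
{
	struct demo_async_entry *entry, *tmp;
	LIST_HEAD(pending);

	spin_lock_bh(&demo_async_lock);
	list_splice_init(&demo_async_list, &pending);
	spin_unlock_bh(&demo_async_lock);

	list_for_each_entry_safe(entry, tmp, &pending, list) {
		mutex_lock(&demo_mutex);
		entry->fn(entry->ctx);
		mutex_unlock(&demo_mutex);
		list_del(&entry->list);
		kfree(entry);
	}
}
static DECLARE_WORK(demo_async_wk, demo_async_wk_fn);

/* RX path: cannot sleep on demo_mutex here, so queue the handler and kick
 * the worker, as in the matched lines 1768-1775. */
static void demo_defer_handler(void (*fn)(void *ctx), void *ctx)
{
	struct demo_async_entry *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

	if (!entry)
		return;

	entry->fn = fn;
	entry->ctx = ctx;

	spin_lock_bh(&demo_async_lock);
	list_add_tail(&entry->list, &demo_async_list);
	spin_unlock_bh(&demo_async_lock);

	schedule_work(&demo_async_wk);
}

The driver additionally distinguishes handler contexts (the RX_HANDLER_* kinds documented around lines 295-302) and routes some handlers through the wiphy work queue instead of schedule_work(); this sketch collapses all of that into a single worker for brevity.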