/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <net/mac80211.h>

#include "iwl-notif-wait.h"
#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "iwl-fw.h"
#include "iwl-debug.h"
#include "iwl-drv.h"
#include "iwl-modparams.h"
#include "mvm.h"
#include "iwl-phy-db.h"
#include "iwl-eeprom-parse.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "rs.h"
#include "fw-api-scan.h"
#include "time-event.h"
#include "fw-dbg.h"
#include "fw-api.h"

#define DRV_DESCRIPTION	"The new Intel(R) wireless AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");

static const struct iwl_op_mode_ops iwl_mvm_ops;
static const struct iwl_op_mode_ops iwl_mvm_ops_mq;

struct iwl_mvm_mod_params iwlmvm_mod_params = {
	.power_scheme = IWL_POWER_SCHEME_BPS,
	.tfd_q_hang_detect = true
	/* rest of fields are 0 by default */
};

module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, S_IRUGO);
MODULE_PARM_DESC(init_dbg,
		 "set to true to debug an ASSERT in INIT fw (default: false)");
module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, S_IRUGO);
MODULE_PARM_DESC(power_scheme,
		 "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
module_param_named(tfd_q_hang_detect, iwlmvm_mod_params.tfd_q_hang_detect,
		   bool, S_IRUGO);
MODULE_PARM_DESC(tfd_q_hang_detect,
		 "TFD queues hang detection (default: true)");

/*
 * module init and exit functions
 */
static int __init iwl_mvm_init(void)
{
	int ret;

	ret = iwl_mvm_rate_control_register();
	if (ret) {
		pr_err("Unable to register rate control algorithm: %d\n", ret);
		return ret;
	}

	ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
	if (ret) {
		pr_err("Unable to register MVM op_mode: %d\n", ret);
		iwl_mvm_rate_control_unregister();
	}

	return ret;
}
module_init(iwl_mvm_init);

static void __exit iwl_mvm_exit(void)
{
	iwl_opmode_deregister("iwlmvm");
	iwl_mvm_rate_control_unregister();
}
module_exit(iwl_mvm_exit);

static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	u32 reg_val = 0;
	u32 phy_config = iwl_mvm_get_phy_config(mvm);

	radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >>
			 FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >>
			 FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >>
			 FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) <<
			CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) <<
			CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
		~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);
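
	/*
	 * Note for readers: iwl_trans_set_bits_mask() below is a
	 * read-modify-write, so only the bits covered by the mask argument
	 * are affected; reg_val can never clobber unrelated fields of
	 * CSR_HW_IF_CONFIG_REG.
	 */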
	/*
	 * TODO: Bits 7-8 of CSR in 8000 HW family set the ADC sampling, and
	 * shouldn't be set to any non-zero value. The same is supposed to be
	 * true of the other HW, but unsetting them (such as on the 7260)
	 * causes automatic tests to fail on seemingly unrelated errors.
	 * Need to further investigate this, but for now we'll separate cases.
	 */
	if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;

	iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
				CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
				CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
				CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
				CSR_HW_IF_CONFIG_REG_BIT_MAC_SI,
				reg_val);

	IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
		       radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A: the NIC is stuck in a reset state after an early PCIe power
	 * off (PCIe power is lost before PERST# is asserted), causing the
	 * ME FW to lose ownership and not be able to obtain it back.
	 */
	if (!mvm->trans->cfg->apmg_not_supported)
		iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
				       ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}

struct iwl_rx_handlers {
	u16 cmd_id;
	bool async;
	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
};

#define RX_HANDLER(_cmd_id, _fn, _async)	\
	{ .cmd_id = _cmd_id, .fn = _fn, .async = _async }
#define RX_HANDLER_GRP(_grp, _cmd, _fn, _async)	\
	{ .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .async = _async }

/*
 * Handlers for fw notifications
 * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME)
 * This list should be in order of frequency for performance purposes.
 *
 * The handler can be SYNC - this means that it will be called in the Rx path
 * which can't acquire mvm->mutex. If the handler needs to hold mvm->mutex (and
 * only in this case!), it should be set as ASYNC. In that case, it will be
 * called from a worker with mvm->mutex held.
 */
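
/*
 * For illustration: given the macros above, an entry such as
 *	RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false)
 * expands to
 *	{ .cmd_id = TX_CMD, .fn = iwl_mvm_rx_tx_cmd, .async = false }
 * i.e. a synchronous handler dispatched directly from the Rx path.
 */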
static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
	RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false),
	RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),

	RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true),
	RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, true),
	RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true),
	RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION,
		   iwl_mvm_rx_ant_coupling_notif, true),

	RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
	RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, true),

	RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),

	RX_HANDLER(SCAN_ITERATION_COMPLETE,
		   iwl_mvm_rx_lmac_scan_iter_complete_notif, false),
	RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
		   iwl_mvm_rx_lmac_scan_complete_notif, true),
	RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found,
		   false),
	RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
		   true),
	RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
		   iwl_mvm_rx_umac_scan_iter_complete_notif, false),

	RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),

	RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
		   false),

	RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false),
	RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
		   iwl_mvm_power_uapsd_misbehaving_ap_notif, false),
	RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, true),
	RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
		       iwl_mvm_temp_notif, true),

	RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
		   true),
	RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false),
	RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true),
};
#undef RX_HANDLER
#undef RX_HANDLER_GRP
/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
	HCMD_NAME(MVM_ALIVE),
	HCMD_NAME(REPLY_ERROR),
	HCMD_NAME(ECHO_CMD),
	HCMD_NAME(INIT_COMPLETE_NOTIF),
	HCMD_NAME(PHY_CONTEXT_CMD),
	HCMD_NAME(DBG_CFG),
	HCMD_NAME(ANTENNA_COUPLING_NOTIFICATION),
	HCMD_NAME(SCAN_CFG_CMD),
	HCMD_NAME(SCAN_REQ_UMAC),
	HCMD_NAME(SCAN_ABORT_UMAC),
	HCMD_NAME(SCAN_COMPLETE_UMAC),
	HCMD_NAME(TOF_CMD),
	HCMD_NAME(TOF_NOTIFICATION),
	HCMD_NAME(ADD_STA_KEY),
	HCMD_NAME(ADD_STA),
	HCMD_NAME(REMOVE_STA),
	HCMD_NAME(FW_GET_ITEM_CMD),
	HCMD_NAME(TX_CMD),
	HCMD_NAME(SCD_QUEUE_CFG),
	HCMD_NAME(TXPATH_FLUSH),
	HCMD_NAME(MGMT_MCAST_KEY),
	HCMD_NAME(WEP_KEY),
	HCMD_NAME(SHARED_MEM_CFG),
	HCMD_NAME(TDLS_CHANNEL_SWITCH_CMD),
	HCMD_NAME(MAC_CONTEXT_CMD),
	HCMD_NAME(TIME_EVENT_CMD),
	HCMD_NAME(TIME_EVENT_NOTIFICATION),
	HCMD_NAME(BINDING_CONTEXT_CMD),
	HCMD_NAME(TIME_QUOTA_CMD),
	HCMD_NAME(NON_QOS_TX_COUNTER_CMD),
	HCMD_NAME(LQ_CMD),
	HCMD_NAME(FW_PAGING_BLOCK_CMD),
	HCMD_NAME(SCAN_OFFLOAD_REQUEST_CMD),
	HCMD_NAME(SCAN_OFFLOAD_ABORT_CMD),
	HCMD_NAME(HOT_SPOT_CMD),
	HCMD_NAME(SCAN_OFFLOAD_PROFILES_QUERY_CMD),
	HCMD_NAME(SCAN_OFFLOAD_HOTSPOTS_CONFIG_CMD),
	HCMD_NAME(SCAN_OFFLOAD_HOTSPOTS_QUERY_CMD),
	HCMD_NAME(BT_COEX_UPDATE_SW_BOOST),
	HCMD_NAME(BT_COEX_UPDATE_CORUN_LUT),
	HCMD_NAME(BT_COEX_UPDATE_REDUCED_TXP),
	HCMD_NAME(BT_COEX_CI),
	HCMD_NAME(PHY_CONFIGURATION_CMD),
	HCMD_NAME(CALIB_RES_NOTIF_PHY_DB),
	HCMD_NAME(SCAN_OFFLOAD_COMPLETE),
	HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
	HCMD_NAME(SCAN_OFFLOAD_CONFIG_CMD),
	HCMD_NAME(POWER_TABLE_CMD),
	HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
	HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF),
	HCMD_NAME(DC2DC_CONFIG_CMD),
	HCMD_NAME(NVM_ACCESS_CMD),
	HCMD_NAME(SET_CALIB_DEFAULT_CMD),
	HCMD_NAME(BEACON_NOTIFICATION),
	HCMD_NAME(BEACON_TEMPLATE_CMD),
	HCMD_NAME(TX_ANT_CONFIGURATION_CMD),
	HCMD_NAME(BT_CONFIG),
	HCMD_NAME(STATISTICS_CMD),
	HCMD_NAME(STATISTICS_NOTIFICATION),
	HCMD_NAME(EOSP_NOTIFICATION),
	HCMD_NAME(REDUCE_TX_POWER_CMD),
	HCMD_NAME(CARD_STATE_CMD),
	HCMD_NAME(CARD_STATE_NOTIFICATION),
	HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
	HCMD_NAME(TDLS_CONFIG_CMD),
	HCMD_NAME(MAC_PM_POWER_TABLE),
	HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION),
	HCMD_NAME(MFUART_LOAD_NOTIFICATION),
	HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC),
	HCMD_NAME(REPLY_RX_PHY_CMD),
	HCMD_NAME(REPLY_RX_MPDU_CMD),
	HCMD_NAME(BA_NOTIF),
	HCMD_NAME(MCC_UPDATE_CMD),
	HCMD_NAME(MCC_CHUB_UPDATE_CMD),
	HCMD_NAME(MARKER_CMD),
	HCMD_NAME(BT_COEX_PRIO_TABLE),
	HCMD_NAME(BT_COEX_PROT_ENV),
	HCMD_NAME(BT_PROFILE_NOTIFICATION),
	HCMD_NAME(BCAST_FILTER_CMD),
	HCMD_NAME(MCAST_FILTER_CMD),
	HCMD_NAME(REPLY_SF_CFG_CMD),
	HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
	HCMD_NAME(D3_CONFIG_CMD),
	HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD),
	HCMD_NAME(OFFLOADS_QUERY_CMD),
	HCMD_NAME(REMOTE_WAKE_CONFIG_CMD),
	HCMD_NAME(MATCH_FOUND_NOTIFICATION),
	HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER),
	HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION),
	HCMD_NAME(WOWLAN_PATTERNS),
	HCMD_NAME(WOWLAN_CONFIGURATION),
	HCMD_NAME(WOWLAN_TSC_RSC_PARAM),
	HCMD_NAME(WOWLAN_TKIP_PARAM),
	HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL),
	HCMD_NAME(WOWLAN_GET_STATUSES),
	HCMD_NAME(WOWLAN_TX_POWER_PER_DB),
	HCMD_NAME(SCAN_ITERATION_COMPLETE),
	HCMD_NAME(D0I3_END_CMD),
	HCMD_NAME(LTR_CONFIG),
	HCMD_NAME(REPLY_DEBUG_CMD),
};
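
/*
 * HCMD_NAME() (from iwl-trans.h) pairs a command ID with its stringified
 * name, roughly { .cmd_id = x, .cmd_name = #x }. The transport uses these
 * tables to print readable command names in debug output, looking entries
 * up by binary search - hence the sorting requirement above.
 */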
/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
	HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
	HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
};

static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
	[LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
	[LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
	[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
};

/* these forward declarations avoid having to export the functions */
static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);

static u32 calc_min_backoff(struct iwl_trans *trans, const struct iwl_cfg *cfg)
{
	const struct iwl_pwr_tx_backoff *pwr_tx_backoff = cfg->pwr_tx_backoffs;

	if (!pwr_tx_backoff)
		return 0;

	while (pwr_tx_backoff->pwr) {
		if (trans->dflt_pwr_limit >= pwr_tx_backoff->pwr)
			return pwr_tx_backoff->backoff;

		pwr_tx_backoff++;
	}

	return 0;
}

static void iwl_mvm_fw_error_dump_wk(struct work_struct *work);

static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
		      const struct iwl_fw *fw, struct dentry *dbgfs_dir)
{
	struct ieee80211_hw *hw;
	struct iwl_op_mode *op_mode;
	struct iwl_mvm *mvm;
	struct iwl_trans_config trans_cfg = {};
	static const u8 no_reclaim_cmds[] = {
		TX_CMD,
	};
	int err, scan_size;
	u32 min_backoff;

	/*
	 * We use IWL_MVM_STATION_COUNT to check the validity of the station
	 * index all over the driver - check that its value corresponds to the
	 * array size.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT);

	/********************************
	 * 1. Allocating and configuring HW data
	 ********************************/
	hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
				sizeof(struct iwl_mvm),
				&iwl_mvm_hw_ops);
	if (!hw)
		return NULL;

	if (cfg->max_rx_agg_size)
		hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size;

	if (cfg->max_tx_agg_size)
		hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;

	op_mode = hw->priv;

	mvm = IWL_OP_MODE_GET_MVM(op_mode);
	mvm->dev = trans->dev;
	mvm->trans = trans;
	mvm->cfg = cfg;
	mvm->fw = fw;
	mvm->hw = hw;

	if (iwl_mvm_has_new_rx_api(mvm)) {
		op_mode->ops = &iwl_mvm_ops_mq;
	} else {
		op_mode->ops = &iwl_mvm_ops;

		if (WARN_ON(trans->num_rx_queues > 1))
			goto out_free;
	}
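
	/*
	 * restart_fw acts as a restart budget in iwl_mvm_nic_restart():
	 * the -1 set here from the module parameter effectively means
	 * "restart without limit" (it is never decremented), while 0
	 * disables firmware restarts and only collects debug data.
	 */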
	mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;

	mvm->aux_queue = 15;
	mvm->first_agg_queue = 16;
	mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1;
	if (mvm->cfg->base_params->num_of_queues == 16) {
		mvm->aux_queue = 11;
		mvm->first_agg_queue = 12;
	}
	mvm->sf_state = SF_UNINIT;
	mvm->cur_ucode = IWL_UCODE_INIT;

	mutex_init(&mvm->mutex);
	mutex_init(&mvm->d0i3_suspend_mutex);
	spin_lock_init(&mvm->async_handlers_lock);
	INIT_LIST_HEAD(&mvm->time_event_list);
	INIT_LIST_HEAD(&mvm->aux_roc_te_list);
	INIT_LIST_HEAD(&mvm->async_handlers_list);
	spin_lock_init(&mvm->time_event_lock);
	spin_lock_init(&mvm->queue_info_lock);

	INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
	INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
	INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
	INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
	INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk);
	INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);

	spin_lock_init(&mvm->d0i3_tx_lock);
	spin_lock_init(&mvm->refs_lock);
	skb_queue_head_init(&mvm->d0i3_tx);
	init_waitqueue_head(&mvm->d0i3_exit_waitq);

	SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);

	/*
	 * Populate the state variables that the transport layer needs
	 * to know about.
	 */
	trans_cfg.op_mode = op_mode;
	trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
	trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
	switch (iwlwifi_mod_params.amsdu_size) {
	case IWL_AMSDU_4K:
		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
		break;
	case IWL_AMSDU_8K:
		trans_cfg.rx_buf_size = IWL_AMSDU_8K;
		break;
	case IWL_AMSDU_12K:
		trans_cfg.rx_buf_size = IWL_AMSDU_12K;
		break;
	default:
		pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME,
		       iwlwifi_mod_params.amsdu_size);
		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
	}
	trans_cfg.wide_cmd_header = fw_has_api(&mvm->fw->ucode_capa,
					       IWL_UCODE_TLV_API_WIDE_CMD_HDR);

	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE)
		trans_cfg.bc_table_dword = true;

	trans_cfg.command_groups = iwl_mvm_groups;
	trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);

	trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
	trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
	trans_cfg.scd_set_active = true;

	trans_cfg.sdio_adma_addr = fw->sdio_adma_addr;
	trans_cfg.sw_csum_tx = IWL_MVM_SW_TX_CSUM_OFFLOAD;

	/* Set a short watchdog for the command queue */
	trans_cfg.cmd_q_wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, NULL, false, true);

	snprintf(mvm->hw->wiphy->fw_version,
		 sizeof(mvm->hw->wiphy->fw_version),
		 "%s", fw->fw_version);

	/* Configure transport layer */
	iwl_trans_configure(mvm->trans, &trans_cfg);

	trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
	trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
	trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv;
	trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
	memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
	       sizeof(trans->dbg_conf_tlv));
	trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;

	/* set up notification wait support */
	iwl_notification_wait_init(&mvm->notif_wait);

	/* Init phy db */
	mvm->phy_db = iwl_phy_db_init(trans);
	if (!mvm->phy_db) {
		IWL_ERR(mvm, "Cannot init phy_db\n");
		goto out_free;
	}

	IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
		 mvm->cfg->name, mvm->trans->hw_rev);

	min_backoff = calc_min_backoff(trans, cfg);
	iwl_mvm_tt_initialize(mvm, min_backoff);

	if (iwlwifi_mod_params.nvm_file)
		mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
	else
		IWL_DEBUG_EEPROM(mvm->trans->dev,
				 "working without external nvm file\n");

	if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name,
		 "not allowing power-up and not having nvm_file\n"))
		goto out_free;

	/*
	 * Even if the NVM exists in the nvm_file, the driver should read it
	 * again from the NIC, because there might be entries that exist in
	 * the OTP and not in the file.
	 * For NICs with no_power_up_nic_in_init: rely completely on nvm_file.
	 */
	if (cfg->no_power_up_nic_in_init && mvm->nvm_file_name) {
		err = iwl_nvm_init(mvm, false);
		if (err)
			goto out_free;
	} else {
		err = iwl_trans_start_hw(mvm->trans);
		if (err)
			goto out_free;

		mutex_lock(&mvm->mutex);
		iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
		err = iwl_run_init_mvm_ucode(mvm, true);
		if (!err || !iwlmvm_mod_params.init_dbg)
			iwl_trans_stop_device(trans);
		iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
		mutex_unlock(&mvm->mutex);
		/* returns 0 if successful, 1 if success but in rfkill */
		if (err < 0 && !iwlmvm_mod_params.init_dbg) {
			IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
			goto out_free;
		}
	}

	scan_size = iwl_mvm_scan_size(mvm);

	mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
	if (!mvm->scan_cmd)
		goto out_free;

	/* Set EBS as successful as long as not stated otherwise by the FW. */
	mvm->last_ebs_successful = true;

	err = iwl_mvm_mac_setup_register(mvm);
	if (err)
		goto out_free;

	err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
	if (err)
		goto out_unregister;

	memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));

	/* rpm starts with a taken reference, we can release it now */
	iwl_trans_unref(mvm->trans);

	iwl_mvm_tof_init(mvm);

	return op_mode;

 out_unregister:
	ieee80211_unregister_hw(mvm->hw);
	iwl_mvm_leds_exit(mvm);
 out_free:
	flush_delayed_work(&mvm->fw_dump_wk);
	iwl_phy_db_free(mvm->phy_db);
	kfree(mvm->scan_cmd);
	if (!cfg->no_power_up_nic_in_init || !mvm->nvm_file_name)
		iwl_trans_op_mode_leave(trans);
	ieee80211_free_hw(mvm->hw);
	return NULL;
}

static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	int i;

	iwl_mvm_leds_exit(mvm);

	iwl_mvm_tt_exit(mvm);

	ieee80211_unregister_hw(mvm->hw);

	kfree(mvm->scan_cmd);
	kfree(mvm->mcast_filter_cmd);
	mvm->mcast_filter_cmd = NULL;

#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
	kfree(mvm->d3_resume_sram);
#endif

	iwl_trans_op_mode_leave(mvm->trans);

	iwl_phy_db_free(mvm->phy_db);
	mvm->phy_db = NULL;

	iwl_free_nvm_data(mvm->nvm_data);
	for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
		kfree(mvm->nvm_sections[i].data);

	iwl_free_fw_paging(mvm);

	iwl_mvm_tof_clean(mvm);

	ieee80211_free_hw(mvm->hw);
}

struct iwl_async_handler_entry {
	struct list_head list;
	struct iwl_rx_cmd_buffer rxb;
	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
};
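
/*
 * Lifecycle of an entry: allocated in iwl_mvm_rx_common() when an ASYNC
 * handler matches (the rxb page is stolen so the buffer outlives the Rx
 * path), queued on mvm->async_handlers_list, then either run and freed by
 * iwl_mvm_async_handlers_wk() under mvm->mutex, or dropped by
 * iwl_mvm_async_handlers_purge() on stop.
 */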

void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
{
	struct iwl_async_handler_entry *entry, *tmp;

	spin_lock_bh(&mvm->async_handlers_lock);
	list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
		iwl_free_rxb(&entry->rxb);
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&mvm->async_handlers_lock);
}

static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm =
		container_of(wk, struct iwl_mvm, async_handlers_wk);
	struct iwl_async_handler_entry *entry, *tmp;
	struct list_head local_list;

	INIT_LIST_HEAD(&local_list);

	/* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
	mutex_lock(&mvm->mutex);

	/*
	 * Sync with Rx path with a lock. Remove all the entries from this
	 * list, add them to a local one (lock free), and then handle them.
	 */
	spin_lock_bh(&mvm->async_handlers_lock);
	list_splice_init(&mvm->async_handlers_list, &local_list);
	spin_unlock_bh(&mvm->async_handlers_lock);

	list_for_each_entry_safe(entry, tmp, &local_list, list) {
		entry->fn(mvm, &entry->rxb);
		iwl_free_rxb(&entry->rxb);
		list_del(&entry->list);
		kfree(entry);
	}
	mutex_unlock(&mvm->mutex);
}

static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
					    struct iwl_rx_packet *pkt)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_cmd *cmds_trig;
	int i;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF);
	cmds_trig = (void *)trig->data;

	if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
		return;

	for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
		/* don't collect on CMD 0 */
		if (!cmds_trig->cmds[i].cmd_id)
			break;

		if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd ||
		    cmds_trig->cmds[i].group_id != pkt->hdr.group_id)
			continue;

		iwl_mvm_fw_dbg_collect_trig(mvm, trig,
					    "CMD 0x%02x.%02x received",
					    pkt->hdr.group_id, pkt->hdr.cmd);
		break;
	}
}

static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
			      struct iwl_rx_cmd_buffer *rxb,
			      struct iwl_rx_packet *pkt)
{
	int i;

	iwl_mvm_rx_check_trigger(mvm, pkt);

	/*
	 * Do the notification wait before RX handlers so
	 * even if the RX handler consumes the RXB we have
	 * access to it in the notification wait entry.
	 */
	iwl_notification_wait_notify(&mvm->notif_wait, pkt);

	for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
		const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
		struct iwl_async_handler_entry *entry;

		if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
			continue;

		if (!rx_h->async) {
			rx_h->fn(mvm, rxb);
			return;
		}

		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
		/* we can't do much... */
		if (!entry)
			return;

		entry->rxb._page = rxb_steal_page(rxb);
		entry->rxb._offset = rxb->_offset;
		entry->rxb._rx_page_order = rxb->_rx_page_order;
		entry->fn = rx_h->fn;
		spin_lock(&mvm->async_handlers_lock);
		list_add_tail(&entry->list, &mvm->async_handlers_list);
		spin_unlock(&mvm->async_handlers_lock);
		schedule_work(&mvm->async_handlers_wk);
		break;
	}
}

static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
		       struct napi_struct *napi,
		       struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
		iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
	else if (pkt->hdr.cmd == FRAME_RELEASE)
		iwl_mvm_rx_frame_release(mvm, rxb, 0);
	else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
		iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
	else
		iwl_mvm_rx_common(mvm, rxb, pkt);
}

static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
			  struct napi_struct *napi,
			  struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
	else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
		iwl_mvm_rx_phy_cmd_mq(mvm, rxb);
	else
		iwl_mvm_rx_common(mvm, rxb, pkt);
}
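
/*
 * iwl_mvm_stop_sw_queue() and iwl_mvm_wake_sw_queue() below keep a
 * per-mac80211-queue stop count: several hw queues can map to the same
 * mac80211 queue, so that queue may only be woken once the last hw queue
 * feeding it has been woken.
 */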
static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	unsigned long mq;
	int q;

	spin_lock_bh(&mvm->queue_info_lock);
	mq = mvm->queue_info[queue].hw_queue_to_mac80211;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (WARN_ON_ONCE(!mq))
		return;

	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
		if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "queue %d (mac80211 %d) already stopped\n",
					    queue, q);
			continue;
		}

		ieee80211_stop_queue(mvm->hw, q);
	}
}

static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
			     const struct iwl_device_cmd *cmd)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	/*
	 * For now, we only set the CMD_WANT_ASYNC_CALLBACK for ADD_STA
	 * commands that need to block the Tx queues.
	 */
	iwl_trans_block_txq_ptrs(mvm->trans, false);
}

static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	unsigned long mq;
	int q;

	spin_lock_bh(&mvm->queue_info_lock);
	mq = mvm->queue_info[queue].hw_queue_to_mac80211;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (WARN_ON_ONCE(!mq))
		return;

	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
		if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "queue %d (mac80211 %d) still stopped\n",
					    queue, q);
			continue;
		}

		ieee80211_wake_queue(mvm->hw, q);
	}
}

void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
{
	if (state)
		set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
	else
		clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);

	wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
}

static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	bool calibrating = ACCESS_ONCE(mvm->calibrating);

	if (state)
		set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
	else
		clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);

	wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));

	/* iwl_run_init_mvm_ucode is waiting for results, abort it */
	if (calibrating)
		iwl_abort_notification_waits(&mvm->notif_wait);

	/*
	 * Stop the device if we run OPERATIONAL firmware or if we are in the
	 * middle of the calibrations.
	 */
	return state && (mvm->cur_ucode != IWL_UCODE_INIT || calibrating);
}

static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	struct ieee80211_tx_info *info;

	info = IEEE80211_SKB_CB(skb);
	iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
	ieee80211_free_txskb(mvm->hw, skb);
}

struct iwl_mvm_reprobe {
	struct device *dev;
	struct work_struct work;
};

static void iwl_mvm_reprobe_wk(struct work_struct *wk)
{
	struct iwl_mvm_reprobe *reprobe;

	reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
	if (device_reprobe(reprobe->dev))
		dev_err(reprobe->dev, "reprobe failed!\n");
	kfree(reprobe);
	module_put(THIS_MODULE);
}

static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
{
	struct iwl_mvm *mvm =
		container_of(work, struct iwl_mvm, fw_dump_wk.work);

	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT))
		return;

	mutex_lock(&mvm->mutex);

	/* stop recording */
	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
		iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
	} else {
		iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
		/* wait until the DBGC has stopped before collecting the data */
		udelay(100);
	}

	iwl_mvm_fw_error_dump(mvm);

	/* start recording again if the firmware is not crashed */
	WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) &&
		     mvm->fw->dbg_dest_tlv &&
		     iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf));

	mutex_unlock(&mvm->mutex);

	iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
}
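
/*
 * Error-recovery policy: depending on the state, either just collect
 * debug data (restarts disabled), reprobe the device (error hit while a
 * restart was already in progress), or hand the restart off to mac80211.
 */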
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
{
	iwl_abort_notification_waits(&mvm->notif_wait);

	/*
	 * This is a bit racy, but worst case we tell mac80211 about
	 * a stopped/aborted scan when that was already done which
	 * is not a problem. It is necessary to abort any OS scan
	 * here because mac80211 requires having the scan cleared
	 * before restarting.
	 * We'll reset the scan_status to NONE in restart cleanup in
	 * the next start() call from mac80211. If restart isn't called
	 * (no fw restart), the scan status will stay busy.
	 */
	iwl_mvm_report_scan_aborted(mvm);

	/*
	 * If we're restarting already, don't cycle restarts.
	 * If INIT fw asserted, it will likely fail again.
	 * If WoWLAN fw asserted, don't restart either, mac80211
	 * can't recover this since we're already half suspended.
	 */
	if (!mvm->restart_fw && fw_error) {
		iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert,
					    NULL);
	} else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART,
				    &mvm->status)) {
		struct iwl_mvm_reprobe *reprobe;

		IWL_ERR(mvm,
			"Firmware error during reconfiguration - reprobe!\n");

		/*
		 * get a module reference to avoid doing this while unloading
		 * anyway and to avoid scheduling a work with code that's
		 * being removed.
		 */
		if (!try_module_get(THIS_MODULE)) {
			IWL_ERR(mvm, "Module is being unloaded - abort\n");
			return;
		}

		reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC);
		if (!reprobe) {
			module_put(THIS_MODULE);
			return;
		}
		reprobe->dev = mvm->trans->dev;
		INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
		schedule_work(&reprobe->work);
	} else if (mvm->cur_ucode == IWL_UCODE_REGULAR) {
		/* don't let the transport/FW power down */
		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);

		if (fw_error && mvm->restart_fw > 0)
			mvm->restart_fw--;
		ieee80211_restart_hw(mvm->hw);
	}
}

static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	iwl_mvm_dump_nic_error_log(mvm);

	iwl_mvm_nic_restart(mvm, true);
}

static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	WARN_ON(1);
	iwl_mvm_nic_restart(mvm, true);
}

struct iwl_d0i3_iter_data {
	struct iwl_mvm *mvm;
	struct ieee80211_vif *connected_vif;
	u8 ap_sta_id;
	u8 vif_count;
	u8 offloading_tid;
	bool disable_offloading;
};
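
/*
 * D0i3 entry flow (see iwl_mvm_enter_d0i3()): iterate over the active
 * interfaces to find the connected station vif, pick a TID whose qos seq
 * counter the firmware may own while the host sleeps, then send the
 * WOWLAN_CONFIGURATION and D3_CONFIG_CMD commands.
 */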
static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					struct iwl_d0i3_iter_data *iter_data)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct ieee80211_sta *ap_sta;
	struct iwl_mvm_sta *mvmsta;
	u32 available_tids = 0;
	u8 tid;

	if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
		    mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
		return false;

	ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]);
	if (IS_ERR_OR_NULL(ap_sta))
		return false;

	mvmsta = iwl_mvm_sta_from_mac80211(ap_sta);
	spin_lock_bh(&mvmsta->lock);
	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];

		/*
		 * in case of pending tx packets, don't use this tid
		 * for offloading in order to prevent reuse of the same
		 * qos seq counters.
		 */
		if (iwl_mvm_tid_queued(tid_data))
			continue;

		if (tid_data->state != IWL_AGG_OFF)
			continue;

		available_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	/*
	 * disallow protocol offloading if we have no available tid
	 * (with no pending frames and no active aggregation,
	 * as we don't handle "holes" properly - the scheduler needs the
	 * frame's seq number and TFD index to match)
	 */
	if (!available_tids)
		return true;

	/* for simplicity, just use the first available tid */
	iter_data->offloading_tid = ffs(available_tids) - 1;
	return false;
}

static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
					struct ieee80211_vif *vif)
{
	struct iwl_d0i3_iter_data *data = _data;
	struct iwl_mvm *mvm = data->mvm;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;

	IWL_DEBUG_RPM(mvm, "entering D0i3 - vif %pM\n", vif->addr);
	if (vif->type != NL80211_IFTYPE_STATION ||
	    !vif->bss_conf.assoc)
		return;

	/*
	 * in case of pending tx packets or active aggregations,
	 * avoid offloading features in order to prevent reuse of
	 * the same qos seq counters.
	 */
	if (iwl_mvm_disallow_offloading(mvm, vif, data))
		data->disable_offloading = true;

	iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags);
	iwl_mvm_send_proto_offload(mvm, vif, data->disable_offloading,
				   false, flags);

	/*
	 * on init/association, mvm already configures POWER_TABLE_CMD
	 * and REPLY_MCAST_FILTER_CMD, so currently don't
	 * reconfigure them (we might want to use different
	 * params later on, though).
	 */
	data->ap_sta_id = mvmvif->ap_sta_id;
	data->vif_count++;

	/*
	 * no new commands can be sent at this stage, so it's safe
	 * to save the vif pointer during d0i3 entrance.
	 */
	data->connected_vif = vif;
}
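
/*
 * D0i3 reuses the WoWLAN firmware interface: the helper below fills in
 * the parts of the WoWLAN config command that depend on the connected AP
 * station (11n state, offloading TID, qos seq counters).
 */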
static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
				    struct iwl_wowlan_config_cmd *cmd,
				    struct iwl_d0i3_iter_data *iter_data)
{
	struct ieee80211_sta *ap_sta;
	struct iwl_mvm_sta *mvm_ap_sta;

	if (iter_data->ap_sta_id == IWL_MVM_STATION_COUNT)
		return;

	rcu_read_lock();

	ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[iter_data->ap_sta_id]);
	if (IS_ERR_OR_NULL(ap_sta))
		goto out;

	mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
	cmd->is_11n_connection = ap_sta->ht_cap.ht_supported;
	cmd->offloading_tid = iter_data->offloading_tid;
	cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING |
		ENABLE_DHCP_FILTERING;
	/*
	 * The d0i3 uCode takes care of the nonqos counters,
	 * so configure only the qos seq ones.
	 */
	iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, cmd);
out:
	rcu_read_unlock();
}

int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
	int ret;
	struct iwl_d0i3_iter_data d0i3_iter_data = {
		.mvm = mvm,
	};
	struct iwl_wowlan_config_cmd wowlan_config_cmd = {
		.wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
					     IWL_WOWLAN_WAKEUP_BEACON_MISS |
					     IWL_WOWLAN_WAKEUP_LINK_CHANGE |
					     IWL_WOWLAN_WAKEUP_BCN_FILTERING),
	};
	struct iwl_d3_manager_config d3_cfg_cmd = {
		.min_sleep_time = cpu_to_le32(1000),
		.wakeup_flags = cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR),
	};

	IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");

	if (WARN_ON_ONCE(mvm->cur_ucode != IWL_UCODE_REGULAR))
		return -EINVAL;

	set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);

	/*
	 * iwl_mvm_ref_sync takes a reference before checking the flag,
	 * so by checking that there is no held reference we prevent a
	 * state in which iwl_mvm_ref_sync continues successfully while
	 * we configure the firmware to enter d0i3.
	 */
	if (iwl_mvm_ref_taken(mvm)) {
		IWL_DEBUG_RPM(mvm->trans, "abort d0i3 due to taken ref\n");
		clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
		wake_up(&mvm->d0i3_exit_waitq);
		return 1;
	}

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_enter_d0i3_iterator,
						   &d0i3_iter_data);
	if (d0i3_iter_data.vif_count == 1) {
		mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id;
		mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
	} else {
		WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
		mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
		mvm->d0i3_offloading = false;
	}

	/* make sure we have no running tx while configuring the seqno */
	synchronize_net();

	/* Flush the hw queues, in case something got queued during entry */
	ret = iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm), flags);
	if (ret)
		return ret;

	/* configure wowlan configuration only if needed */
	if (mvm->d0i3_ap_sta_id != IWL_MVM_STATION_COUNT) {
		iwl_mvm_wowlan_config_key_params(mvm,
						 d0i3_iter_data.connected_vif,
						 true, flags);

		iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd,
					&d0i3_iter_data);

		ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
					   sizeof(wowlan_config_cmd),
					   &wowlan_config_cmd);
		if (ret)
			return ret;
	}

	return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD,
				    flags | CMD_MAKE_TRANS_IDLE,
				    sizeof(d3_cfg_cmd), &d3_cfg_cmd);
}

static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = _data;
	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO;

	IWL_DEBUG_RPM(mvm, "exiting D0i3 - vif %pM\n", vif->addr);
	if (vif->type != NL80211_IFTYPE_STATION ||
	    !vif->bss_conf.assoc)
		return;

	iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
}

struct iwl_mvm_d0i3_exit_work_iter_data {
	struct iwl_mvm *mvm;
	struct iwl_wowlan_status *status;
	u32 wakeup_reasons;
};
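
/*
 * On D0i3 exit the firmware reports why it woke up; the iterator below
 * maps that onto the connected station vif: a deauth-triggered wakeup
 * becomes a connection loss, a missed-beacon wakeup becomes a beacon
 * loss, and otherwise the (possibly rekeyed) keys are updated.
 */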
static void iwl_mvm_d0i3_exit_work_iter(void *_data, u8 *mac,
					struct ieee80211_vif *vif)
{
	struct iwl_mvm_d0i3_exit_work_iter_data *data = _data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 reasons = data->wakeup_reasons;

	/* consider only the relevant station interface */
	if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
	    data->mvm->d0i3_ap_sta_id != mvmvif->ap_sta_id)
		return;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)
		iwl_mvm_connection_loss(data->mvm, vif, "D0i3");
	else if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON)
		ieee80211_beacon_loss(vif);
	else
		iwl_mvm_d0i3_update_keys(data->mvm, vif, data->status);
}

void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
{
	struct ieee80211_sta *sta = NULL;
	struct iwl_mvm_sta *mvm_ap_sta;
	int i;
	bool wake_queues = false;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->d0i3_tx_lock);

	if (mvm->d0i3_ap_sta_id == IWL_MVM_STATION_COUNT)
		goto out;

	IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");

	/* get the sta in order to update seq numbers and re-enqueue skbs */
	sta = rcu_dereference_protected(
			mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id],
			lockdep_is_held(&mvm->mutex));

	if (IS_ERR_OR_NULL(sta)) {
		sta = NULL;
		goto out;
	}

	if (mvm->d0i3_offloading && qos_seq) {
		/* update qos seq numbers if offloading was enabled */
		mvm_ap_sta = iwl_mvm_sta_from_mac80211(sta);
		for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
			u16 seq = le16_to_cpu(qos_seq[i]);
			/* firmware stores last-used one, we store next one */
			seq += 0x10;
			mvm_ap_sta->tid_data[i].seq_number = seq;
		}
	}
out:
	/* re-enqueue (or drop) all packets */
	while (!skb_queue_empty(&mvm->d0i3_tx)) {
		struct sk_buff *skb = __skb_dequeue(&mvm->d0i3_tx);

		if (!sta || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);

		/* if the skb_queue is not empty, we need to wake queues */
		wake_queues = true;
	}
	clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
	wake_up(&mvm->d0i3_exit_waitq);
	mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
	if (wake_queues)
		ieee80211_wake_queues(mvm->hw);

	spin_unlock_bh(&mvm->d0i3_tx_lock);
}

static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
	struct iwl_host_cmd get_status_cmd = {
		.id = WOWLAN_GET_STATUSES,
		.flags = CMD_HIGH_PRIO | CMD_WANT_SKB,
	};
	struct iwl_mvm_d0i3_exit_work_iter_data iter_data = {
		.mvm = mvm,
	};
	struct iwl_wowlan_status *status;
	int ret;
	u32 wakeup_reasons = 0;
	__le16 *qos_seq = NULL;

	mutex_lock(&mvm->mutex);
	ret = iwl_mvm_send_cmd(mvm, &get_status_cmd);
	if (ret)
		goto out;

	if (!get_status_cmd.resp_pkt)
		goto out;

	status = (void *)get_status_cmd.resp_pkt->data;
	wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
	qos_seq = status->qos_seq_ctr;

	IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);

	iter_data.wakeup_reasons = wakeup_reasons;
	iter_data.status = status;
	ieee80211_iterate_active_interfaces(mvm->hw,
					    IEEE80211_IFACE_ITER_NORMAL,
					    iwl_mvm_d0i3_exit_work_iter,
					    &iter_data);
out:
	iwl_mvm_d0i3_enable_tx(mvm, qos_seq);

	IWL_DEBUG_INFO(mvm, "d0i3 exit completed (wakeup reasons: 0x%x)\n",
		       wakeup_reasons);

	/* qos_seq might point inside resp_pkt, so free it only now */
	if (get_status_cmd.resp_pkt)
		iwl_free_resp(&get_status_cmd);

	/* the FW might have updated the regdomain */
	iwl_mvm_update_changed_regdom(mvm);

	iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
	mutex_unlock(&mvm->mutex);
}

int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm)
{
	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
		    CMD_WAKE_UP_TRANS;
	int ret;

	IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");

	if (WARN_ON_ONCE(mvm->cur_ucode != IWL_UCODE_REGULAR))
		return -EINVAL;

	mutex_lock(&mvm->d0i3_suspend_mutex);
	if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) {
		IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n");
		__set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
		mutex_unlock(&mvm->d0i3_suspend_mutex);
		return 0;
	}
	mutex_unlock(&mvm->d0i3_suspend_mutex);

	ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
	if (ret)
		goto out;

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_exit_d0i3_iterator,
						   mvm);
out:
	schedule_work(&mvm->d0i3_exit_work);
	return ret;
}

int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK);
	return _iwl_mvm_exit_d0i3(mvm);
}

#define IWL_MVM_COMMON_OPS					\
	/* these could be differentiated */			\
	.async_cb = iwl_mvm_async_cb,				\
	.queue_full = iwl_mvm_stop_sw_queue,			\
	.queue_not_full = iwl_mvm_wake_sw_queue,		\
	.hw_rf_kill = iwl_mvm_set_hw_rfkill_state,		\
	.free_skb = iwl_mvm_free_skb,				\
	.nic_error = iwl_mvm_nic_error,				\
	.cmd_queue_full = iwl_mvm_cmd_queue_full,		\
	.nic_config = iwl_mvm_nic_config,			\
	.enter_d0i3 = iwl_mvm_enter_d0i3,			\
	.exit_d0i3 = iwl_mvm_exit_d0i3,				\
	/* as we only register one, these MUST be common! */	\
	.start = iwl_op_mode_mvm_start,				\
	.stop = iwl_op_mode_mvm_stop

static const struct iwl_op_mode_ops iwl_mvm_ops = {
	IWL_MVM_COMMON_OPS,
	.rx = iwl_mvm_rx,
};

static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
			      struct napi_struct *napi,
			      struct iwl_rx_cmd_buffer *rxb,
			      unsigned int queue)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	if (unlikely(pkt->hdr.cmd == FRAME_RELEASE))
		iwl_mvm_rx_frame_release(mvm, rxb, queue);
	else
		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
}

static const struct iwl_op_mode_ops iwl_mvm_ops_mq = {
	IWL_MVM_COMMON_OPS,
	.rx = iwl_mvm_rx_mq,
	.rx_rss = iwl_mvm_rx_mq_rss,
};