1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* 3 * Copyright (C) 2024-2025 Intel Corporation 4 */ 5 #include <linux/rtnetlink.h> 6 #include <net/mac80211.h> 7 8 #include "fw/api/rx.h" 9 #include "fw/api/datapath.h" 10 #include "fw/api/commands.h" 11 #include "fw/api/offload.h" 12 #include "fw/api/coex.h" 13 #include "fw/dbg.h" 14 #include "fw/uefi.h" 15 16 #include "mld.h" 17 #include "mlo.h" 18 #include "mac80211.h" 19 #include "led.h" 20 #include "scan.h" 21 #include "tx.h" 22 #include "sta.h" 23 #include "regulatory.h" 24 #include "thermal.h" 25 #include "low_latency.h" 26 #include "hcmd.h" 27 #include "fw/api/location.h" 28 29 #include "iwl-nvm-parse.h" 30 31 #define DRV_DESCRIPTION "Intel(R) MLD wireless driver for Linux" 32 MODULE_DESCRIPTION(DRV_DESCRIPTION); 33 MODULE_LICENSE("GPL"); 34 MODULE_IMPORT_NS("IWLWIFI"); 35 36 static const struct iwl_op_mode_ops iwl_mld_ops; 37 38 static int __init iwl_mld_init(void) 39 { 40 int ret = iwl_opmode_register("iwlmld", &iwl_mld_ops); 41 42 if (ret) 43 pr_err("Unable to register MLD op_mode: %d\n", ret); 44 45 return ret; 46 } 47 module_init(iwl_mld_init); 48 49 static void __exit iwl_mld_exit(void) 50 { 51 iwl_opmode_deregister("iwlmld"); 52 } 53 module_exit(iwl_mld_exit); 54 55 static void iwl_mld_hw_set_regulatory(struct iwl_mld *mld) 56 { 57 struct wiphy *wiphy = mld->wiphy; 58 59 wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; 60 wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR; 61 } 62 63 VISIBLE_IF_IWLWIFI_KUNIT 64 void iwl_construct_mld(struct iwl_mld *mld, struct iwl_trans *trans, 65 const struct iwl_rf_cfg *cfg, const struct iwl_fw *fw, 66 struct ieee80211_hw *hw, struct dentry *dbgfs_dir) 67 { 68 mld->dev = trans->dev; 69 mld->trans = trans; 70 mld->cfg = cfg; 71 mld->fw = fw; 72 mld->hw = hw; 73 mld->wiphy = hw->wiphy; 74 mld->debugfs_dir = dbgfs_dir; 75 76 iwl_notification_wait_init(&mld->notif_wait); 77 78 /* Setup async RX handling */ 79 spin_lock_init(&mld->async_handlers_lock); 80 wiphy_work_init(&mld->async_handlers_wk, 81 iwl_mld_async_handlers_wk); 82 83 /* Dynamic Queue Allocation */ 84 spin_lock_init(&mld->add_txqs_lock); 85 INIT_LIST_HEAD(&mld->txqs_to_add); 86 wiphy_work_init(&mld->add_txqs_wk, iwl_mld_add_txqs_wk); 87 88 /* Setup RX queues sync wait queue */ 89 init_waitqueue_head(&mld->rxq_sync.waitq); 90 } 91 EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_construct_mld); 92 93 static void __acquires(&mld->wiphy->mtx) 94 iwl_mld_fwrt_dump_start(void *ctx) 95 { 96 struct iwl_mld *mld = ctx; 97 98 wiphy_lock(mld->wiphy); 99 } 100 101 static void __releases(&mld->wiphy->mtx) 102 iwl_mld_fwrt_dump_end(void *ctx) 103 { 104 struct iwl_mld *mld = ctx; 105 106 wiphy_unlock(mld->wiphy); 107 } 108 109 static bool iwl_mld_d3_debug_enable(void *ctx) 110 { 111 return IWL_MLD_D3_DEBUG; 112 } 113 114 static int iwl_mld_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd) 115 { 116 struct iwl_mld *mld = (struct iwl_mld *)ctx; 117 int ret; 118 119 wiphy_lock(mld->wiphy); 120 ret = iwl_mld_send_cmd(mld, host_cmd); 121 wiphy_unlock(mld->wiphy); 122 123 return ret; 124 } 125 126 static const struct iwl_fw_runtime_ops iwl_mld_fwrt_ops = { 127 .dump_start = iwl_mld_fwrt_dump_start, 128 .dump_end = iwl_mld_fwrt_dump_end, 129 .send_hcmd = iwl_mld_fwrt_send_hcmd, 130 .d3_debug_enable = iwl_mld_d3_debug_enable, 131 }; 132 133 static void 134 iwl_mld_construct_fw_runtime(struct iwl_mld *mld, struct iwl_trans *trans, 135 const struct iwl_fw *fw, 136 struct dentry *debugfs_dir) 137 { 138 iwl_fw_runtime_init(&mld->fwrt, trans, fw, &iwl_mld_fwrt_ops, mld, 139 NULL, NULL, debugfs_dir); 140 141 iwl_fw_set_current_image(&mld->fwrt, IWL_UCODE_REGULAR); 142 } 143 144 /* Please keep this array *SORTED* by hex value. 145 * Access is done through binary search 146 */ 147 static const struct iwl_hcmd_names iwl_mld_legacy_names[] = { 148 HCMD_NAME(UCODE_ALIVE_NTFY), 149 HCMD_NAME(INIT_COMPLETE_NOTIF), 150 HCMD_NAME(PHY_CONTEXT_CMD), 151 HCMD_NAME(SCAN_CFG_CMD), 152 HCMD_NAME(SCAN_REQ_UMAC), 153 HCMD_NAME(SCAN_ABORT_UMAC), 154 HCMD_NAME(SCAN_COMPLETE_UMAC), 155 HCMD_NAME(TX_CMD), 156 HCMD_NAME(TXPATH_FLUSH), 157 HCMD_NAME(LEDS_CMD), 158 HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION), 159 HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION), 160 HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD), 161 HCMD_NAME(POWER_TABLE_CMD), 162 HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION), 163 HCMD_NAME(BEACON_NOTIFICATION), 164 HCMD_NAME(BEACON_TEMPLATE_CMD), 165 HCMD_NAME(TX_ANT_CONFIGURATION_CMD), 166 HCMD_NAME(REDUCE_TX_POWER_CMD), 167 HCMD_NAME(MISSED_BEACONS_NOTIFICATION), 168 HCMD_NAME(MAC_PM_POWER_TABLE), 169 HCMD_NAME(MFUART_LOAD_NOTIFICATION), 170 HCMD_NAME(RSS_CONFIG_CMD), 171 HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC), 172 HCMD_NAME(REPLY_RX_MPDU_CMD), 173 HCMD_NAME(BA_NOTIF), 174 HCMD_NAME(MCC_UPDATE_CMD), 175 HCMD_NAME(MCC_CHUB_UPDATE_CMD), 176 HCMD_NAME(MCAST_FILTER_CMD), 177 HCMD_NAME(REPLY_BEACON_FILTERING_CMD), 178 HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD), 179 HCMD_NAME(MATCH_FOUND_NOTIFICATION), 180 HCMD_NAME(WOWLAN_PATTERNS), 181 HCMD_NAME(WOWLAN_CONFIGURATION), 182 HCMD_NAME(WOWLAN_TSC_RSC_PARAM), 183 HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL), 184 HCMD_NAME(DEBUG_HOST_COMMAND), 185 HCMD_NAME(LDBG_CONFIG_CMD), 186 }; 187 188 /* Please keep this array *SORTED* by hex value. 189 * Access is done through binary search 190 */ 191 static const struct iwl_hcmd_names iwl_mld_system_names[] = { 192 HCMD_NAME(SHARED_MEM_CFG_CMD), 193 HCMD_NAME(SOC_CONFIGURATION_CMD), 194 HCMD_NAME(INIT_EXTENDED_CFG_CMD), 195 HCMD_NAME(FW_ERROR_RECOVERY_CMD), 196 HCMD_NAME(RFI_CONFIG_CMD), 197 HCMD_NAME(RFI_GET_FREQ_TABLE_CMD), 198 HCMD_NAME(SYSTEM_STATISTICS_CMD), 199 HCMD_NAME(SYSTEM_STATISTICS_END_NOTIF), 200 }; 201 202 /* Please keep this array *SORTED* by hex value. 203 * Access is done through binary search 204 */ 205 static const struct iwl_hcmd_names iwl_mld_reg_and_nvm_names[] = { 206 HCMD_NAME(LARI_CONFIG_CHANGE), 207 HCMD_NAME(NVM_GET_INFO), 208 HCMD_NAME(TAS_CONFIG), 209 HCMD_NAME(SAR_OFFSET_MAPPING_TABLE_CMD), 210 HCMD_NAME(MCC_ALLOWED_AP_TYPE_CMD), 211 }; 212 213 /* Please keep this array *SORTED* by hex value. 214 * Access is done through binary search 215 */ 216 static const struct iwl_hcmd_names iwl_mld_debug_names[] = { 217 HCMD_NAME(HOST_EVENT_CFG), 218 HCMD_NAME(DBGC_SUSPEND_RESUME), 219 }; 220 221 /* Please keep this array *SORTED* by hex value. 222 * Access is done through binary search 223 */ 224 static const struct iwl_hcmd_names iwl_mld_mac_conf_names[] = { 225 HCMD_NAME(LOW_LATENCY_CMD), 226 HCMD_NAME(SESSION_PROTECTION_CMD), 227 HCMD_NAME(MAC_CONFIG_CMD), 228 HCMD_NAME(LINK_CONFIG_CMD), 229 HCMD_NAME(STA_CONFIG_CMD), 230 HCMD_NAME(AUX_STA_CMD), 231 HCMD_NAME(STA_REMOVE_CMD), 232 HCMD_NAME(ROC_CMD), 233 HCMD_NAME(MISSED_BEACONS_NOTIF), 234 HCMD_NAME(EMLSR_TRANS_FAIL_NOTIF), 235 HCMD_NAME(ROC_NOTIF), 236 HCMD_NAME(CHANNEL_SWITCH_ERROR_NOTIF), 237 HCMD_NAME(SESSION_PROTECTION_NOTIF), 238 HCMD_NAME(PROBE_RESPONSE_DATA_NOTIF), 239 HCMD_NAME(CHANNEL_SWITCH_START_NOTIF), 240 }; 241 242 /* Please keep this array *SORTED* by hex value. 243 * Access is done through binary search 244 */ 245 static const struct iwl_hcmd_names iwl_mld_data_path_names[] = { 246 HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD), 247 HCMD_NAME(WNM_PLATFORM_PTM_REQUEST_CMD), 248 HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD), 249 HCMD_NAME(RFH_QUEUE_CONFIG_CMD), 250 HCMD_NAME(TLC_MNG_CONFIG_CMD), 251 HCMD_NAME(RX_BAID_ALLOCATION_CONFIG_CMD), 252 HCMD_NAME(SCD_QUEUE_CONFIG_CMD), 253 HCMD_NAME(OMI_SEND_STATUS_NOTIF), 254 HCMD_NAME(ESR_MODE_NOTIF), 255 HCMD_NAME(MONITOR_NOTIF), 256 HCMD_NAME(TLC_MNG_UPDATE_NOTIF), 257 HCMD_NAME(MU_GROUP_MGMT_NOTIF), 258 }; 259 260 /* Please keep this array *SORTED* by hex value. 261 * Access is done through binary search 262 */ 263 static const struct iwl_hcmd_names iwl_mld_location_names[] = { 264 HCMD_NAME(TOF_RANGE_REQ_CMD), 265 HCMD_NAME(TOF_RANGE_RESPONSE_NOTIF), 266 }; 267 268 /* Please keep this array *SORTED* by hex value. 269 * Access is done through binary search 270 */ 271 static const struct iwl_hcmd_names iwl_mld_phy_names[] = { 272 HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE), 273 HCMD_NAME(CTDP_CONFIG_CMD), 274 HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD), 275 HCMD_NAME(PER_CHAIN_LIMIT_OFFSET_CMD), 276 HCMD_NAME(CT_KILL_NOTIFICATION), 277 HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE), 278 }; 279 280 /* Please keep this array *SORTED* by hex value. 281 * Access is done through binary search 282 */ 283 static const struct iwl_hcmd_names iwl_mld_statistics_names[] = { 284 HCMD_NAME(STATISTICS_OPER_NOTIF), 285 HCMD_NAME(STATISTICS_OPER_PART1_NOTIF), 286 }; 287 288 /* Please keep this array *SORTED* by hex value. 289 * Access is done through binary search 290 */ 291 static const struct iwl_hcmd_names iwl_mld_prot_offload_names[] = { 292 HCMD_NAME(WOWLAN_WAKE_PKT_NOTIFICATION), 293 HCMD_NAME(WOWLAN_INFO_NOTIFICATION), 294 HCMD_NAME(D3_END_NOTIFICATION), 295 }; 296 297 /* Please keep this array *SORTED* by hex value. 298 * Access is done through binary search 299 */ 300 static const struct iwl_hcmd_names iwl_mld_coex_names[] = { 301 HCMD_NAME(PROFILE_NOTIF), 302 }; 303 304 VISIBLE_IF_IWLWIFI_KUNIT 305 const struct iwl_hcmd_arr iwl_mld_groups[] = { 306 [LEGACY_GROUP] = HCMD_ARR(iwl_mld_legacy_names), 307 [LONG_GROUP] = HCMD_ARR(iwl_mld_legacy_names), 308 [SYSTEM_GROUP] = HCMD_ARR(iwl_mld_system_names), 309 [MAC_CONF_GROUP] = HCMD_ARR(iwl_mld_mac_conf_names), 310 [DATA_PATH_GROUP] = HCMD_ARR(iwl_mld_data_path_names), 311 [LOCATION_GROUP] = HCMD_ARR(iwl_mld_location_names), 312 [REGULATORY_AND_NVM_GROUP] = HCMD_ARR(iwl_mld_reg_and_nvm_names), 313 [DEBUG_GROUP] = HCMD_ARR(iwl_mld_debug_names), 314 [PHY_OPS_GROUP] = HCMD_ARR(iwl_mld_phy_names), 315 [STATISTICS_GROUP] = HCMD_ARR(iwl_mld_statistics_names), 316 [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mld_prot_offload_names), 317 [BT_COEX_GROUP] = HCMD_ARR(iwl_mld_coex_names), 318 }; 319 EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_groups); 320 321 #if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS) 322 const unsigned int global_iwl_mld_goups_size = ARRAY_SIZE(iwl_mld_groups); 323 EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(global_iwl_mld_goups_size); 324 #endif 325 326 static void 327 iwl_mld_configure_trans(struct iwl_op_mode *op_mode) 328 { 329 struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); 330 static const u8 no_reclaim_cmds[] = {TX_CMD}; 331 struct iwl_trans *trans = mld->trans; 332 u32 eckv_value; 333 334 iwl_bios_setup_step(trans, &mld->fwrt); 335 iwl_uefi_get_step_table(trans); 336 337 if (iwl_bios_get_eckv(&mld->fwrt, &eckv_value)) 338 IWL_DEBUG_RADIO(mld, "ECKV table doesn't exist in BIOS\n"); 339 else 340 trans->conf.ext_32khz_clock_valid = !!eckv_value; 341 342 trans->conf.rx_buf_size = iwl_amsdu_size_to_rxb_size(); 343 trans->conf.command_groups = iwl_mld_groups; 344 trans->conf.command_groups_size = ARRAY_SIZE(iwl_mld_groups); 345 trans->conf.fw_reset_handshake = true; 346 trans->conf.queue_alloc_cmd_ver = 347 iwl_fw_lookup_cmd_ver(mld->fw, WIDE_ID(DATA_PATH_GROUP, 348 SCD_QUEUE_CONFIG_CMD), 349 0); 350 trans->conf.cb_data_offs = offsetof(struct ieee80211_tx_info, 351 driver_data[2]); 352 BUILD_BUG_ON(sizeof(no_reclaim_cmds) > 353 sizeof(trans->conf.no_reclaim_cmds)); 354 memcpy(trans->conf.no_reclaim_cmds, no_reclaim_cmds, 355 sizeof(no_reclaim_cmds)); 356 trans->conf.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); 357 358 trans->conf.rx_mpdu_cmd = REPLY_RX_MPDU_CMD; 359 trans->conf.rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start); 360 trans->conf.wide_cmd_header = true; 361 362 iwl_trans_op_mode_enter(trans, op_mode); 363 } 364 365 /* 366 ***************************************************** 367 * op mode ops functions 368 ***************************************************** 369 */ 370 371 #define NUM_FW_LOAD_RETRIES 3 372 static struct iwl_op_mode * 373 iwl_op_mode_mld_start(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg, 374 const struct iwl_fw *fw, struct dentry *dbgfs_dir) 375 { 376 struct ieee80211_hw *hw; 377 struct iwl_op_mode *op_mode; 378 struct iwl_mld *mld; 379 int ret; 380 381 /* Allocate and initialize a new hardware device */ 382 hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) + 383 sizeof(struct iwl_mld), 384 &iwl_mld_hw_ops); 385 if (!hw) 386 return ERR_PTR(-ENOMEM); 387 388 op_mode = hw->priv; 389 390 op_mode->ops = &iwl_mld_ops; 391 392 mld = IWL_OP_MODE_GET_MLD(op_mode); 393 394 iwl_construct_mld(mld, trans, cfg, fw, hw, dbgfs_dir); 395 396 /* we'll verify later it matches between commands */ 397 mld->fw_rates_ver_3 = iwl_fw_lookup_cmd_ver(mld->fw, TX_CMD, 0) >= 11; 398 399 iwl_mld_construct_fw_runtime(mld, trans, fw, dbgfs_dir); 400 401 iwl_mld_get_bios_tables(mld); 402 iwl_uefi_get_sgom_table(trans, &mld->fwrt); 403 mld->bios_enable_puncturing = iwl_uefi_get_puncturing(&mld->fwrt); 404 405 iwl_mld_hw_set_regulatory(mld); 406 407 /* Configure transport layer with the opmode specific params */ 408 iwl_mld_configure_trans(op_mode); 409 410 /* needed for regulatory init */ 411 rtnl_lock(); 412 /* Needed for sending commands */ 413 wiphy_lock(mld->wiphy); 414 415 for (int i = 0; i < NUM_FW_LOAD_RETRIES; i++) { 416 ret = iwl_mld_load_fw(mld); 417 if (!ret) 418 break; 419 } 420 421 if (!ret) { 422 mld->nvm_data = iwl_get_nvm(mld->trans, mld->fw, 0, 0); 423 if (IS_ERR(mld->nvm_data)) { 424 IWL_ERR(mld, "Failed to read NVM: %d\n", ret); 425 ret = PTR_ERR(mld->nvm_data); 426 } 427 } 428 429 if (ret) { 430 wiphy_unlock(mld->wiphy); 431 rtnl_unlock(); 432 goto err; 433 } 434 435 /* We are about to stop the FW. Notifications may require an 436 * operational FW, so handle them all here before we stop. 437 */ 438 wiphy_work_flush(mld->wiphy, &mld->async_handlers_wk); 439 440 iwl_mld_stop_fw(mld); 441 442 wiphy_unlock(mld->wiphy); 443 rtnl_unlock(); 444 445 ret = iwl_mld_leds_init(mld); 446 if (ret) 447 goto free_nvm; 448 449 ret = iwl_mld_alloc_scan_cmd(mld); 450 if (ret) 451 goto leds_exit; 452 453 ret = iwl_mld_low_latency_init(mld); 454 if (ret) 455 goto free_scan_cmd; 456 457 ret = iwl_mld_register_hw(mld); 458 if (ret) 459 goto low_latency_free; 460 461 iwl_mld_toggle_tx_ant(mld, &mld->mgmt_tx_ant); 462 463 iwl_mld_add_debugfs_files(mld, dbgfs_dir); 464 iwl_mld_thermal_initialize(mld); 465 466 iwl_mld_ptp_init(mld); 467 468 return op_mode; 469 470 low_latency_free: 471 iwl_mld_low_latency_free(mld); 472 free_scan_cmd: 473 kfree(mld->scan.cmd); 474 leds_exit: 475 iwl_mld_leds_exit(mld); 476 free_nvm: 477 kfree(mld->nvm_data); 478 err: 479 iwl_trans_op_mode_leave(mld->trans); 480 ieee80211_free_hw(mld->hw); 481 return ERR_PTR(ret); 482 } 483 484 static void 485 iwl_op_mode_mld_stop(struct iwl_op_mode *op_mode) 486 { 487 struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); 488 489 iwl_mld_ptp_remove(mld); 490 iwl_mld_leds_exit(mld); 491 492 iwl_mld_thermal_exit(mld); 493 494 wiphy_lock(mld->wiphy); 495 iwl_mld_low_latency_stop(mld); 496 iwl_mld_deinit_time_sync(mld); 497 wiphy_unlock(mld->wiphy); 498 499 ieee80211_unregister_hw(mld->hw); 500 501 iwl_fw_runtime_free(&mld->fwrt); 502 iwl_mld_low_latency_free(mld); 503 504 iwl_trans_op_mode_leave(mld->trans); 505 506 kfree(mld->nvm_data); 507 kfree(mld->scan.cmd); 508 kfree(mld->error_recovery_buf); 509 kfree(mld->mcast_filter_cmd); 510 511 ieee80211_free_hw(mld->hw); 512 } 513 514 static void iwl_mld_queue_state_change(struct iwl_op_mode *op_mode, 515 int hw_queue, bool queue_full) 516 { 517 struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); 518 struct ieee80211_txq *txq; 519 struct iwl_mld_sta *mld_sta; 520 struct iwl_mld_txq *mld_txq; 521 522 rcu_read_lock(); 523 524 txq = rcu_dereference(mld->fw_id_to_txq[hw_queue]); 525 if (!txq) { 526 rcu_read_unlock(); 527 528 if (queue_full) { 529 /* An internal queue is not expected to become full */ 530 IWL_WARN(mld, 531 "Internal hw_queue %d is full! stopping all queues\n", 532 hw_queue); 533 /* Stop all queues, as an internal queue is not 534 * mapped to a mac80211 one 535 */ 536 ieee80211_stop_queues(mld->hw); 537 } else { 538 ieee80211_wake_queues(mld->hw); 539 } 540 541 return; 542 } 543 544 mld_txq = iwl_mld_txq_from_mac80211(txq); 545 mld_sta = txq->sta ? iwl_mld_sta_from_mac80211(txq->sta) : NULL; 546 547 mld_txq->status.stop_full = queue_full; 548 549 if (!queue_full && mld_sta && 550 mld_sta->sta_state != IEEE80211_STA_NOTEXIST) { 551 local_bh_disable(); 552 iwl_mld_tx_from_txq(mld, txq); 553 local_bh_enable(); 554 } 555 556 rcu_read_unlock(); 557 } 558 559 static void 560 iwl_mld_queue_full(struct iwl_op_mode *op_mode, int hw_queue) 561 { 562 iwl_mld_queue_state_change(op_mode, hw_queue, true); 563 } 564 565 static void 566 iwl_mld_queue_not_full(struct iwl_op_mode *op_mode, int hw_queue) 567 { 568 iwl_mld_queue_state_change(op_mode, hw_queue, false); 569 } 570 571 static bool 572 iwl_mld_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) 573 { 574 struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); 575 576 iwl_mld_set_hwkill(mld, state); 577 578 return false; 579 } 580 581 static void 582 iwl_mld_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) 583 { 584 struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); 585 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 586 587 iwl_trans_free_tx_cmd(mld->trans, info->driver_data[1]); 588 ieee80211_free_txskb(mld->hw, skb); 589 } 590 591 static void iwl_mld_read_error_recovery_buffer(struct iwl_mld *mld) 592 { 593 u32 src_size = mld->fw->ucode_capa.error_log_size; 594 u32 src_addr = mld->fw->ucode_capa.error_log_addr; 595 u8 *recovery_buf; 596 int ret; 597 598 /* no recovery buffer size defined in a TLV */ 599 if (!src_size) 600 return; 601 602 recovery_buf = kzalloc(src_size, GFP_ATOMIC); 603 if (!recovery_buf) 604 return; 605 606 ret = iwl_trans_read_mem_bytes(mld->trans, src_addr, 607 recovery_buf, src_size); 608 if (ret) { 609 IWL_ERR(mld, "Failed to read error recovery buffer (%d)\n", 610 ret); 611 kfree(recovery_buf); 612 return; 613 } 614 615 mld->error_recovery_buf = recovery_buf; 616 } 617 618 static void iwl_mld_restart_nic(struct iwl_mld *mld) 619 { 620 iwl_mld_read_error_recovery_buffer(mld); 621 622 mld->fwrt.trans->dbg.restart_required = false; 623 624 ieee80211_restart_hw(mld->hw); 625 } 626 627 static void 628 iwl_mld_nic_error(struct iwl_op_mode *op_mode, 629 enum iwl_fw_error_type type) 630 { 631 struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); 632 bool trans_dead = test_bit(STATUS_TRANS_DEAD, &mld->trans->status); 633 634 if (type == IWL_ERR_TYPE_CMD_QUEUE_FULL) 635 IWL_ERR(mld, "Command queue full!\n"); 636 else if (!trans_dead && !mld->fw_status.do_not_dump_once) 637 iwl_fwrt_dump_error_logs(&mld->fwrt); 638 639 mld->fw_status.do_not_dump_once = false; 640 641 /* It is necessary to abort any os scan here because mac80211 requires 642 * having the scan cleared before restarting. 643 * We'll reset the scan_status to NONE in restart cleanup in 644 * the next drv_start() call from mac80211. If ieee80211_hw_restart 645 * isn't called scan status will stay busy. 646 */ 647 iwl_mld_report_scan_aborted(mld); 648 649 /* 650 * This should be first thing before trying to collect any 651 * data to avoid endless loops if any HW error happens while 652 * collecting debug data. 653 * It might not actually be true that we'll restart, but the 654 * setting doesn't matter if we're going to be unbound either. 655 */ 656 if (type != IWL_ERR_TYPE_RESET_HS_TIMEOUT && 657 mld->fw_status.running) 658 mld->fw_status.in_hw_restart = true; 659 } 660 661 static void iwl_mld_dump_error(struct iwl_op_mode *op_mode, 662 struct iwl_fw_error_dump_mode *mode) 663 { 664 struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); 665 666 /* if we come in from opmode we have the mutex held */ 667 if (mode->context == IWL_ERR_CONTEXT_FROM_OPMODE) { 668 lockdep_assert_wiphy(mld->wiphy); 669 iwl_fw_error_collect(&mld->fwrt); 670 } else { 671 wiphy_lock(mld->wiphy); 672 if (mode->context != IWL_ERR_CONTEXT_ABORT) 673 iwl_fw_error_collect(&mld->fwrt); 674 wiphy_unlock(mld->wiphy); 675 } 676 } 677 678 static bool iwl_mld_sw_reset(struct iwl_op_mode *op_mode, 679 enum iwl_fw_error_type type) 680 { 681 struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); 682 683 /* SW reset can happen for TOP error w/o NIC error, so 684 * also abort scan here and set in_hw_restart, when we 685 * had a NIC error both were already done. 686 */ 687 iwl_mld_report_scan_aborted(mld); 688 mld->fw_status.in_hw_restart = true; 689 690 /* Do restart only in the following conditions are met: 691 * - we consider the FW as running 692 * - The trigger that brought us here is defined as one that requires 693 * a restart (in the debug TLVs) 694 */ 695 if (!mld->fw_status.running || !mld->fwrt.trans->dbg.restart_required) 696 return false; 697 698 iwl_mld_restart_nic(mld); 699 return true; 700 } 701 702 static void 703 iwl_mld_time_point(struct iwl_op_mode *op_mode, 704 enum iwl_fw_ini_time_point tp_id, 705 union iwl_dbg_tlv_tp_data *tp_data) 706 { 707 struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); 708 709 iwl_dbg_tlv_time_point(&mld->fwrt, tp_id, tp_data); 710 } 711 712 #ifdef CONFIG_PM_SLEEP 713 static void iwl_mld_device_powered_off(struct iwl_op_mode *op_mode) 714 { 715 struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); 716 717 wiphy_lock(mld->wiphy); 718 iwl_mld_stop_fw(mld); 719 mld->fw_status.in_d3 = false; 720 wiphy_unlock(mld->wiphy); 721 } 722 #else 723 static void iwl_mld_device_powered_off(struct iwl_op_mode *op_mode) 724 {} 725 #endif 726 727 static const struct iwl_op_mode_ops iwl_mld_ops = { 728 .start = iwl_op_mode_mld_start, 729 .stop = iwl_op_mode_mld_stop, 730 .rx = iwl_mld_rx, 731 .rx_rss = iwl_mld_rx_rss, 732 .queue_full = iwl_mld_queue_full, 733 .queue_not_full = iwl_mld_queue_not_full, 734 .hw_rf_kill = iwl_mld_set_hw_rfkill_state, 735 .free_skb = iwl_mld_free_skb, 736 .nic_error = iwl_mld_nic_error, 737 .dump_error = iwl_mld_dump_error, 738 .sw_reset = iwl_mld_sw_reset, 739 .time_point = iwl_mld_time_point, 740 .device_powered_off = pm_sleep_ptr(iwl_mld_device_powered_off), 741 }; 742 743 struct iwl_mld_mod_params iwlmld_mod_params = { 744 .power_scheme = IWL_POWER_SCHEME_BPS, 745 }; 746 747 module_param_named(power_scheme, iwlmld_mod_params.power_scheme, int, 0444); 748 MODULE_PARM_DESC(power_scheme, 749 "power management scheme: 1-active, 2-balanced, default: 2"); 750