1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* 3 * Copyright (C) 2024-2025 Intel Corporation 4 */ 5 #include "mlo.h" 6 #include "phy.h" 7 8 /* Block reasons helper */ 9 #define HANDLE_EMLSR_BLOCKED_REASONS(HOW) \ 10 HOW(PREVENTION) \ 11 HOW(WOWLAN) \ 12 HOW(ROC) \ 13 HOW(NON_BSS) \ 14 HOW(TMP_NON_BSS) \ 15 HOW(TPT) 16 17 static const char * 18 iwl_mld_get_emlsr_blocked_string(enum iwl_mld_emlsr_blocked blocked) 19 { 20 /* Using switch without "default" will warn about missing entries */ 21 switch (blocked) { 22 #define REASON_CASE(x) case IWL_MLD_EMLSR_BLOCKED_##x: return #x; 23 HANDLE_EMLSR_BLOCKED_REASONS(REASON_CASE) 24 #undef REASON_CASE 25 } 26 27 return "ERROR"; 28 } 29 30 static void iwl_mld_print_emlsr_blocked(struct iwl_mld *mld, u32 mask) 31 { 32 #define NAME_FMT(x) "%s" 33 #define NAME_PR(x) (mask & IWL_MLD_EMLSR_BLOCKED_##x) ? "[" #x "]" : "", 34 IWL_DEBUG_INFO(mld, 35 "EMLSR blocked = " HANDLE_EMLSR_BLOCKED_REASONS(NAME_FMT) 36 " (0x%x)\n", 37 HANDLE_EMLSR_BLOCKED_REASONS(NAME_PR) 38 mask); 39 #undef NAME_FMT 40 #undef NAME_PR 41 } 42 43 /* Exit reasons helper */ 44 #define HANDLE_EMLSR_EXIT_REASONS(HOW) \ 45 HOW(BLOCK) \ 46 HOW(MISSED_BEACON) \ 47 HOW(FAIL_ENTRY) \ 48 HOW(CSA) \ 49 HOW(EQUAL_BAND) \ 50 HOW(LOW_RSSI) \ 51 HOW(LINK_USAGE) \ 52 HOW(BT_COEX) \ 53 HOW(CHAN_LOAD) \ 54 HOW(RFI) \ 55 HOW(FW_REQUEST) \ 56 HOW(INVALID) 57 58 static const char * 59 iwl_mld_get_emlsr_exit_string(enum iwl_mld_emlsr_exit exit) 60 { 61 /* Using switch without "default" will warn about missing entries */ 62 switch (exit) { 63 #define REASON_CASE(x) case IWL_MLD_EMLSR_EXIT_##x: return #x; 64 HANDLE_EMLSR_EXIT_REASONS(REASON_CASE) 65 #undef REASON_CASE 66 } 67 68 return "ERROR"; 69 } 70 71 static void iwl_mld_print_emlsr_exit(struct iwl_mld *mld, u32 mask) 72 { 73 #define NAME_FMT(x) "%s" 74 #define NAME_PR(x) (mask & IWL_MLD_EMLSR_EXIT_##x) ? "[" #x "]" : "", 75 IWL_DEBUG_INFO(mld, 76 "EMLSR exit = " HANDLE_EMLSR_EXIT_REASONS(NAME_FMT) 77 " (0x%x)\n", 78 HANDLE_EMLSR_EXIT_REASONS(NAME_PR) 79 mask); 80 #undef NAME_FMT 81 #undef NAME_PR 82 } 83 84 void iwl_mld_emlsr_prevent_done_wk(struct wiphy *wiphy, struct wiphy_work *wk) 85 { 86 struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif, 87 emlsr.prevent_done_wk.work); 88 struct ieee80211_vif *vif = 89 container_of((void *)mld_vif, struct ieee80211_vif, drv_priv); 90 91 if (WARN_ON(!(mld_vif->emlsr.blocked_reasons & 92 IWL_MLD_EMLSR_BLOCKED_PREVENTION))) 93 return; 94 95 iwl_mld_unblock_emlsr(mld_vif->mld, vif, 96 IWL_MLD_EMLSR_BLOCKED_PREVENTION); 97 } 98 99 void iwl_mld_emlsr_tmp_non_bss_done_wk(struct wiphy *wiphy, 100 struct wiphy_work *wk) 101 { 102 struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif, 103 emlsr.tmp_non_bss_done_wk.work); 104 struct ieee80211_vif *vif = 105 container_of((void *)mld_vif, struct ieee80211_vif, drv_priv); 106 107 if (WARN_ON(!(mld_vif->emlsr.blocked_reasons & 108 IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS))) 109 return; 110 111 iwl_mld_unblock_emlsr(mld_vif->mld, vif, 112 IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS); 113 } 114 115 #define IWL_MLD_TRIGGER_LINK_SEL_TIME (HZ * IWL_MLD_TRIGGER_LINK_SEL_TIME_SEC) 116 #define IWL_MLD_SCAN_EXPIRE_TIME (HZ * IWL_MLD_SCAN_EXPIRE_TIME_SEC) 117 118 /* Exit reasons that can cause longer EMLSR prevention */ 119 #define IWL_MLD_PREVENT_EMLSR_REASONS (IWL_MLD_EMLSR_EXIT_MISSED_BEACON | \ 120 IWL_MLD_EMLSR_EXIT_LINK_USAGE | \ 121 IWL_MLD_EMLSR_EXIT_FW_REQUEST) 122 #define IWL_MLD_PREVENT_EMLSR_TIMEOUT (HZ * 400) 123 124 #define IWL_MLD_EMLSR_PREVENT_SHORT (HZ * 300) 125 #define IWL_MLD_EMLSR_PREVENT_LONG (HZ * 600) 126 127 static void iwl_mld_check_emlsr_prevention(struct iwl_mld *mld, 128 struct iwl_mld_vif *mld_vif, 129 enum iwl_mld_emlsr_exit reason) 130 { 131 unsigned long delay; 132 133 /* 134 * Reset the counter if more than 400 seconds have passed between one 135 * exit and the other, or if we exited due to a different reason. 136 * Will also reset the counter after the long prevention is done. 137 */ 138 if (time_after(jiffies, mld_vif->emlsr.last_exit_ts + 139 IWL_MLD_PREVENT_EMLSR_TIMEOUT) || 140 mld_vif->emlsr.last_exit_reason != reason) 141 mld_vif->emlsr.exit_repeat_count = 0; 142 143 mld_vif->emlsr.last_exit_reason = reason; 144 mld_vif->emlsr.last_exit_ts = jiffies; 145 mld_vif->emlsr.exit_repeat_count++; 146 147 /* 148 * Do not add a prevention when the reason was a block. For a block, 149 * EMLSR will be enabled again on unblock. 150 */ 151 if (reason == IWL_MLD_EMLSR_EXIT_BLOCK) 152 return; 153 154 /* Set prevention for a minimum of 30 seconds */ 155 mld_vif->emlsr.blocked_reasons |= IWL_MLD_EMLSR_BLOCKED_PREVENTION; 156 delay = IWL_MLD_TRIGGER_LINK_SEL_TIME; 157 158 /* Handle repeats for reasons that can cause long prevention */ 159 if (mld_vif->emlsr.exit_repeat_count > 1 && 160 reason & IWL_MLD_PREVENT_EMLSR_REASONS) { 161 if (mld_vif->emlsr.exit_repeat_count == 2) 162 delay = IWL_MLD_EMLSR_PREVENT_SHORT; 163 else 164 delay = IWL_MLD_EMLSR_PREVENT_LONG; 165 166 /* 167 * The timeouts are chosen so that this will not happen, i.e. 168 * IWL_MLD_EMLSR_PREVENT_LONG > IWL_MLD_PREVENT_EMLSR_TIMEOUT 169 */ 170 WARN_ON(mld_vif->emlsr.exit_repeat_count > 3); 171 } 172 173 IWL_DEBUG_INFO(mld, 174 "Preventing EMLSR for %ld seconds due to %u exits with the reason = %s (0x%x)\n", 175 delay / HZ, mld_vif->emlsr.exit_repeat_count, 176 iwl_mld_get_emlsr_exit_string(reason), reason); 177 178 wiphy_delayed_work_queue(mld->wiphy, 179 &mld_vif->emlsr.prevent_done_wk, delay); 180 } 181 182 static void iwl_mld_clear_avg_chan_load_iter(struct ieee80211_hw *hw, 183 struct ieee80211_chanctx_conf *ctx, 184 void *dat) 185 { 186 struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx); 187 188 /* It is ok to do it for all chanctx (and not only for the ones that 189 * belong to the EMLSR vif) since EMLSR is not allowed if there is 190 * another vif. 191 */ 192 phy->avg_channel_load_not_by_us = 0; 193 } 194 195 static int _iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif, 196 enum iwl_mld_emlsr_exit exit, u8 link_to_keep, 197 bool sync) 198 { 199 struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); 200 u16 new_active_links; 201 int ret = 0; 202 203 lockdep_assert_wiphy(mld->wiphy); 204 205 /* On entry failure need to exit anyway, even if entered from debugfs */ 206 if (exit != IWL_MLD_EMLSR_EXIT_FAIL_ENTRY && !IWL_MLD_AUTO_EML_ENABLE) 207 return 0; 208 209 /* Ignore exit request if EMLSR is not active */ 210 if (!iwl_mld_emlsr_active(vif)) 211 return 0; 212 213 if (WARN_ON(!ieee80211_vif_is_mld(vif) || !mld_vif->authorized)) 214 return 0; 215 216 if (WARN_ON(!(vif->active_links & BIT(link_to_keep)))) 217 link_to_keep = __ffs(vif->active_links); 218 219 new_active_links = BIT(link_to_keep); 220 IWL_DEBUG_INFO(mld, 221 "Exiting EMLSR. reason = %s (0x%x). Current active links=0x%x, new active links = 0x%x\n", 222 iwl_mld_get_emlsr_exit_string(exit), exit, 223 vif->active_links, new_active_links); 224 225 if (sync) 226 ret = ieee80211_set_active_links(vif, new_active_links); 227 else 228 ieee80211_set_active_links_async(vif, new_active_links); 229 230 /* Update latest exit reason and check EMLSR prevention */ 231 iwl_mld_check_emlsr_prevention(mld, mld_vif, exit); 232 233 /* channel_load_not_by_us is invalid when in EMLSR. 234 * Clear it so wrong values won't be used. 235 */ 236 ieee80211_iter_chan_contexts_atomic(mld->hw, 237 iwl_mld_clear_avg_chan_load_iter, 238 NULL); 239 240 return ret; 241 } 242 243 void iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif, 244 enum iwl_mld_emlsr_exit exit, u8 link_to_keep) 245 { 246 _iwl_mld_exit_emlsr(mld, vif, exit, link_to_keep, false); 247 } 248 249 static int _iwl_mld_emlsr_block(struct iwl_mld *mld, struct ieee80211_vif *vif, 250 enum iwl_mld_emlsr_blocked reason, 251 u8 link_to_keep, bool sync) 252 { 253 struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); 254 255 lockdep_assert_wiphy(mld->wiphy); 256 257 if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif)) 258 return 0; 259 260 if (mld_vif->emlsr.blocked_reasons & reason) 261 return 0; 262 263 mld_vif->emlsr.blocked_reasons |= reason; 264 265 IWL_DEBUG_INFO(mld, 266 "Blocking EMLSR mode. reason = %s (0x%x)\n", 267 iwl_mld_get_emlsr_blocked_string(reason), reason); 268 iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons); 269 270 if (reason == IWL_MLD_EMLSR_BLOCKED_TPT) 271 wiphy_delayed_work_cancel(mld_vif->mld->wiphy, 272 &mld_vif->emlsr.check_tpt_wk); 273 274 return _iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BLOCK, 275 link_to_keep, sync); 276 } 277 278 void iwl_mld_block_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif, 279 enum iwl_mld_emlsr_blocked reason, u8 link_to_keep) 280 { 281 _iwl_mld_emlsr_block(mld, vif, reason, link_to_keep, false); 282 } 283 284 int iwl_mld_block_emlsr_sync(struct iwl_mld *mld, struct ieee80211_vif *vif, 285 enum iwl_mld_emlsr_blocked reason, u8 link_to_keep) 286 { 287 return _iwl_mld_emlsr_block(mld, vif, reason, link_to_keep, true); 288 } 289 290 static void _iwl_mld_select_links(struct iwl_mld *mld, 291 struct ieee80211_vif *vif); 292 293 void iwl_mld_unblock_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif, 294 enum iwl_mld_emlsr_blocked reason) 295 { 296 struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); 297 298 lockdep_assert_wiphy(mld->wiphy); 299 300 if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif)) 301 return; 302 303 if (!(mld_vif->emlsr.blocked_reasons & reason)) 304 return; 305 306 mld_vif->emlsr.blocked_reasons &= ~reason; 307 308 IWL_DEBUG_INFO(mld, 309 "Unblocking EMLSR mode. reason = %s (0x%x)\n", 310 iwl_mld_get_emlsr_blocked_string(reason), reason); 311 iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons); 312 313 if (reason == IWL_MLD_EMLSR_BLOCKED_TPT) 314 wiphy_delayed_work_queue(mld_vif->mld->wiphy, 315 &mld_vif->emlsr.check_tpt_wk, 316 round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW)); 317 318 if (mld_vif->emlsr.blocked_reasons) 319 return; 320 321 IWL_DEBUG_INFO(mld, "EMLSR is unblocked\n"); 322 iwl_mld_int_mlo_scan(mld, vif); 323 } 324 325 static void 326 iwl_mld_vif_iter_emlsr_mode_notif(void *data, u8 *mac, 327 struct ieee80211_vif *vif) 328 { 329 const struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); 330 enum iwl_mvm_fw_esr_recommendation action; 331 const struct iwl_esr_mode_notif *notif = NULL; 332 333 if (iwl_fw_lookup_notif_ver(mld_vif->mld->fw, DATA_PATH_GROUP, 334 ESR_MODE_NOTIF, 0) > 1) { 335 notif = (void *)data; 336 action = le32_to_cpu(notif->action); 337 } else { 338 const struct iwl_esr_mode_notif_v1 *notif_v1 = (void *)data; 339 340 action = le32_to_cpu(notif_v1->action); 341 } 342 343 if (!iwl_mld_vif_has_emlsr_cap(vif)) 344 return; 345 346 switch (action) { 347 case ESR_RECOMMEND_LEAVE: 348 if (notif) 349 IWL_DEBUG_INFO(mld_vif->mld, 350 "FW recommend leave reason = 0x%x\n", 351 le32_to_cpu(notif->leave_reason_mask)); 352 353 iwl_mld_exit_emlsr(mld_vif->mld, vif, 354 IWL_MLD_EMLSR_EXIT_FW_REQUEST, 355 iwl_mld_get_primary_link(vif)); 356 break; 357 case ESR_FORCE_LEAVE: 358 if (notif) 359 IWL_DEBUG_INFO(mld_vif->mld, 360 "FW force leave reason = 0x%x\n", 361 le32_to_cpu(notif->leave_reason_mask)); 362 fallthrough; 363 case ESR_RECOMMEND_ENTER: 364 default: 365 IWL_WARN(mld_vif->mld, "Unexpected EMLSR notification: %d\n", 366 action); 367 } 368 } 369 370 void iwl_mld_handle_emlsr_mode_notif(struct iwl_mld *mld, 371 struct iwl_rx_packet *pkt) 372 { 373 ieee80211_iterate_active_interfaces_mtx(mld->hw, 374 IEEE80211_IFACE_ITER_NORMAL, 375 iwl_mld_vif_iter_emlsr_mode_notif, 376 pkt->data); 377 } 378 379 static void 380 iwl_mld_vif_iter_disconnect_emlsr(void *data, u8 *mac, 381 struct ieee80211_vif *vif) 382 { 383 if (!iwl_mld_vif_has_emlsr_cap(vif)) 384 return; 385 386 ieee80211_connection_loss(vif); 387 } 388 389 void iwl_mld_handle_emlsr_trans_fail_notif(struct iwl_mld *mld, 390 struct iwl_rx_packet *pkt) 391 { 392 const struct iwl_esr_trans_fail_notif *notif = (const void *)pkt->data; 393 u32 fw_link_id = le32_to_cpu(notif->link_id); 394 struct ieee80211_bss_conf *bss_conf = 395 iwl_mld_fw_id_to_link_conf(mld, fw_link_id); 396 397 IWL_DEBUG_INFO(mld, "Failed to %s EMLSR on link %d (FW: %d), reason %d\n", 398 le32_to_cpu(notif->activation) ? "enter" : "exit", 399 bss_conf ? bss_conf->link_id : -1, 400 le32_to_cpu(notif->link_id), 401 le32_to_cpu(notif->err_code)); 402 403 if (IWL_FW_CHECK(mld, !bss_conf, 404 "FW reported failure to %sactivate EMLSR on a non-existing link: %d\n", 405 le32_to_cpu(notif->activation) ? "" : "de", 406 fw_link_id)) { 407 ieee80211_iterate_active_interfaces_mtx( 408 mld->hw, IEEE80211_IFACE_ITER_NORMAL, 409 iwl_mld_vif_iter_disconnect_emlsr, NULL); 410 return; 411 } 412 413 /* Disconnect if we failed to deactivate a link */ 414 if (!le32_to_cpu(notif->activation)) { 415 ieee80211_connection_loss(bss_conf->vif); 416 return; 417 } 418 419 /* 420 * We failed to activate the second link, go back to the link specified 421 * by the firmware as that is the one that is still valid now. 422 */ 423 iwl_mld_exit_emlsr(mld, bss_conf->vif, IWL_MLD_EMLSR_EXIT_FAIL_ENTRY, 424 bss_conf->link_id); 425 } 426 427 /* Active non-station link tracking */ 428 static void iwl_mld_count_non_bss_links(void *_data, u8 *mac, 429 struct ieee80211_vif *vif) 430 { 431 struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); 432 int *count = _data; 433 434 if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION) 435 return; 436 437 *count += iwl_mld_count_active_links(mld_vif->mld, vif); 438 } 439 440 struct iwl_mld_update_emlsr_block_data { 441 bool block; 442 int result; 443 }; 444 445 static void 446 iwl_mld_vif_iter_update_emlsr_non_bss_block(void *_data, u8 *mac, 447 struct ieee80211_vif *vif) 448 { 449 struct iwl_mld_update_emlsr_block_data *data = _data; 450 struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); 451 int ret; 452 453 if (data->block) { 454 ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif, 455 IWL_MLD_EMLSR_BLOCKED_NON_BSS, 456 iwl_mld_get_primary_link(vif)); 457 if (ret) 458 data->result = ret; 459 } else { 460 iwl_mld_unblock_emlsr(mld_vif->mld, vif, 461 IWL_MLD_EMLSR_BLOCKED_NON_BSS); 462 } 463 } 464 465 int iwl_mld_emlsr_check_non_bss_block(struct iwl_mld *mld, 466 int pending_link_changes) 467 { 468 /* An active link of a non-station vif blocks EMLSR. Upon activation 469 * block EMLSR on the bss vif. Upon deactivation, check if this link 470 * was the last non-station link active, and if so unblock the bss vif 471 */ 472 struct iwl_mld_update_emlsr_block_data block_data = {}; 473 int count = pending_link_changes; 474 475 /* No need to count if we are activating a non-BSS link */ 476 if (count <= 0) 477 ieee80211_iterate_active_interfaces_mtx(mld->hw, 478 IEEE80211_IFACE_ITER_NORMAL, 479 iwl_mld_count_non_bss_links, 480 &count); 481 482 /* 483 * We could skip updating it if the block change did not change (and 484 * pending_link_changes is non-zero). 485 */ 486 block_data.block = !!count; 487 488 ieee80211_iterate_active_interfaces_mtx(mld->hw, 489 IEEE80211_IFACE_ITER_NORMAL, 490 iwl_mld_vif_iter_update_emlsr_non_bss_block, 491 &block_data); 492 493 return block_data.result; 494 } 495 496 #define EMLSR_SEC_LINK_MIN_PERC 10 497 #define EMLSR_MIN_TX 3000 498 #define EMLSR_MIN_RX 400 499 500 void iwl_mld_emlsr_check_tpt(struct wiphy *wiphy, struct wiphy_work *wk) 501 { 502 struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif, 503 emlsr.check_tpt_wk.work); 504 struct ieee80211_vif *vif = 505 container_of((void *)mld_vif, struct ieee80211_vif, drv_priv); 506 struct iwl_mld *mld = mld_vif->mld; 507 struct iwl_mld_sta *mld_sta; 508 struct iwl_mld_link *sec_link; 509 unsigned long total_tx = 0, total_rx = 0; 510 unsigned long sec_link_tx = 0, sec_link_rx = 0; 511 u8 sec_link_tx_perc, sec_link_rx_perc; 512 s8 sec_link_id; 513 514 if (!iwl_mld_vif_has_emlsr_cap(vif) || !mld_vif->ap_sta) 515 return; 516 517 mld_sta = iwl_mld_sta_from_mac80211(mld_vif->ap_sta); 518 519 /* We only count for the AP sta in a MLO connection */ 520 if (!mld_sta->mpdu_counters) 521 return; 522 523 /* This wk should only run when the TPT blocker isn't set. 524 * When the blocker is set, the decision to remove it, as well as 525 * clearing the counters is done in DP (to avoid having a wk every 526 * 5 seconds when idle. When the blocker is unset, we are not idle anyway) 527 */ 528 if (WARN_ON(mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT)) 529 return; 530 /* 531 * TPT is unblocked, need to check if the TPT criteria is still met. 532 * 533 * If EMLSR is active, then we also need to check the secondar link 534 * requirements. 535 */ 536 if (iwl_mld_emlsr_active(vif)) { 537 sec_link_id = iwl_mld_get_other_link(vif, iwl_mld_get_primary_link(vif)); 538 sec_link = iwl_mld_link_dereference_check(mld_vif, sec_link_id); 539 if (WARN_ON_ONCE(!sec_link)) 540 return; 541 /* We need the FW ID here */ 542 sec_link_id = sec_link->fw_id; 543 } else { 544 sec_link_id = -1; 545 } 546 547 /* Sum up RX and TX MPDUs from the different queues/links */ 548 for (int q = 0; q < mld->trans->info.num_rxqs; q++) { 549 struct iwl_mld_per_q_mpdu_counter *queue_counter = 550 &mld_sta->mpdu_counters[q]; 551 552 spin_lock_bh(&queue_counter->lock); 553 554 /* The link IDs that doesn't exist will contain 0 */ 555 for (int link = 0; 556 link < ARRAY_SIZE(queue_counter->per_link); 557 link++) { 558 total_tx += queue_counter->per_link[link].tx; 559 total_rx += queue_counter->per_link[link].rx; 560 } 561 562 if (sec_link_id != -1) { 563 sec_link_tx += queue_counter->per_link[sec_link_id].tx; 564 sec_link_rx += queue_counter->per_link[sec_link_id].rx; 565 } 566 567 memset(queue_counter->per_link, 0, 568 sizeof(queue_counter->per_link)); 569 570 spin_unlock_bh(&queue_counter->lock); 571 } 572 573 IWL_DEBUG_INFO(mld, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n", 574 total_tx, total_rx); 575 576 /* If we don't have enough MPDUs - exit EMLSR */ 577 if (total_tx < IWL_MLD_ENTER_EMLSR_TPT_THRESH && 578 total_rx < IWL_MLD_ENTER_EMLSR_TPT_THRESH) { 579 iwl_mld_block_emlsr(mld, vif, IWL_MLD_EMLSR_BLOCKED_TPT, 580 iwl_mld_get_primary_link(vif)); 581 return; 582 } 583 584 /* EMLSR is not active */ 585 if (sec_link_id == -1) 586 return; 587 588 IWL_DEBUG_INFO(mld, "Secondary Link %d: Tx MPDUs: %ld. Rx MPDUs: %ld\n", 589 sec_link_id, sec_link_tx, sec_link_rx); 590 591 /* Calculate the percentage of the secondary link TX/RX */ 592 sec_link_tx_perc = total_tx ? sec_link_tx * 100 / total_tx : 0; 593 sec_link_rx_perc = total_rx ? sec_link_rx * 100 / total_rx : 0; 594 595 /* 596 * The TX/RX percentage is checked only if it exceeds the required 597 * minimum. In addition, RX is checked only if the TX check failed. 598 */ 599 if ((total_tx > EMLSR_MIN_TX && 600 sec_link_tx_perc < EMLSR_SEC_LINK_MIN_PERC) || 601 (total_rx > EMLSR_MIN_RX && 602 sec_link_rx_perc < EMLSR_SEC_LINK_MIN_PERC)) { 603 iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_LINK_USAGE, 604 iwl_mld_get_primary_link(vif)); 605 return; 606 } 607 608 /* Check again when the next window ends */ 609 wiphy_delayed_work_queue(mld_vif->mld->wiphy, 610 &mld_vif->emlsr.check_tpt_wk, 611 round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW)); 612 } 613 614 void iwl_mld_emlsr_unblock_tpt_wk(struct wiphy *wiphy, struct wiphy_work *wk) 615 { 616 struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif, 617 emlsr.unblock_tpt_wk); 618 struct ieee80211_vif *vif = 619 container_of((void *)mld_vif, struct ieee80211_vif, drv_priv); 620 621 iwl_mld_unblock_emlsr(mld_vif->mld, vif, IWL_MLD_EMLSR_BLOCKED_TPT); 622 } 623 624 /* 625 * Link selection 626 */ 627 628 s8 iwl_mld_get_emlsr_rssi_thresh(struct iwl_mld *mld, 629 const struct cfg80211_chan_def *chandef, 630 bool low) 631 { 632 if (WARN_ON(chandef->chan->band != NL80211_BAND_2GHZ && 633 chandef->chan->band != NL80211_BAND_5GHZ && 634 chandef->chan->band != NL80211_BAND_6GHZ)) 635 return S8_MAX; 636 637 #define RSSI_THRESHOLD(_low, _bw) \ 638 (_low) ? IWL_MLD_LOW_RSSI_THRESH_##_bw##MHZ \ 639 : IWL_MLD_HIGH_RSSI_THRESH_##_bw##MHZ 640 641 switch (chandef->width) { 642 case NL80211_CHAN_WIDTH_20_NOHT: 643 case NL80211_CHAN_WIDTH_20: 644 /* 320 MHz has the same thresholds as 20 MHz */ 645 case NL80211_CHAN_WIDTH_320: 646 return RSSI_THRESHOLD(low, 20); 647 case NL80211_CHAN_WIDTH_40: 648 return RSSI_THRESHOLD(low, 40); 649 case NL80211_CHAN_WIDTH_80: 650 return RSSI_THRESHOLD(low, 80); 651 case NL80211_CHAN_WIDTH_160: 652 return RSSI_THRESHOLD(low, 160); 653 default: 654 WARN_ON(1); 655 return S8_MAX; 656 } 657 #undef RSSI_THRESHOLD 658 } 659 660 #define IWL_MLD_BT_COEX_DISABLE_EMLSR_RSSI_THRESH -69 661 #define IWL_MLD_BT_COEX_ENABLE_EMLSR_RSSI_THRESH -63 662 #define IWL_MLD_BT_COEX_WIFI_LOSS_THRESH 7 663 664 VISIBLE_IF_IWLWIFI_KUNIT 665 bool 666 iwl_mld_bt_allows_emlsr(struct iwl_mld *mld, struct ieee80211_bss_conf *link, 667 bool check_entry) 668 { 669 int bt_penalty, rssi_thresh; 670 s32 link_rssi; 671 672 if (WARN_ON_ONCE(!link->bss)) 673 return false; 674 675 link_rssi = MBM_TO_DBM(link->bss->signal); 676 rssi_thresh = check_entry ? 677 IWL_MLD_BT_COEX_ENABLE_EMLSR_RSSI_THRESH : 678 IWL_MLD_BT_COEX_DISABLE_EMLSR_RSSI_THRESH; 679 /* No valid RSSI - force to take low rssi */ 680 if (!link_rssi) 681 link_rssi = rssi_thresh - 1; 682 683 if (link_rssi > rssi_thresh) 684 bt_penalty = max(mld->last_bt_notif.wifi_loss_mid_high_rssi[PHY_BAND_24][0], 685 mld->last_bt_notif.wifi_loss_mid_high_rssi[PHY_BAND_24][1]); 686 else 687 bt_penalty = max(mld->last_bt_notif.wifi_loss_low_rssi[PHY_BAND_24][0], 688 mld->last_bt_notif.wifi_loss_low_rssi[PHY_BAND_24][1]); 689 690 IWL_DEBUG_EHT(mld, "BT penalty for link-id %0X is %d\n", 691 link->link_id, bt_penalty); 692 return bt_penalty < IWL_MLD_BT_COEX_WIFI_LOSS_THRESH; 693 } 694 EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_bt_allows_emlsr); 695 696 static u32 697 iwl_mld_emlsr_disallowed_with_link(struct iwl_mld *mld, 698 struct ieee80211_vif *vif, 699 struct iwl_mld_link_sel_data *link, 700 bool primary) 701 { 702 struct wiphy *wiphy = mld->wiphy; 703 struct ieee80211_bss_conf *conf; 704 u32 ret = 0; 705 706 conf = wiphy_dereference(wiphy, vif->link_conf[link->link_id]); 707 if (WARN_ON_ONCE(!conf)) 708 return IWL_MLD_EMLSR_EXIT_INVALID; 709 710 if (link->chandef->chan->band == NL80211_BAND_2GHZ && 711 !iwl_mld_bt_allows_emlsr(mld, conf, true)) 712 ret |= IWL_MLD_EMLSR_EXIT_BT_COEX; 713 714 if (link->signal < 715 iwl_mld_get_emlsr_rssi_thresh(mld, link->chandef, false)) 716 ret |= IWL_MLD_EMLSR_EXIT_LOW_RSSI; 717 718 if (conf->csa_active) 719 ret |= IWL_MLD_EMLSR_EXIT_CSA; 720 721 if (ret) { 722 IWL_DEBUG_INFO(mld, 723 "Link %d is not allowed for EMLSR as %s\n", 724 link->link_id, 725 primary ? "primary" : "secondary"); 726 iwl_mld_print_emlsr_exit(mld, ret); 727 } 728 729 return ret; 730 } 731 732 static u8 733 iwl_mld_set_link_sel_data(struct iwl_mld *mld, 734 struct ieee80211_vif *vif, 735 struct iwl_mld_link_sel_data *data, 736 unsigned long usable_links, 737 u8 *best_link_idx) 738 { 739 u8 n_data = 0; 740 u16 max_grade = 0; 741 unsigned long link_id; 742 743 /* 744 * TODO: don't select links that weren't discovered in the last scan 745 * This requires mac80211 (or cfg80211) changes to forward/track when 746 * a BSS was last updated. cfg80211 already tracks this information but 747 * it is not exposed within the kernel. 748 */ 749 for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) { 750 struct ieee80211_bss_conf *link_conf = 751 link_conf_dereference_protected(vif, link_id); 752 753 if (WARN_ON_ONCE(!link_conf)) 754 continue; 755 756 /* Ignore any BSS that was not seen in the last MLO scan */ 757 if (ktime_before(link_conf->bss->ts_boottime, 758 mld->scan.last_mlo_scan_time)) 759 continue; 760 761 data[n_data].link_id = link_id; 762 data[n_data].chandef = &link_conf->chanreq.oper; 763 data[n_data].signal = MBM_TO_DBM(link_conf->bss->signal); 764 data[n_data].grade = iwl_mld_get_link_grade(mld, link_conf); 765 766 if (n_data == 0 || data[n_data].grade > max_grade) { 767 max_grade = data[n_data].grade; 768 *best_link_idx = n_data; 769 } 770 n_data++; 771 } 772 773 return n_data; 774 } 775 776 static u32 777 iwl_mld_get_min_chan_load_thresh(struct ieee80211_chanctx_conf *chanctx) 778 { 779 const struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(chanctx); 780 781 switch (phy->chandef.width) { 782 case NL80211_CHAN_WIDTH_320: 783 case NL80211_CHAN_WIDTH_160: 784 return 5; 785 case NL80211_CHAN_WIDTH_80: 786 return 7; 787 default: 788 break; 789 } 790 return 10; 791 } 792 793 static bool 794 iwl_mld_channel_load_allows_emlsr(struct iwl_mld *mld, 795 struct ieee80211_vif *vif, 796 const struct iwl_mld_link_sel_data *a, 797 const struct iwl_mld_link_sel_data *b) 798 { 799 struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); 800 struct iwl_mld_link *link_a = 801 iwl_mld_link_dereference_check(mld_vif, a->link_id); 802 struct ieee80211_chanctx_conf *chanctx_a = NULL; 803 u32 bw_a, bw_b, ratio; 804 u32 primary_load_perc; 805 806 if (!link_a || !link_a->active) { 807 IWL_DEBUG_EHT(mld, "Primary link is not active. Can't enter EMLSR\n"); 808 return false; 809 } 810 811 chanctx_a = wiphy_dereference(mld->wiphy, link_a->chan_ctx); 812 813 if (WARN_ON(!chanctx_a)) 814 return false; 815 816 primary_load_perc = 817 iwl_mld_phy_from_mac80211(chanctx_a)->avg_channel_load_not_by_us; 818 819 IWL_DEBUG_EHT(mld, "Average channel load not by us: %u\n", primary_load_perc); 820 821 if (primary_load_perc < iwl_mld_get_min_chan_load_thresh(chanctx_a)) { 822 IWL_DEBUG_EHT(mld, "Channel load is below the minimum threshold\n"); 823 return false; 824 } 825 826 if (iwl_mld_vif_low_latency(mld_vif)) { 827 IWL_DEBUG_EHT(mld, "Low latency vif, EMLSR is allowed\n"); 828 return true; 829 } 830 831 if (a->chandef->width <= b->chandef->width) 832 return true; 833 834 bw_a = cfg80211_chandef_get_width(a->chandef); 835 bw_b = cfg80211_chandef_get_width(b->chandef); 836 ratio = bw_a / bw_b; 837 838 switch (ratio) { 839 case 2: 840 return primary_load_perc > 25; 841 case 4: 842 return primary_load_perc > 40; 843 case 8: 844 case 16: 845 return primary_load_perc > 50; 846 } 847 848 return false; 849 } 850 851 VISIBLE_IF_KUNIT u32 852 iwl_mld_emlsr_pair_state(struct ieee80211_vif *vif, 853 struct iwl_mld_link_sel_data *a, 854 struct iwl_mld_link_sel_data *b) 855 { 856 struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); 857 struct iwl_mld *mld = mld_vif->mld; 858 u32 reason_mask = 0; 859 860 /* Per-link considerations */ 861 reason_mask = iwl_mld_emlsr_disallowed_with_link(mld, vif, a, true); 862 if (reason_mask) 863 return reason_mask; 864 865 reason_mask = iwl_mld_emlsr_disallowed_with_link(mld, vif, b, false); 866 if (reason_mask) 867 return reason_mask; 868 869 if (a->chandef->chan->band == b->chandef->chan->band) { 870 const struct cfg80211_chan_def *c_low = a->chandef; 871 const struct cfg80211_chan_def *c_high = b->chandef; 872 u32 c_low_upper_edge, c_high_lower_edge; 873 874 if (c_low->chan->center_freq > c_high->chan->center_freq) 875 swap(c_low, c_high); 876 877 c_low_upper_edge = c_low->chan->center_freq + 878 cfg80211_chandef_get_width(c_low) / 2; 879 c_high_lower_edge = c_high->chan->center_freq - 880 cfg80211_chandef_get_width(c_high) / 2; 881 882 if (a->chandef->chan->band == NL80211_BAND_5GHZ && 883 c_low_upper_edge <= 5330 && c_high_lower_edge >= 5490) { 884 /* This case is fine - HW/FW can deal with it, there's 885 * enough separation between the two channels. 886 */ 887 } else { 888 reason_mask |= IWL_MLD_EMLSR_EXIT_EQUAL_BAND; 889 } 890 } 891 if (!iwl_mld_channel_load_allows_emlsr(mld, vif, a, b)) 892 reason_mask |= IWL_MLD_EMLSR_EXIT_CHAN_LOAD; 893 894 if (reason_mask) { 895 IWL_DEBUG_INFO(mld, 896 "Links %d and %d are not a valid pair for EMLSR\n", 897 a->link_id, b->link_id); 898 IWL_DEBUG_INFO(mld, 899 "Links bandwidth are: %d and %d\n", 900 nl80211_chan_width_to_mhz(a->chandef->width), 901 nl80211_chan_width_to_mhz(b->chandef->width)); 902 iwl_mld_print_emlsr_exit(mld, reason_mask); 903 } 904 905 return reason_mask; 906 } 907 EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_emlsr_pair_state); 908 909 /* Calculation is done with fixed-point with a scaling factor of 1/256 */ 910 #define SCALE_FACTOR 256 911 912 /* 913 * Returns the combined grade of two given links. 914 * Returns 0 if EMLSR is not allowed with these 2 links. 915 */ 916 static 917 unsigned int iwl_mld_get_emlsr_grade(struct iwl_mld *mld, 918 struct ieee80211_vif *vif, 919 struct iwl_mld_link_sel_data *a, 920 struct iwl_mld_link_sel_data *b, 921 u8 *primary_id) 922 { 923 struct ieee80211_bss_conf *primary_conf; 924 struct wiphy *wiphy = ieee80211_vif_to_wdev(vif)->wiphy; 925 unsigned int primary_load; 926 927 lockdep_assert_wiphy(wiphy); 928 929 /* a is always primary, b is always secondary */ 930 if (b->grade > a->grade) 931 swap(a, b); 932 933 *primary_id = a->link_id; 934 935 if (iwl_mld_emlsr_pair_state(vif, a, b)) 936 return 0; 937 938 primary_conf = wiphy_dereference(wiphy, vif->link_conf[*primary_id]); 939 940 if (WARN_ON_ONCE(!primary_conf)) 941 return 0; 942 943 primary_load = iwl_mld_get_chan_load(mld, primary_conf); 944 945 /* The more the primary link is loaded, the more worthwhile EMLSR becomes */ 946 return a->grade + ((b->grade * primary_load) / SCALE_FACTOR); 947 } 948 949 static void _iwl_mld_select_links(struct iwl_mld *mld, 950 struct ieee80211_vif *vif) 951 { 952 struct iwl_mld_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS]; 953 struct iwl_mld_link_sel_data *best_link; 954 struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); 955 int max_active_links = iwl_mld_max_active_links(mld, vif); 956 u16 new_active, usable_links = ieee80211_vif_usable_links(vif); 957 u8 best_idx, new_primary, n_data; 958 u16 max_grade; 959 960 lockdep_assert_wiphy(mld->wiphy); 961 962 if (!mld_vif->authorized || hweight16(usable_links) <= 1) 963 return; 964 965 if (WARN(ktime_before(mld->scan.last_mlo_scan_time, 966 ktime_sub_ns(ktime_get_boottime_ns(), 967 5ULL * NSEC_PER_SEC)), 968 "Last MLO scan was too long ago, can't select links\n")) 969 return; 970 971 /* The logic below is simple and not suited for more than 2 links */ 972 WARN_ON_ONCE(max_active_links > 2); 973 974 n_data = iwl_mld_set_link_sel_data(mld, vif, data, usable_links, 975 &best_idx); 976 977 if (WARN(!n_data, "Couldn't find a valid grade for any link!\n")) 978 return; 979 980 /* Default to selecting the single best link */ 981 best_link = &data[best_idx]; 982 new_primary = best_link->link_id; 983 new_active = BIT(best_link->link_id); 984 max_grade = best_link->grade; 985 986 /* If EMLSR is not possible, activate the best link */ 987 if (max_active_links == 1 || n_data == 1 || 988 !iwl_mld_vif_has_emlsr_cap(vif) || !IWL_MLD_AUTO_EML_ENABLE || 989 mld_vif->emlsr.blocked_reasons) 990 goto set_active; 991 992 /* Try to find the best link combination */ 993 for (u8 a = 0; a < n_data; a++) { 994 for (u8 b = a + 1; b < n_data; b++) { 995 u8 best_in_pair; 996 u16 emlsr_grade = 997 iwl_mld_get_emlsr_grade(mld, vif, 998 &data[a], &data[b], 999 &best_in_pair); 1000 1001 /* 1002 * Prefer (new) EMLSR combination to prefer EMLSR over 1003 * a single link. 1004 */ 1005 if (emlsr_grade < max_grade) 1006 continue; 1007 1008 max_grade = emlsr_grade; 1009 new_primary = best_in_pair; 1010 new_active = BIT(data[a].link_id) | 1011 BIT(data[b].link_id); 1012 } 1013 } 1014 1015 set_active: 1016 IWL_DEBUG_INFO(mld, "Link selection result: 0x%x. Primary = %d\n", 1017 new_active, new_primary); 1018 1019 mld_vif->emlsr.selected_primary = new_primary; 1020 mld_vif->emlsr.selected_links = new_active; 1021 1022 ieee80211_set_active_links_async(vif, new_active); 1023 } 1024 1025 static void iwl_mld_vif_iter_select_links(void *_data, u8 *mac, 1026 struct ieee80211_vif *vif) 1027 { 1028 struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); 1029 struct iwl_mld *mld = mld_vif->mld; 1030 1031 _iwl_mld_select_links(mld, vif); 1032 } 1033 1034 void iwl_mld_select_links(struct iwl_mld *mld) 1035 { 1036 ieee80211_iterate_active_interfaces_mtx(mld->hw, 1037 IEEE80211_IFACE_ITER_NORMAL, 1038 iwl_mld_vif_iter_select_links, 1039 NULL); 1040 } 1041 1042 static void iwl_mld_emlsr_check_bt_iter(void *_data, u8 *mac, 1043 struct ieee80211_vif *vif) 1044 { 1045 struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); 1046 const struct iwl_bt_coex_profile_notif zero_notif = {}; 1047 struct iwl_mld *mld = mld_vif->mld; 1048 struct ieee80211_bss_conf *link; 1049 unsigned int link_id; 1050 const struct iwl_bt_coex_profile_notif *notif = &mld->last_bt_notif; 1051 1052 if (!iwl_mld_vif_has_emlsr_cap(vif)) 1053 return; 1054 1055 /* zeroed structure means that BT is OFF */ 1056 if (!memcmp(notif, &zero_notif, sizeof(*notif))) { 1057 iwl_mld_retry_emlsr(mld, vif); 1058 return; 1059 } 1060 1061 for_each_vif_active_link(vif, link, link_id) { 1062 bool emlsr_active, emlsr_allowed; 1063 1064 if (WARN_ON(!link->chanreq.oper.chan)) 1065 continue; 1066 1067 if (link->chanreq.oper.chan->band != NL80211_BAND_2GHZ) 1068 continue; 1069 1070 emlsr_active = iwl_mld_emlsr_active(vif); 1071 emlsr_allowed = iwl_mld_bt_allows_emlsr(mld, link, 1072 !emlsr_active); 1073 if (emlsr_allowed && !emlsr_active) { 1074 iwl_mld_retry_emlsr(mld, vif); 1075 return; 1076 } 1077 1078 if (!emlsr_allowed && emlsr_active) { 1079 iwl_mld_exit_emlsr(mld, vif, 1080 IWL_MLD_EMLSR_EXIT_BT_COEX, 1081 iwl_mld_get_primary_link(vif)); 1082 return; 1083 } 1084 } 1085 } 1086 1087 void iwl_mld_emlsr_check_bt(struct iwl_mld *mld) 1088 { 1089 ieee80211_iterate_active_interfaces_mtx(mld->hw, 1090 IEEE80211_IFACE_ITER_NORMAL, 1091 iwl_mld_emlsr_check_bt_iter, 1092 NULL); 1093 } 1094 1095 struct iwl_mld_chan_load_data { 1096 struct iwl_mld_phy *phy; 1097 u32 prev_chan_load_not_by_us; 1098 }; 1099 1100 static void iwl_mld_chan_load_update_iter(void *_data, u8 *mac, 1101 struct ieee80211_vif *vif) 1102 { 1103 struct iwl_mld_chan_load_data *data = _data; 1104 const struct iwl_mld_phy *phy = data->phy; 1105 struct ieee80211_chanctx_conf *chanctx = 1106 container_of((const void *)phy, struct ieee80211_chanctx_conf, 1107 drv_priv); 1108 struct iwl_mld *mld = iwl_mld_vif_from_mac80211(vif)->mld; 1109 struct ieee80211_bss_conf *prim_link; 1110 unsigned int prim_link_id; 1111 1112 prim_link_id = iwl_mld_get_primary_link(vif); 1113 prim_link = link_conf_dereference_protected(vif, prim_link_id); 1114 1115 if (WARN_ON(!prim_link)) 1116 return; 1117 1118 if (chanctx != rcu_access_pointer(prim_link->chanctx_conf)) 1119 return; 1120 1121 if (iwl_mld_emlsr_active(vif)) { 1122 int chan_load = iwl_mld_get_chan_load_by_others(mld, prim_link, 1123 true); 1124 1125 if (chan_load < 0) 1126 return; 1127 1128 /* chan_load is in range [0,255] */ 1129 if (chan_load < NORMALIZE_PERCENT_TO_255(IWL_MLD_EXIT_EMLSR_CHAN_LOAD)) 1130 iwl_mld_exit_emlsr(mld, vif, 1131 IWL_MLD_EMLSR_EXIT_CHAN_LOAD, 1132 prim_link_id); 1133 } else { 1134 u32 old_chan_load = data->prev_chan_load_not_by_us; 1135 u32 new_chan_load = phy->avg_channel_load_not_by_us; 1136 u32 min_thresh = iwl_mld_get_min_chan_load_thresh(chanctx); 1137 1138 #define THRESHOLD_CROSSED(threshold) \ 1139 (old_chan_load <= (threshold) && new_chan_load > (threshold)) 1140 1141 if (THRESHOLD_CROSSED(min_thresh) || THRESHOLD_CROSSED(25) || 1142 THRESHOLD_CROSSED(40) || THRESHOLD_CROSSED(50)) 1143 iwl_mld_retry_emlsr(mld, vif); 1144 #undef THRESHOLD_CROSSED 1145 } 1146 } 1147 1148 void iwl_mld_emlsr_check_chan_load(struct ieee80211_hw *hw, 1149 struct iwl_mld_phy *phy, 1150 u32 prev_chan_load_not_by_us) 1151 { 1152 struct iwl_mld_chan_load_data data = { 1153 .phy = phy, 1154 .prev_chan_load_not_by_us = prev_chan_load_not_by_us, 1155 }; 1156 1157 ieee80211_iterate_active_interfaces_mtx(hw, 1158 IEEE80211_IFACE_ITER_NORMAL, 1159 iwl_mld_chan_load_update_iter, 1160 &data); 1161 } 1162 1163 void iwl_mld_retry_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif) 1164 { 1165 struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); 1166 1167 if (!iwl_mld_vif_has_emlsr_cap(vif) || iwl_mld_emlsr_active(vif) || 1168 mld_vif->emlsr.blocked_reasons) 1169 return; 1170 1171 iwl_mld_int_mlo_scan(mld, vif); 1172 } 1173