// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2024-2025 Intel Corporation
 */
#include "mlo.h"
#include "phy.h"

/* Block reasons helper */
#define HANDLE_EMLSR_BLOCKED_REASONS(HOW)	\
	HOW(PREVENTION)			\
	HOW(WOWLAN)			\
	HOW(ROC)			\
	HOW(NON_BSS)			\
	HOW(TMP_NON_BSS)		\
	HOW(TPT)

static const char *
iwl_mld_get_emlsr_blocked_string(enum iwl_mld_emlsr_blocked blocked)
{
	/* Using switch without "default" will warn about missing entries */
	switch (blocked) {
#define REASON_CASE(x) case IWL_MLD_EMLSR_BLOCKED_##x: return #x;
	HANDLE_EMLSR_BLOCKED_REASONS(REASON_CASE)
#undef REASON_CASE
	}

	return "ERROR";
}

static void iwl_mld_print_emlsr_blocked(struct iwl_mld *mld, u32 mask)
{
#define NAME_FMT(x) "%s"
#define NAME_PR(x) (mask & IWL_MLD_EMLSR_BLOCKED_##x) ? "[" #x "]" : "",
	IWL_DEBUG_INFO(mld,
		       "EMLSR blocked = " HANDLE_EMLSR_BLOCKED_REASONS(NAME_FMT)
		       " (0x%x)\n",
		       HANDLE_EMLSR_BLOCKED_REASONS(NAME_PR)
		       mask);
#undef NAME_FMT
#undef NAME_PR
}

/* Exit reasons helper */
#define HANDLE_EMLSR_EXIT_REASONS(HOW)	\
	HOW(BLOCK)			\
	HOW(MISSED_BEACON)		\
	HOW(FAIL_ENTRY)			\
	HOW(CSA)			\
	HOW(EQUAL_BAND)			\
	HOW(LOW_RSSI)			\
	HOW(LINK_USAGE)			\
	HOW(BT_COEX)			\
	HOW(CHAN_LOAD)			\
	HOW(RFI)			\
	HOW(FW_REQUEST)			\
	HOW(INVALID)

static const char *
iwl_mld_get_emlsr_exit_string(enum iwl_mld_emlsr_exit exit)
{
	/* Using switch without "default" will warn about missing entries */
	switch (exit) {
#define REASON_CASE(x) case IWL_MLD_EMLSR_EXIT_##x: return #x;
	HANDLE_EMLSR_EXIT_REASONS(REASON_CASE)
#undef REASON_CASE
	}

	return "ERROR";
}

static void iwl_mld_print_emlsr_exit(struct iwl_mld *mld, u32 mask)
{
#define NAME_FMT(x) "%s"
#define NAME_PR(x) (mask & IWL_MLD_EMLSR_EXIT_##x) ? "[" #x "]" : "",
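	/*
	 * HANDLE_EMLSR_EXIT_REASONS() expands NAME_FMT once per reason to
	 * build the format string and NAME_PR once per reason to build the
	 * matching argument list, so every bit set in @mask is printed as
	 * "[REASON]" below.
	 */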
"[" #x "]" : "", 75 IWL_DEBUG_INFO(mld, 76 "EMLSR exit = " HANDLE_EMLSR_EXIT_REASONS(NAME_FMT) 77 " (0x%x)\n", 78 HANDLE_EMLSR_EXIT_REASONS(NAME_PR) 79 mask); 80 #undef NAME_FMT 81 #undef NAME_PR 82 } 83 84 void iwl_mld_emlsr_prevent_done_wk(struct wiphy *wiphy, struct wiphy_work *wk) 85 { 86 struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif, 87 emlsr.prevent_done_wk.work); 88 struct ieee80211_vif *vif = 89 container_of((void *)mld_vif, struct ieee80211_vif, drv_priv); 90 91 if (WARN_ON(!(mld_vif->emlsr.blocked_reasons & 92 IWL_MLD_EMLSR_BLOCKED_PREVENTION))) 93 return; 94 95 iwl_mld_unblock_emlsr(mld_vif->mld, vif, 96 IWL_MLD_EMLSR_BLOCKED_PREVENTION); 97 } 98 99 void iwl_mld_emlsr_tmp_non_bss_done_wk(struct wiphy *wiphy, 100 struct wiphy_work *wk) 101 { 102 struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif, 103 emlsr.tmp_non_bss_done_wk.work); 104 struct ieee80211_vif *vif = 105 container_of((void *)mld_vif, struct ieee80211_vif, drv_priv); 106 107 if (WARN_ON(!(mld_vif->emlsr.blocked_reasons & 108 IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS))) 109 return; 110 111 iwl_mld_unblock_emlsr(mld_vif->mld, vif, 112 IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS); 113 } 114 115 #define IWL_MLD_TRIGGER_LINK_SEL_TIME (HZ * IWL_MLD_TRIGGER_LINK_SEL_TIME_SEC) 116 #define IWL_MLD_SCAN_EXPIRE_TIME (HZ * IWL_MLD_SCAN_EXPIRE_TIME_SEC) 117 118 /* Exit reasons that can cause longer EMLSR prevention */ 119 #define IWL_MLD_PREVENT_EMLSR_REASONS (IWL_MLD_EMLSR_EXIT_MISSED_BEACON | \ 120 IWL_MLD_EMLSR_EXIT_LINK_USAGE | \ 121 IWL_MLD_EMLSR_EXIT_FW_REQUEST) 122 #define IWL_MLD_PREVENT_EMLSR_TIMEOUT (HZ * 400) 123 124 #define IWL_MLD_EMLSR_PREVENT_SHORT (HZ * 300) 125 #define IWL_MLD_EMLSR_PREVENT_LONG (HZ * 600) 126 127 static void iwl_mld_check_emlsr_prevention(struct iwl_mld *mld, 128 struct iwl_mld_vif *mld_vif, 129 enum iwl_mld_emlsr_exit reason) 130 { 131 unsigned long delay; 132 133 /* 134 * Reset the counter if more than 400 seconds have passed between one 135 * exit and the other, or if we exited due to a different reason. 136 * Will also reset the counter after the long prevention is done. 137 */ 138 if (time_after(jiffies, mld_vif->emlsr.last_exit_ts + 139 IWL_MLD_PREVENT_EMLSR_TIMEOUT) || 140 mld_vif->emlsr.last_exit_reason != reason) 141 mld_vif->emlsr.exit_repeat_count = 0; 142 143 mld_vif->emlsr.last_exit_reason = reason; 144 mld_vif->emlsr.last_exit_ts = jiffies; 145 mld_vif->emlsr.exit_repeat_count++; 146 147 /* 148 * Do not add a prevention when the reason was a block. For a block, 149 * EMLSR will be enabled again on unblock. 150 */ 151 if (reason == IWL_MLD_EMLSR_EXIT_BLOCK) 152 return; 153 154 /* Set prevention for a minimum of 30 seconds */ 155 mld_vif->emlsr.blocked_reasons |= IWL_MLD_EMLSR_BLOCKED_PREVENTION; 156 delay = IWL_MLD_TRIGGER_LINK_SEL_TIME; 157 158 /* Handle repeats for reasons that can cause long prevention */ 159 if (mld_vif->emlsr.exit_repeat_count > 1 && 160 reason & IWL_MLD_PREVENT_EMLSR_REASONS) { 161 if (mld_vif->emlsr.exit_repeat_count == 2) 162 delay = IWL_MLD_EMLSR_PREVENT_SHORT; 163 else 164 delay = IWL_MLD_EMLSR_PREVENT_LONG; 165 166 /* 167 * The timeouts are chosen so that this will not happen, i.e. 
	/* Handle repeats for reasons that can cause long prevention */
	if (mld_vif->emlsr.exit_repeat_count > 1 &&
	    reason & IWL_MLD_PREVENT_EMLSR_REASONS) {
		if (mld_vif->emlsr.exit_repeat_count == 2)
			delay = IWL_MLD_EMLSR_PREVENT_SHORT;
		else
			delay = IWL_MLD_EMLSR_PREVENT_LONG;

		/*
		 * The timeouts are chosen so that this will not happen, i.e.
		 * IWL_MLD_EMLSR_PREVENT_LONG > IWL_MLD_PREVENT_EMLSR_TIMEOUT
		 */
		WARN_ON(mld_vif->emlsr.exit_repeat_count > 3);
	}

	IWL_DEBUG_INFO(mld,
		       "Preventing EMLSR for %ld seconds due to %u exits with the reason = %s (0x%x)\n",
		       delay / HZ, mld_vif->emlsr.exit_repeat_count,
		       iwl_mld_get_emlsr_exit_string(reason), reason);

	wiphy_delayed_work_queue(mld->wiphy,
				 &mld_vif->emlsr.prevent_done_wk, delay);
}

static void iwl_mld_clear_avg_chan_load_iter(struct ieee80211_hw *hw,
					     struct ieee80211_chanctx_conf *ctx,
					     void *dat)
{
	struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);

	/* It is ok to do it for all chanctx (and not only for the ones that
	 * belong to the EMLSR vif) since EMLSR is not allowed if there is
	 * another vif.
	 */
	phy->avg_channel_load_not_by_us = 0;
}

static int _iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			       enum iwl_mld_emlsr_exit exit, u8 link_to_keep,
			       bool sync)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	u16 new_active_links;
	int ret = 0;

	lockdep_assert_wiphy(mld->wiphy);

	/* On entry failure need to exit anyway, even if entered from debugfs */
	if (exit != IWL_MLD_EMLSR_EXIT_FAIL_ENTRY && !IWL_MLD_AUTO_EML_ENABLE)
		return 0;

	/* Ignore exit request if EMLSR is not active */
	if (!iwl_mld_emlsr_active(vif))
		return 0;

	if (WARN_ON(!ieee80211_vif_is_mld(vif) || !mld_vif->authorized))
		return 0;

	if (WARN_ON(!(vif->active_links & BIT(link_to_keep))))
		link_to_keep = __ffs(vif->active_links);

	new_active_links = BIT(link_to_keep);
	IWL_DEBUG_INFO(mld,
		       "Exiting EMLSR. reason = %s (0x%x). Current active links=0x%x, new active links = 0x%x\n",
		       iwl_mld_get_emlsr_exit_string(exit), exit,
		       vif->active_links, new_active_links);

	if (sync)
		ret = ieee80211_set_active_links(vif, new_active_links);
	else
		ieee80211_set_active_links_async(vif, new_active_links);

	/* Update latest exit reason and check EMLSR prevention */
	iwl_mld_check_emlsr_prevention(mld, mld_vif, exit);

	/* channel_load_not_by_us is invalid when in EMLSR.
	 * Clear it so wrong values won't be used.
	 */
	ieee80211_iter_chan_contexts_atomic(mld->hw,
					    iwl_mld_clear_avg_chan_load_iter,
					    NULL);

	return ret;
}

void iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			enum iwl_mld_emlsr_exit exit, u8 link_to_keep)
{
	_iwl_mld_exit_emlsr(mld, vif, exit, link_to_keep, false);
}

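/*
 * Blocking is stronger than a plain exit: a block reason forces an immediate
 * exit with IWL_MLD_EMLSR_EXIT_BLOCK (which adds no prevention timeout) and
 * keeps EMLSR disabled until every blocked reason has been cleared through
 * iwl_mld_unblock_emlsr().
 */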
static int _iwl_mld_emlsr_block(struct iwl_mld *mld, struct ieee80211_vif *vif,
				enum iwl_mld_emlsr_blocked reason,
				u8 link_to_keep, bool sync)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	lockdep_assert_wiphy(mld->wiphy);

	if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif))
		return 0;

	if (mld_vif->emlsr.blocked_reasons & reason)
		return 0;

	mld_vif->emlsr.blocked_reasons |= reason;

	IWL_DEBUG_INFO(mld,
		       "Blocking EMLSR mode. reason = %s (0x%x)\n",
		       iwl_mld_get_emlsr_blocked_string(reason), reason);
	iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons);

	if (reason == IWL_MLD_EMLSR_BLOCKED_TPT)
		wiphy_delayed_work_cancel(mld_vif->mld->wiphy,
					  &mld_vif->emlsr.check_tpt_wk);

	return _iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BLOCK,
				   link_to_keep, sync);
}

void iwl_mld_block_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			 enum iwl_mld_emlsr_blocked reason, u8 link_to_keep)
{
	_iwl_mld_emlsr_block(mld, vif, reason, link_to_keep, false);
}

int iwl_mld_block_emlsr_sync(struct iwl_mld *mld, struct ieee80211_vif *vif,
			     enum iwl_mld_emlsr_blocked reason, u8 link_to_keep)
{
	return _iwl_mld_emlsr_block(mld, vif, reason, link_to_keep, true);
}

static void _iwl_mld_select_links(struct iwl_mld *mld,
				  struct ieee80211_vif *vif);

void iwl_mld_unblock_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			   enum iwl_mld_emlsr_blocked reason)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	lockdep_assert_wiphy(mld->wiphy);

	if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif))
		return;

	if (!(mld_vif->emlsr.blocked_reasons & reason))
		return;

	mld_vif->emlsr.blocked_reasons &= ~reason;

	IWL_DEBUG_INFO(mld,
		       "Unblocking EMLSR mode. reason = %s (0x%x)\n",
		       iwl_mld_get_emlsr_blocked_string(reason), reason);
	iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons);

	if (reason == IWL_MLD_EMLSR_BLOCKED_TPT)
		wiphy_delayed_work_queue(mld_vif->mld->wiphy,
					 &mld_vif->emlsr.check_tpt_wk,
					 round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));

	if (mld_vif->emlsr.blocked_reasons)
		return;

	IWL_DEBUG_INFO(mld, "EMLSR is unblocked\n");
	iwl_mld_int_mlo_scan(mld, vif);
}

static void
iwl_mld_vif_iter_emlsr_mode_notif(void *data, u8 *mac,
				  struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_esr_mode_notif *notif = (void *)data;

	if (!iwl_mld_vif_has_emlsr_cap(vif))
		return;

	switch (le32_to_cpu(notif->action)) {
	case ESR_RECOMMEND_LEAVE:
		iwl_mld_exit_emlsr(mld_vif->mld, vif,
				   IWL_MLD_EMLSR_EXIT_FW_REQUEST,
				   iwl_mld_get_primary_link(vif));
		break;
	case ESR_RECOMMEND_ENTER:
	case ESR_FORCE_LEAVE:
	default:
		IWL_WARN(mld_vif->mld, "Unexpected EMLSR notification: %d\n",
			 le32_to_cpu(notif->action));
	}
}

void iwl_mld_handle_emlsr_mode_notif(struct iwl_mld *mld,
				     struct iwl_rx_packet *pkt)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_emlsr_mode_notif,
						pkt->data);
}

static void
iwl_mld_vif_iter_disconnect_emlsr(void *data, u8 *mac,
				  struct ieee80211_vif *vif)
{
	if (!iwl_mld_vif_has_emlsr_cap(vif))
		return;

	ieee80211_connection_loss(vif);
}

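/*
 * The firmware reports via this notification that it could not complete an
 * EMLSR link activation or deactivation. The handler below either falls back
 * to the link the firmware kept (activation failure) or drops the connection
 * (deactivation failure, or the reported link does not exist anymore).
 */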
void iwl_mld_handle_emlsr_trans_fail_notif(struct iwl_mld *mld,
					   struct iwl_rx_packet *pkt)
{
	const struct iwl_esr_trans_fail_notif *notif = (const void *)pkt->data;
	u32 fw_link_id = le32_to_cpu(notif->link_id);
	struct ieee80211_bss_conf *bss_conf =
		iwl_mld_fw_id_to_link_conf(mld, fw_link_id);

	IWL_DEBUG_INFO(mld, "Failed to %s EMLSR on link %d (FW: %d), reason %d\n",
		       le32_to_cpu(notif->activation) ? "enter" : "exit",
		       bss_conf ? bss_conf->link_id : -1,
		       le32_to_cpu(notif->link_id),
		       le32_to_cpu(notif->err_code));

	if (IWL_FW_CHECK(mld, !bss_conf,
			 "FW reported failure to %sactivate EMLSR on a non-existing link: %d\n",
			 le32_to_cpu(notif->activation) ? "" : "de",
			 fw_link_id)) {
		ieee80211_iterate_active_interfaces_mtx(
			mld->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mld_vif_iter_disconnect_emlsr, NULL);
		return;
	}

	/* Disconnect if we failed to deactivate a link */
	if (!le32_to_cpu(notif->activation)) {
		ieee80211_connection_loss(bss_conf->vif);
		return;
	}

	/*
	 * We failed to activate the second link, go back to the link specified
	 * by the firmware as that is the one that is still valid now.
	 */
	iwl_mld_exit_emlsr(mld, bss_conf->vif, IWL_MLD_EMLSR_EXIT_FAIL_ENTRY,
			   bss_conf->link_id);
}

/* Active non-station link tracking */
static void iwl_mld_count_non_bss_links(void *_data, u8 *mac,
					struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int *count = _data;

	if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION)
		return;

	*count += iwl_mld_count_active_links(mld_vif->mld, vif);
}

struct iwl_mld_update_emlsr_block_data {
	bool block;
	int result;
};

static void
iwl_mld_vif_iter_update_emlsr_non_bss_block(void *_data, u8 *mac,
					    struct ieee80211_vif *vif)
{
	struct iwl_mld_update_emlsr_block_data *data = _data;
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int ret;

	if (data->block) {
		ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif,
					       IWL_MLD_EMLSR_BLOCKED_NON_BSS,
					       iwl_mld_get_primary_link(vif));
		if (ret)
			data->result = ret;
	} else {
		iwl_mld_unblock_emlsr(mld_vif->mld, vif,
				      IWL_MLD_EMLSR_BLOCKED_NON_BSS);
	}
}

int iwl_mld_emlsr_check_non_bss_block(struct iwl_mld *mld,
				      int pending_link_changes)
{
	/* An active link of a non-station vif blocks EMLSR. Upon activation,
	 * block EMLSR on the bss vif. Upon deactivation, check if this link
	 * was the last active non-station link, and if so unblock the bss vif.
	 */
	struct iwl_mld_update_emlsr_block_data block_data = {};
	int count = pending_link_changes;

	/* No need to count if we are activating a non-BSS link */
	if (count <= 0)
		ieee80211_iterate_active_interfaces_mtx(mld->hw,
							IEEE80211_IFACE_ITER_NORMAL,
							iwl_mld_count_non_bss_links,
							&count);

	/*
	 * We could skip the update if the blocking state did not change (and
	 * pending_link_changes is non-zero).
	 */
	block_data.block = !!count;

	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_update_emlsr_non_bss_block,
						&block_data);

	return block_data.result;
}

#define EMLSR_SEC_LINK_MIN_PERC 10
#define EMLSR_MIN_TX 3000
#define EMLSR_MIN_RX 400

void iwl_mld_emlsr_check_tpt(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
						   emlsr.check_tpt_wk.work);
	struct ieee80211_vif *vif =
		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
	struct iwl_mld *mld = mld_vif->mld;
	struct iwl_mld_sta *mld_sta;
	struct iwl_mld_link *sec_link;
	unsigned long total_tx = 0, total_rx = 0;
	unsigned long sec_link_tx = 0, sec_link_rx = 0;
	u8 sec_link_tx_perc, sec_link_rx_perc;
	s8 sec_link_id;

	if (!iwl_mld_vif_has_emlsr_cap(vif) || !mld_vif->ap_sta)
		return;

	mld_sta = iwl_mld_sta_from_mac80211(mld_vif->ap_sta);

	/* We only count for the AP sta in an MLO connection */
	if (!mld_sta->mpdu_counters)
		return;

	/* This wk should only run when the TPT blocker isn't set.
	 * When the blocker is set, the decision to remove it, as well as
	 * clearing the counters, is done in DP (to avoid having a wk every
	 * 5 seconds when idle. When the blocker is unset, we are not idle
	 * anyway).
	 */
	if (WARN_ON(mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT))
		return;
	/*
	 * TPT is unblocked, need to check if the TPT criteria is still met.
	 *
	 * If EMLSR is active, then we also need to check the secondary link
	 * requirements.
	 */
	if (iwl_mld_emlsr_active(vif)) {
		sec_link_id = iwl_mld_get_other_link(vif, iwl_mld_get_primary_link(vif));
		sec_link = iwl_mld_link_dereference_check(mld_vif, sec_link_id);
		if (WARN_ON_ONCE(!sec_link))
			return;
		/* We need the FW ID here */
		sec_link_id = sec_link->fw_id;
	} else {
		sec_link_id = -1;
	}

	/* Sum up RX and TX MPDUs from the different queues/links */
	for (int q = 0; q < mld->trans->num_rx_queues; q++) {
		struct iwl_mld_per_q_mpdu_counter *queue_counter =
			&mld_sta->mpdu_counters[q];

		spin_lock_bh(&queue_counter->lock);

		/* Link IDs that don't exist will contain 0 */
		for (int link = 0;
		     link < ARRAY_SIZE(queue_counter->per_link);
		     link++) {
			total_tx += queue_counter->per_link[link].tx;
			total_rx += queue_counter->per_link[link].rx;
		}

		if (sec_link_id != -1) {
			sec_link_tx += queue_counter->per_link[sec_link_id].tx;
			sec_link_rx += queue_counter->per_link[sec_link_id].rx;
		}

		memset(queue_counter->per_link, 0,
		       sizeof(queue_counter->per_link));

		spin_unlock_bh(&queue_counter->lock);
	}

	IWL_DEBUG_INFO(mld, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n",
		       total_tx, total_rx);

	/* If we don't have enough MPDUs - exit EMLSR */
	if (total_tx < IWL_MLD_ENTER_EMLSR_TPT_THRESH &&
	    total_rx < IWL_MLD_ENTER_EMLSR_TPT_THRESH) {
		iwl_mld_block_emlsr(mld, vif, IWL_MLD_EMLSR_BLOCKED_TPT,
				    iwl_mld_get_primary_link(vif));
		return;
	}

	/* EMLSR is not active */
	if (sec_link_id == -1)
		return;

	IWL_DEBUG_INFO(mld, "Secondary Link %d: Tx MPDUs: %ld. Rx MPDUs: %ld\n",
		       sec_link_id, sec_link_tx, sec_link_rx);

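	/*
	 * Example (assuming the totals above already passed the
	 * IWL_MLD_ENTER_EMLSR_TPT_THRESH check): with total_tx = 4000 and
	 * sec_link_tx = 200 the secondary link carried only 5% of the TX
	 * MPDUs; total_tx exceeds EMLSR_MIN_TX and 5% is below
	 * EMLSR_SEC_LINK_MIN_PERC, so the check below exits EMLSR with
	 * IWL_MLD_EMLSR_EXIT_LINK_USAGE.
	 */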
	/* Calculate the percentage of the secondary link TX/RX */
	sec_link_tx_perc = total_tx ? sec_link_tx * 100 / total_tx : 0;
	sec_link_rx_perc = total_rx ? sec_link_rx * 100 / total_rx : 0;

	/*
	 * The TX/RX percentage is checked only if it exceeds the required
	 * minimum. In addition, RX is checked only if the TX check failed.
	 */
	if ((total_tx > EMLSR_MIN_TX &&
	     sec_link_tx_perc < EMLSR_SEC_LINK_MIN_PERC) ||
	    (total_rx > EMLSR_MIN_RX &&
	     sec_link_rx_perc < EMLSR_SEC_LINK_MIN_PERC)) {
		iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_LINK_USAGE,
				   iwl_mld_get_primary_link(vif));
		return;
	}

	/* Check again when the next window ends */
	wiphy_delayed_work_queue(mld_vif->mld->wiphy,
				 &mld_vif->emlsr.check_tpt_wk,
				 round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));
}

void iwl_mld_emlsr_unblock_tpt_wk(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
						   emlsr.unblock_tpt_wk);
	struct ieee80211_vif *vif =
		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);

	iwl_mld_unblock_emlsr(mld_vif->mld, vif, IWL_MLD_EMLSR_BLOCKED_TPT);
}

/*
 * Link selection
 */

s8 iwl_mld_get_emlsr_rssi_thresh(struct iwl_mld *mld,
				 const struct cfg80211_chan_def *chandef,
				 bool low)
{
	if (WARN_ON(chandef->chan->band != NL80211_BAND_2GHZ &&
		    chandef->chan->band != NL80211_BAND_5GHZ &&
		    chandef->chan->band != NL80211_BAND_6GHZ))
		return S8_MAX;

#define RSSI_THRESHOLD(_low, _bw) \
	(_low) ? IWL_MLD_LOW_RSSI_THRESH_##_bw##MHZ \
	       : IWL_MLD_HIGH_RSSI_THRESH_##_bw##MHZ

	switch (chandef->width) {
	case NL80211_CHAN_WIDTH_20_NOHT:
	case NL80211_CHAN_WIDTH_20:
	/* 320 MHz has the same thresholds as 20 MHz */
	case NL80211_CHAN_WIDTH_320:
		return RSSI_THRESHOLD(low, 20);
	case NL80211_CHAN_WIDTH_40:
		return RSSI_THRESHOLD(low, 40);
	case NL80211_CHAN_WIDTH_80:
		return RSSI_THRESHOLD(low, 80);
	case NL80211_CHAN_WIDTH_160:
		return RSSI_THRESHOLD(low, 160);
	default:
		WARN_ON(1);
		return S8_MAX;
	}
#undef RSSI_THRESHOLD
}

#define IWL_MLD_BT_COEX_DISABLE_EMLSR_RSSI_THRESH	-69
#define IWL_MLD_BT_COEX_ENABLE_EMLSR_RSSI_THRESH	-63
#define IWL_MLD_BT_COEX_WIFI_LOSS_THRESH		7

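/*
 * Entry is evaluated against the higher -63 dBm threshold and exit against
 * -69 dBm, which gives the BT-coex decision some RSSI hysteresis between
 * entering and leaving EMLSR. At or below the selected threshold the
 * low-RSSI wifi-loss table is used, above it the mid/high-RSSI table.
 */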
VISIBLE_IF_IWLWIFI_KUNIT
bool
iwl_mld_bt_allows_emlsr(struct iwl_mld *mld, struct ieee80211_bss_conf *link,
			bool check_entry)
{
	int bt_penalty, rssi_thresh;
	s32 link_rssi;

	if (WARN_ON_ONCE(!link->bss))
		return false;

	link_rssi = MBM_TO_DBM(link->bss->signal);
	rssi_thresh = check_entry ?
		      IWL_MLD_BT_COEX_ENABLE_EMLSR_RSSI_THRESH :
		      IWL_MLD_BT_COEX_DISABLE_EMLSR_RSSI_THRESH;
	/* No valid RSSI - force to take low rssi */
	if (!link_rssi)
		link_rssi = rssi_thresh - 1;

	if (link_rssi > rssi_thresh)
		bt_penalty = max(mld->last_bt_notif.wifi_loss_mid_high_rssi[PHY_BAND_24][0],
				 mld->last_bt_notif.wifi_loss_mid_high_rssi[PHY_BAND_24][1]);
	else
		bt_penalty = max(mld->last_bt_notif.wifi_loss_low_rssi[PHY_BAND_24][0],
				 mld->last_bt_notif.wifi_loss_low_rssi[PHY_BAND_24][1]);

	IWL_DEBUG_EHT(mld, "BT penalty for link-id %0X is %d\n",
		      link->link_id, bt_penalty);
	return bt_penalty < IWL_MLD_BT_COEX_WIFI_LOSS_THRESH;
}
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_bt_allows_emlsr);

static u32
iwl_mld_emlsr_disallowed_with_link(struct iwl_mld *mld,
				   struct ieee80211_vif *vif,
				   struct iwl_mld_link_sel_data *link,
				   bool primary)
{
	struct wiphy *wiphy = mld->wiphy;
	struct ieee80211_bss_conf *conf;
	u32 ret = 0;

	conf = wiphy_dereference(wiphy, vif->link_conf[link->link_id]);
	if (WARN_ON_ONCE(!conf))
		return IWL_MLD_EMLSR_EXIT_INVALID;

	if (link->chandef->chan->band == NL80211_BAND_2GHZ &&
	    !iwl_mld_bt_allows_emlsr(mld, conf, true))
		ret |= IWL_MLD_EMLSR_EXIT_BT_COEX;

	if (link->signal <
	    iwl_mld_get_emlsr_rssi_thresh(mld, link->chandef, false))
		ret |= IWL_MLD_EMLSR_EXIT_LOW_RSSI;

	if (conf->csa_active)
		ret |= IWL_MLD_EMLSR_EXIT_CSA;

	if (ret) {
		IWL_DEBUG_INFO(mld,
			       "Link %d is not allowed for EMLSR as %s\n",
			       link->link_id,
			       primary ? "primary" : "secondary");
		iwl_mld_print_emlsr_exit(mld, ret);
	}

	return ret;
}

static u8
iwl_mld_set_link_sel_data(struct iwl_mld *mld,
			  struct ieee80211_vif *vif,
			  struct iwl_mld_link_sel_data *data,
			  unsigned long usable_links,
			  u8 *best_link_idx)
{
	u8 n_data = 0;
	u16 max_grade = 0;
	unsigned long link_id;

	/*
	 * TODO: don't select links that weren't discovered in the last scan
	 * This requires mac80211 (or cfg80211) changes to forward/track when
	 * a BSS was last updated. cfg80211 already tracks this information but
	 * it is not exposed within the kernel.
	 */
	for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
		struct ieee80211_bss_conf *link_conf =
			link_conf_dereference_protected(vif, link_id);

		if (WARN_ON_ONCE(!link_conf))
			continue;

		/* Ignore any BSS that was not seen in the last MLO scan */
		if (ktime_before(link_conf->bss->ts_boottime,
				 mld->scan.last_mlo_scan_time))
			continue;

		data[n_data].link_id = link_id;
		data[n_data].chandef = &link_conf->chanreq.oper;
		data[n_data].signal = MBM_TO_DBM(link_conf->bss->signal);
		data[n_data].grade = iwl_mld_get_link_grade(mld, link_conf);

		if (n_data == 0 || data[n_data].grade > max_grade) {
			max_grade = data[n_data].grade;
			*best_link_idx = n_data;
		}
		n_data++;
	}

	return n_data;
}

static u32
iwl_mld_get_min_chan_load_thresh(struct ieee80211_chanctx_conf *chanctx)
{
	const struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(chanctx);

	switch (phy->chandef.width) {
	case NL80211_CHAN_WIDTH_320:
	case NL80211_CHAN_WIDTH_160:
		return 5;
	case NL80211_CHAN_WIDTH_80:
		return 7;
	default:
		break;
	}
	return 10;
}

static bool
iwl_mld_channel_load_allows_emlsr(struct iwl_mld *mld,
				  struct ieee80211_vif *vif,
				  const struct iwl_mld_link_sel_data *a,
				  const struct iwl_mld_link_sel_data *b)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld_link *link_a =
		iwl_mld_link_dereference_check(mld_vif, a->link_id);
	struct ieee80211_chanctx_conf *chanctx_a = NULL;
	u32 bw_a, bw_b, ratio;
	u32 primary_load_perc;

	if (!link_a || !link_a->active) {
		IWL_DEBUG_EHT(mld, "Primary link is not active. Can't enter EMLSR\n");
		return false;
	}

	chanctx_a = wiphy_dereference(mld->wiphy, link_a->chan_ctx);

	if (WARN_ON(!chanctx_a))
		return false;

	primary_load_perc =
		iwl_mld_phy_from_mac80211(chanctx_a)->avg_channel_load_not_by_us;

	IWL_DEBUG_EHT(mld, "Average channel load not by us: %u\n", primary_load_perc);

	if (primary_load_perc < iwl_mld_get_min_chan_load_thresh(chanctx_a)) {
		IWL_DEBUG_EHT(mld, "Channel load is below the minimum threshold\n");
		return false;
	}

	if (iwl_mld_vif_low_latency(mld_vif)) {
		IWL_DEBUG_EHT(mld, "Low latency vif, EMLSR is allowed\n");
		return true;
	}

	if (a->chandef->width <= b->chandef->width)
		return true;

	bw_a = cfg80211_chandef_get_width(a->chandef);
	bw_b = cfg80211_chandef_get_width(b->chandef);
	ratio = bw_a / bw_b;

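	/*
	 * Example: a 160 MHz primary paired with an 80 MHz secondary gives
	 * ratio = 2, so EMLSR is only considered worthwhile when more than
	 * 25% of the primary channel's airtime is used by others.
	 */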
	switch (ratio) {
	case 2:
		return primary_load_perc > 25;
	case 4:
		return primary_load_perc > 40;
	case 8:
	case 16:
		return primary_load_perc > 50;
	}

	return false;
}

VISIBLE_IF_KUNIT u32
iwl_mld_emlsr_pair_state(struct ieee80211_vif *vif,
			 struct iwl_mld_link_sel_data *a,
			 struct iwl_mld_link_sel_data *b)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld *mld = mld_vif->mld;
	u32 reason_mask = 0;

	/* Per-link considerations */
	reason_mask = iwl_mld_emlsr_disallowed_with_link(mld, vif, a, true);
	if (reason_mask)
		return reason_mask;

	reason_mask = iwl_mld_emlsr_disallowed_with_link(mld, vif, b, false);
	if (reason_mask)
		return reason_mask;

	if (a->chandef->chan->band == b->chandef->chan->band) {
		const struct cfg80211_chan_def *c_low = a->chandef;
		const struct cfg80211_chan_def *c_high = b->chandef;
		u32 c_low_upper_edge, c_high_lower_edge;

		if (c_low->chan->center_freq > c_high->chan->center_freq)
			swap(c_low, c_high);

		c_low_upper_edge = c_low->chan->center_freq +
				   cfg80211_chandef_get_width(c_low) / 2;
		c_high_lower_edge = c_high->chan->center_freq -
				    cfg80211_chandef_get_width(c_high) / 2;

		if (a->chandef->chan->band == NL80211_BAND_5GHZ &&
		    c_low_upper_edge <= 5330 && c_high_lower_edge >= 5490) {
			/* This case is fine - HW/FW can deal with it, there's
			 * enough separation between the two channels.
			 */
		} else {
			reason_mask |= IWL_MLD_EMLSR_EXIT_EQUAL_BAND;
		}
	}
	if (!iwl_mld_channel_load_allows_emlsr(mld, vif, a, b))
		reason_mask |= IWL_MLD_EMLSR_EXIT_CHAN_LOAD;

	if (reason_mask) {
		IWL_DEBUG_INFO(mld,
			       "Links %d and %d are not a valid pair for EMLSR\n",
			       a->link_id, b->link_id);
		IWL_DEBUG_INFO(mld,
			       "Links bandwidth are: %d and %d\n",
			       nl80211_chan_width_to_mhz(a->chandef->width),
			       nl80211_chan_width_to_mhz(b->chandef->width));
		iwl_mld_print_emlsr_exit(mld, reason_mask);
	}

	return reason_mask;
}
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_emlsr_pair_state);

/* Calculation is done with fixed-point with a scaling factor of 1/256 */
#define SCALE_FACTOR 256

/*
 * Returns the combined grade of two given links.
 * Returns 0 if EMLSR is not allowed with these 2 links.
 */
static
unsigned int iwl_mld_get_emlsr_grade(struct iwl_mld *mld,
				     struct ieee80211_vif *vif,
				     struct iwl_mld_link_sel_data *a,
				     struct iwl_mld_link_sel_data *b,
				     u8 *primary_id)
{
	struct ieee80211_bss_conf *primary_conf;
	struct wiphy *wiphy = ieee80211_vif_to_wdev(vif)->wiphy;
	unsigned int primary_load;

	lockdep_assert_wiphy(wiphy);

	/* a is always primary, b is always secondary */
	if (b->grade > a->grade)
		swap(a, b);

	*primary_id = a->link_id;

	if (iwl_mld_emlsr_pair_state(vif, a, b))
		return 0;

	primary_conf = wiphy_dereference(wiphy, vif->link_conf[*primary_id]);

	if (WARN_ON_ONCE(!primary_conf))
		return 0;

	primary_load = iwl_mld_get_chan_load(mld, primary_conf);

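	/*
	 * Example (assuming iwl_mld_get_chan_load() reports load on the same
	 * 1/256 fixed-point scale): with a->grade = 100, b->grade = 80 and
	 * primary_load = 128 (~50%), the combined grade below is
	 * 100 + (80 * 128) / 256 = 140.
	 */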
	/* The more the primary link is loaded, the more worthwhile EMLSR becomes */
	return a->grade + ((b->grade * primary_load) / SCALE_FACTOR);
}

static void _iwl_mld_select_links(struct iwl_mld *mld,
				  struct ieee80211_vif *vif)
{
	struct iwl_mld_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
	struct iwl_mld_link_sel_data *best_link;
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int max_active_links = iwl_mld_max_active_links(mld, vif);
	u16 new_active, usable_links = ieee80211_vif_usable_links(vif);
	u8 best_idx, new_primary, n_data;
	u16 max_grade;

	lockdep_assert_wiphy(mld->wiphy);

	if (!mld_vif->authorized || hweight16(usable_links) <= 1)
		return;

	if (WARN(ktime_before(mld->scan.last_mlo_scan_time,
			      ktime_sub_ns(ktime_get_boottime_ns(),
					   5ULL * NSEC_PER_SEC)),
		 "Last MLO scan was too long ago, can't select links\n"))
		return;

	/* The logic below is simple and not suited for more than 2 links */
	WARN_ON_ONCE(max_active_links > 2);

	n_data = iwl_mld_set_link_sel_data(mld, vif, data, usable_links,
					   &best_idx);

	if (WARN(!n_data, "Couldn't find a valid grade for any link!\n"))
		return;

	/* Default to selecting the single best link */
	best_link = &data[best_idx];
	new_primary = best_link->link_id;
	new_active = BIT(best_link->link_id);
	max_grade = best_link->grade;

	/* If EMLSR is not possible, activate the best link */
	if (max_active_links == 1 || n_data == 1 ||
	    !iwl_mld_vif_has_emlsr_cap(vif) || !IWL_MLD_AUTO_EML_ENABLE ||
	    mld_vif->emlsr.blocked_reasons)
		goto set_active;

	/* Try to find the best link combination */
	for (u8 a = 0; a < n_data; a++) {
		for (u8 b = a + 1; b < n_data; b++) {
			u8 best_in_pair;
			u16 emlsr_grade =
				iwl_mld_get_emlsr_grade(mld, vif,
							&data[a], &data[b],
							&best_in_pair);

			/*
			 * Skip only if the grade is strictly lower, so that
			 * an EMLSR combination is preferred over a single
			 * link with an equal grade.
			 */
			if (emlsr_grade < max_grade)
				continue;

			max_grade = emlsr_grade;
			new_primary = best_in_pair;
			new_active = BIT(data[a].link_id) |
				     BIT(data[b].link_id);
		}
	}

set_active:
	IWL_DEBUG_INFO(mld, "Link selection result: 0x%x. Primary = %d\n",
		       new_active, new_primary);

	mld_vif->emlsr.selected_primary = new_primary;
	mld_vif->emlsr.selected_links = new_active;

	ieee80211_set_active_links_async(vif, new_active);
}

static void iwl_mld_vif_iter_select_links(void *_data, u8 *mac,
					  struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld *mld = mld_vif->mld;

	_iwl_mld_select_links(mld, vif);
}

void iwl_mld_select_links(struct iwl_mld *mld)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_select_links,
						NULL);
}

static void iwl_mld_emlsr_check_bt_iter(void *_data, u8 *mac,
					struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	const struct iwl_bt_coex_profile_notif zero_notif = {};
	struct iwl_mld *mld = mld_vif->mld;
	struct ieee80211_bss_conf *link;
	unsigned int link_id;
	const struct iwl_bt_coex_profile_notif *notif = &mld->last_bt_notif;

	if (!iwl_mld_vif_has_emlsr_cap(vif))
		return;

	/* zeroed structure means that BT is OFF */
	if (!memcmp(notif, &zero_notif, sizeof(*notif))) {
		iwl_mld_retry_emlsr(mld, vif);
		return;
	}

	for_each_vif_active_link(vif, link, link_id) {
		bool emlsr_active, emlsr_allowed;

		if (WARN_ON(!link->chanreq.oper.chan))
			continue;

		if (link->chanreq.oper.chan->band != NL80211_BAND_2GHZ)
			continue;

		emlsr_active = iwl_mld_emlsr_active(vif);
		emlsr_allowed = iwl_mld_bt_allows_emlsr(mld, link,
							!emlsr_active);
		if (emlsr_allowed && !emlsr_active) {
			iwl_mld_retry_emlsr(mld, vif);
			return;
		}

		if (!emlsr_allowed && emlsr_active) {
			iwl_mld_exit_emlsr(mld, vif,
					   IWL_MLD_EMLSR_EXIT_BT_COEX,
					   iwl_mld_get_primary_link(vif));
			return;
		}
	}
}

void iwl_mld_emlsr_check_bt(struct iwl_mld *mld)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_emlsr_check_bt_iter,
						NULL);
}

struct iwl_mld_chan_load_data {
	struct iwl_mld_phy *phy;
	u32 prev_chan_load_not_by_us;
};

static void iwl_mld_chan_load_update_iter(void *_data, u8 *mac,
					  struct ieee80211_vif *vif)
{
	struct iwl_mld_chan_load_data *data = _data;
	const struct iwl_mld_phy *phy = data->phy;
	struct ieee80211_chanctx_conf *chanctx =
		container_of((const void *)phy, struct ieee80211_chanctx_conf,
			     drv_priv);
	struct iwl_mld *mld = iwl_mld_vif_from_mac80211(vif)->mld;
	struct ieee80211_bss_conf *prim_link;
	unsigned int prim_link_id;

	prim_link_id = iwl_mld_get_primary_link(vif);
	prim_link = link_conf_dereference_protected(vif, prim_link_id);

	if (WARN_ON(!prim_link))
		return;

	if (chanctx != rcu_access_pointer(prim_link->chanctx_conf))
		return;

	if (iwl_mld_emlsr_active(vif)) {
		int chan_load = iwl_mld_get_chan_load_by_others(mld, prim_link,
								true);

		if (chan_load < 0)
			return;

		/* chan_load is in range [0,255] */
		if (chan_load < NORMALIZE_PERCENT_TO_255(IWL_MLD_EXIT_EMLSR_CHAN_LOAD))
			iwl_mld_exit_emlsr(mld, vif,
					   IWL_MLD_EMLSR_EXIT_CHAN_LOAD,
					   prim_link_id);
	} else {
		u32 old_chan_load = data->prev_chan_load_not_by_us;
		u32 new_chan_load = phy->avg_channel_load_not_by_us;
		u32 min_thresh = iwl_mld_get_min_chan_load_thresh(chanctx);

#define THRESHOLD_CROSSED(threshold) \
	(old_chan_load <= (threshold) && new_chan_load > (threshold))

		if (THRESHOLD_CROSSED(min_thresh) || THRESHOLD_CROSSED(25) ||
		    THRESHOLD_CROSSED(40) || THRESHOLD_CROSSED(50))
			iwl_mld_retry_emlsr(mld, vif);
#undef THRESHOLD_CROSSED
	}
}

void iwl_mld_emlsr_check_chan_load(struct ieee80211_hw *hw,
				   struct iwl_mld_phy *phy,
				   u32 prev_chan_load_not_by_us)
{
	struct iwl_mld_chan_load_data data = {
		.phy = phy,
		.prev_chan_load_not_by_us = prev_chan_load_not_by_us,
	};

	ieee80211_iterate_active_interfaces_mtx(hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_chan_load_update_iter,
						&data);
}

void iwl_mld_retry_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	if (!iwl_mld_vif_has_emlsr_cap(vif) || iwl_mld_emlsr_active(vif) ||
	    mld_vif->emlsr.blocked_reasons)
		return;

	iwl_mld_int_mlo_scan(mld, vif);
}