xref: /linux/drivers/net/wireless/intel/iwlwifi/mld/mlo.c (revision d30c1683aaecb93d2ab95685dc4300a33d3cea7a)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (C) 2024-2025 Intel Corporation
4  */
5 #include "mlo.h"
6 #include "phy.h"
7 
/* Block reasons helper
 *
 * X-macro listing the suffix of every IWL_MLD_EMLSR_BLOCKED_* reason.
 * Each user expands HOW(x) once per reason, so the string/print helpers
 * below stay in sync with the enum automatically.
 */
#define HANDLE_EMLSR_BLOCKED_REASONS(HOW)	\
	HOW(PREVENTION)			\
	HOW(WOWLAN)			\
	HOW(ROC)			\
	HOW(NON_BSS)			\
	HOW(TMP_NON_BSS)		\
	HOW(TPT)
16 
/*
 * Return the human-readable name of a single EMLSR block reason.
 * @blocked must be exactly one IWL_MLD_EMLSR_BLOCKED_* bit; any other
 * value yields "ERROR".
 */
static const char *
iwl_mld_get_emlsr_blocked_string(enum iwl_mld_emlsr_blocked blocked)
{
	/* Using switch without "default" will warn about missing entries  */
	switch (blocked) {
#define REASON_CASE(x) case IWL_MLD_EMLSR_BLOCKED_##x: return #x;
	HANDLE_EMLSR_BLOCKED_REASONS(REASON_CASE)
#undef REASON_CASE
	}

	return "ERROR";
}
29 
/* Debug-print every block reason set in @mask as "[A][B]... (0xmask)" */
static void iwl_mld_print_emlsr_blocked(struct iwl_mld *mld, u32 mask)
{
/* NAME_FMT contributes one "%s" per reason; NAME_PR the matching argument */
#define NAME_FMT(x) "%s"
#define NAME_PR(x) (mask & IWL_MLD_EMLSR_BLOCKED_##x) ? "[" #x "]" : "",
	IWL_DEBUG_EHT(mld,
		      "EMLSR blocked = " HANDLE_EMLSR_BLOCKED_REASONS(NAME_FMT)
		      " (0x%x)\n", HANDLE_EMLSR_BLOCKED_REASONS(NAME_PR) mask);
#undef NAME_FMT
#undef NAME_PR
}
40 
/* Exit reasons helper
 *
 * X-macro listing the suffix of every IWL_MLD_EMLSR_EXIT_* reason,
 * mirroring HANDLE_EMLSR_BLOCKED_REASONS above.
 */
#define HANDLE_EMLSR_EXIT_REASONS(HOW)	\
	HOW(BLOCK)			\
	HOW(MISSED_BEACON)		\
	HOW(FAIL_ENTRY)			\
	HOW(CSA)			\
	HOW(EQUAL_BAND)			\
	HOW(LOW_RSSI)			\
	HOW(LINK_USAGE)			\
	HOW(BT_COEX)			\
	HOW(CHAN_LOAD)			\
	HOW(RFI)			\
	HOW(FW_REQUEST)			\
	HOW(INVALID)
55 
/*
 * Return the human-readable name of a single EMLSR exit reason.
 * @exit must be exactly one IWL_MLD_EMLSR_EXIT_* bit; any other value
 * yields "ERROR".
 */
static const char *
iwl_mld_get_emlsr_exit_string(enum iwl_mld_emlsr_exit exit)
{
	/* Using switch without "default" will warn about missing entries  */
	switch (exit) {
#define REASON_CASE(x) case IWL_MLD_EMLSR_EXIT_##x: return #x;
	HANDLE_EMLSR_EXIT_REASONS(REASON_CASE)
#undef REASON_CASE
	}

	return "ERROR";
}
68 
/* Debug-print every exit reason set in @mask as "[A][B]... (0xmask)" */
static void iwl_mld_print_emlsr_exit(struct iwl_mld *mld, u32 mask)
{
/* NAME_FMT contributes one "%s" per reason; NAME_PR the matching argument */
#define NAME_FMT(x) "%s"
#define NAME_PR(x) (mask & IWL_MLD_EMLSR_EXIT_##x) ? "[" #x "]" : "",
	IWL_DEBUG_EHT(mld,
		      "EMLSR exit = " HANDLE_EMLSR_EXIT_REASONS(NAME_FMT)
		      " (0x%x)\n", HANDLE_EMLSR_EXIT_REASONS(NAME_PR) mask);
#undef NAME_FMT
#undef NAME_PR
}
79 
80 void iwl_mld_emlsr_prevent_done_wk(struct wiphy *wiphy, struct wiphy_work *wk)
81 {
82 	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
83 						   emlsr.prevent_done_wk.work);
84 	struct ieee80211_vif *vif =
85 		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
86 
87 	if (WARN_ON(!(mld_vif->emlsr.blocked_reasons &
88 		      IWL_MLD_EMLSR_BLOCKED_PREVENTION)))
89 		return;
90 
91 	iwl_mld_unblock_emlsr(mld_vif->mld, vif,
92 			      IWL_MLD_EMLSR_BLOCKED_PREVENTION);
93 }
94 
95 void iwl_mld_emlsr_tmp_non_bss_done_wk(struct wiphy *wiphy,
96 				       struct wiphy_work *wk)
97 {
98 	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
99 						   emlsr.tmp_non_bss_done_wk.work);
100 	struct ieee80211_vif *vif =
101 		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
102 
103 	if (WARN_ON(!(mld_vif->emlsr.blocked_reasons &
104 		      IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS)))
105 		return;
106 
107 	iwl_mld_unblock_emlsr(mld_vif->mld, vif,
108 			      IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS);
109 }
110 
/* Timeouts below are expressed in jiffies (seconds * HZ) */
#define IWL_MLD_TRIGGER_LINK_SEL_TIME	(HZ * IWL_MLD_TRIGGER_LINK_SEL_TIME_SEC)
#define IWL_MLD_SCAN_EXPIRE_TIME	(HZ * IWL_MLD_SCAN_EXPIRE_TIME_SEC)

/* Exit reasons that can cause longer EMLSR prevention */
#define IWL_MLD_PREVENT_EMLSR_REASONS	(IWL_MLD_EMLSR_EXIT_MISSED_BEACON	| \
					 IWL_MLD_EMLSR_EXIT_LINK_USAGE		| \
					 IWL_MLD_EMLSR_EXIT_FW_REQUEST)
/* Window within which repeated exits for the same reason are counted */
#define IWL_MLD_PREVENT_EMLSR_TIMEOUT	(HZ * 400)

/* Escalating prevention durations for repeated exits */
#define IWL_MLD_EMLSR_PREVENT_SHORT	(HZ * 300)
#define IWL_MLD_EMLSR_PREVENT_LONG	(HZ * 600)
122 
/*
 * Track repeated EMLSR exits and arm a time-based "prevention" block.
 *
 * Repeated exits for the same reason (from IWL_MLD_PREVENT_EMLSR_REASONS)
 * within IWL_MLD_PREVENT_EMLSR_TIMEOUT escalate the prevention duration:
 * first exit uses IWL_MLD_TRIGGER_LINK_SEL_TIME, the 2nd repeat uses
 * IWL_MLD_EMLSR_PREVENT_SHORT, further repeats use _LONG.
 * The prevention is lifted by iwl_mld_emlsr_prevent_done_wk.
 */
static void iwl_mld_check_emlsr_prevention(struct iwl_mld *mld,
					   struct iwl_mld_vif *mld_vif,
					   enum iwl_mld_emlsr_exit reason)
{
	unsigned long delay;

	/*
	 * Reset the counter if more than 400 seconds have passed between one
	 * exit and the other, or if we exited due to a different reason.
	 * Will also reset the counter after the long prevention is done.
	 */
	if (time_after(jiffies, mld_vif->emlsr.last_exit_ts +
				IWL_MLD_PREVENT_EMLSR_TIMEOUT) ||
	    mld_vif->emlsr.last_exit_reason != reason)
		mld_vif->emlsr.exit_repeat_count = 0;

	mld_vif->emlsr.last_exit_reason = reason;
	mld_vif->emlsr.last_exit_ts = jiffies;
	mld_vif->emlsr.exit_repeat_count++;

	/*
	 * Do not add a prevention when the reason was a block. For a block,
	 * EMLSR will be enabled again on unblock.
	 */
	if (reason == IWL_MLD_EMLSR_EXIT_BLOCK)
		return;

	/* Set prevention for a minimum of 30 seconds */
	mld_vif->emlsr.blocked_reasons |= IWL_MLD_EMLSR_BLOCKED_PREVENTION;
	delay = IWL_MLD_TRIGGER_LINK_SEL_TIME;

	/* Handle repeats for reasons that can cause long prevention */
	if (mld_vif->emlsr.exit_repeat_count > 1 &&
	    reason & IWL_MLD_PREVENT_EMLSR_REASONS) {
		if (mld_vif->emlsr.exit_repeat_count == 2)
			delay = IWL_MLD_EMLSR_PREVENT_SHORT;
		else
			delay = IWL_MLD_EMLSR_PREVENT_LONG;

		/*
		 * The timeouts are chosen so that this will not happen, i.e.
		 * IWL_MLD_EMLSR_PREVENT_LONG > IWL_MLD_PREVENT_EMLSR_TIMEOUT
		 */
		WARN_ON(mld_vif->emlsr.exit_repeat_count > 3);
	}

	IWL_DEBUG_EHT(mld,
		      "Preventing EMLSR for %ld seconds due to %u exits with the reason = %s (0x%x)\n",
		      delay / HZ, mld_vif->emlsr.exit_repeat_count,
		      iwl_mld_get_emlsr_exit_string(reason), reason);

	wiphy_delayed_work_queue(mld->wiphy,
				 &mld_vif->emlsr.prevent_done_wk, delay);
}
177 
178 static void iwl_mld_clear_avg_chan_load_iter(struct ieee80211_hw *hw,
179 					     struct ieee80211_chanctx_conf *ctx,
180 					     void *dat)
181 {
182 	struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
183 
184 	/* It is ok to do it for all chanctx (and not only for the ones that
185 	 * belong to the EMLSR vif) since EMLSR is not allowed if there is
186 	 * another vif.
187 	 */
188 	phy->avg_channel_load_not_by_us = 0;
189 }
190 
/*
 * Leave EMLSR by collapsing to a single active link.
 *
 * @exit: the reason to report/record for the exit
 * @link_to_keep: link ID to remain active (falls back to the lowest
 *	currently-active link if invalid)
 * @sync: if true, switch links synchronously and return the result of
 *	ieee80211_set_active_links(); otherwise switch asynchronously and
 *	return 0.
 *
 * No-op (returns 0) when auto-EML is disabled (except for entry failure,
 * which must always be handled) or when EMLSR is not currently active.
 * Also records the exit for prevention tracking and clears the cached
 * per-phy channel load.
 */
static int _iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			       enum iwl_mld_emlsr_exit exit, u8 link_to_keep,
			       bool sync)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	u16 new_active_links;
	int ret = 0;

	lockdep_assert_wiphy(mld->wiphy);

	/* On entry failure need to exit anyway, even if entered from debugfs */
	if (exit != IWL_MLD_EMLSR_EXIT_FAIL_ENTRY && !IWL_MLD_AUTO_EML_ENABLE)
		return 0;

	/* Ignore exit request if EMLSR is not active */
	if (!iwl_mld_emlsr_active(vif))
		return 0;

	if (WARN_ON(!ieee80211_vif_is_mld(vif) || !mld_vif->authorized))
		return 0;

	if (WARN_ON(!(vif->active_links & BIT(link_to_keep))))
		link_to_keep = __ffs(vif->active_links);

	new_active_links = BIT(link_to_keep);
	IWL_DEBUG_EHT(mld,
		      "Exiting EMLSR. reason = %s (0x%x). Current active links=0x%x, new active links = 0x%x\n",
		      iwl_mld_get_emlsr_exit_string(exit), exit,
		      vif->active_links, new_active_links);

	if (sync)
		ret = ieee80211_set_active_links(vif, new_active_links);
	else
		ieee80211_set_active_links_async(vif, new_active_links);

	/* Update latest exit reason and check EMLSR prevention */
	iwl_mld_check_emlsr_prevention(mld, mld_vif, exit);

	/* channel_load_not_by_us is invalid when in EMLSR.
	 * Clear it so wrong values won't be used.
	 */
	ieee80211_iter_chan_contexts_atomic(mld->hw,
					    iwl_mld_clear_avg_chan_load_iter,
					    NULL);

	return ret;
}
238 
/* Asynchronous (fire-and-forget) variant of _iwl_mld_exit_emlsr() */
void iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			enum iwl_mld_emlsr_exit exit, u8 link_to_keep)
{
	_iwl_mld_exit_emlsr(mld, vif, exit, link_to_keep, false);
}
244 
/*
 * Set a block reason on the vif and exit EMLSR (keeping @link_to_keep).
 *
 * Returns 0 if the reason was already set, if auto-EML is disabled, or if
 * the vif is not EMLSR-capable; otherwise returns the result of the
 * (possibly synchronous) EMLSR exit. Blocking on TPT also cancels the
 * periodic throughput-check work.
 */
static int _iwl_mld_emlsr_block(struct iwl_mld *mld, struct ieee80211_vif *vif,
				enum iwl_mld_emlsr_blocked reason,
				u8 link_to_keep, bool sync)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	lockdep_assert_wiphy(mld->wiphy);

	if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif))
		return 0;

	if (mld_vif->emlsr.blocked_reasons & reason)
		return 0;

	mld_vif->emlsr.blocked_reasons |= reason;

	IWL_DEBUG_EHT(mld, "Blocking EMLSR mode. reason = %s (0x%x)\n",
		      iwl_mld_get_emlsr_blocked_string(reason), reason);
	iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons);

	/* While blocked on TPT, the periodic check work must not run */
	if (reason == IWL_MLD_EMLSR_BLOCKED_TPT)
		wiphy_delayed_work_cancel(mld_vif->mld->wiphy,
					  &mld_vif->emlsr.check_tpt_wk);

	return _iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BLOCK,
				   link_to_keep, sync);
}
272 
/* Asynchronous (fire-and-forget) variant of _iwl_mld_emlsr_block() */
void iwl_mld_block_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			 enum iwl_mld_emlsr_blocked reason, u8 link_to_keep)
{
	_iwl_mld_emlsr_block(mld, vif, reason, link_to_keep, false);
}
278 
/* Synchronous variant of _iwl_mld_emlsr_block(); returns the exit result */
int iwl_mld_block_emlsr_sync(struct iwl_mld *mld, struct ieee80211_vif *vif,
			     enum iwl_mld_emlsr_blocked reason, u8 link_to_keep)
{
	return _iwl_mld_emlsr_block(mld, vif, reason, link_to_keep, true);
}
284 
/* Duration of the temporary non-BSS block, lifted by tmp_non_bss_done_wk */
#define IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS_TIMEOUT (10 * HZ)

/*
 * Interface iterator: synchronously apply a temporary TMP_NON_BSS block
 * on each EMLSR-capable vif and arm the work that lifts it after the
 * timeout above. If the synchronous block fails, the unblock work is not
 * queued (the block reason was still set by the failed attempt —
 * NOTE(review): confirm the intended cleanup on this error path).
 */
static void iwl_mld_vif_iter_emlsr_block_tmp_non_bss(void *_data, u8 *mac,
						     struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int ret;

	if (!iwl_mld_vif_has_emlsr_cap(vif))
		return;

	ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif,
				       IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS,
				       iwl_mld_get_primary_link(vif));
	if (ret)
		return;

	wiphy_delayed_work_queue(mld_vif->mld->wiphy,
				 &mld_vif->emlsr.tmp_non_bss_done_wk,
				 IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS_TIMEOUT);
}
306 
/* Temporarily block EMLSR on all active EMLSR-capable interfaces */
void iwl_mld_emlsr_block_tmp_non_bss(struct iwl_mld *mld)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_emlsr_block_tmp_non_bss,
						NULL);
}
314 
315 static void _iwl_mld_select_links(struct iwl_mld *mld,
316 				  struct ieee80211_vif *vif);
317 
/*
 * Clear a block reason from the vif. When the TPT block is lifted, the
 * periodic throughput-check work is re-armed. When the last remaining
 * reason is cleared, an internal MLO scan is triggered so that link
 * selection may re-enter EMLSR.
 */
void iwl_mld_unblock_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			   enum iwl_mld_emlsr_blocked reason)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	lockdep_assert_wiphy(mld->wiphy);

	if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif))
		return;

	if (!(mld_vif->emlsr.blocked_reasons & reason))
		return;

	mld_vif->emlsr.blocked_reasons &= ~reason;

	IWL_DEBUG_EHT(mld, "Unblocking EMLSR mode. reason = %s (0x%x)\n",
		      iwl_mld_get_emlsr_blocked_string(reason), reason);
	iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons);

	if (reason == IWL_MLD_EMLSR_BLOCKED_TPT)
		wiphy_delayed_work_queue(mld_vif->mld->wiphy,
					 &mld_vif->emlsr.check_tpt_wk,
					 round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));

	/* Other reasons still set - EMLSR stays disabled for now */
	if (mld_vif->emlsr.blocked_reasons)
		return;

	IWL_DEBUG_EHT(mld, "EMLSR is unblocked\n");
	iwl_mld_int_mlo_scan(mld, vif);
}
348 
/*
 * Interface iterator handling an ESR mode recommendation from the FW.
 *
 * Only ESR_RECOMMEND_LEAVE triggers an EMLSR exit. ESR_FORCE_LEAVE logs
 * its reason and then falls through to the "unexpected" warning —
 * NOTE(review): no forced exit is performed for ESR_FORCE_LEAVE here;
 * confirm with the FW API that this is the intended handling.
 */
static void
iwl_mld_vif_iter_emlsr_mode_notif(void *data, u8 *mac,
				  struct ieee80211_vif *vif)
{
	const struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	const struct iwl_esr_mode_notif *notif = (void *)data;
	enum iwl_mvm_fw_esr_recommendation action = le32_to_cpu(notif->action);

	if (!iwl_mld_vif_has_emlsr_cap(vif))
		return;

	switch (action) {
	case ESR_RECOMMEND_LEAVE:
		IWL_DEBUG_EHT(mld_vif->mld,
			      "FW recommend leave reason = 0x%x\n",
			      le32_to_cpu(notif->leave_reason_mask));

		iwl_mld_exit_emlsr(mld_vif->mld, vif,
				   IWL_MLD_EMLSR_EXIT_FW_REQUEST,
				   iwl_mld_get_primary_link(vif));
		break;
	case ESR_FORCE_LEAVE:
		IWL_DEBUG_EHT(mld_vif->mld, "FW force leave reason = 0x%x\n",
			      le32_to_cpu(notif->leave_reason_mask));
		fallthrough;
	case ESR_RECOMMEND_ENTER:
	default:
		IWL_WARN(mld_vif->mld, "Unexpected EMLSR notification: %d\n",
			 action);
	}
}
380 
/* Dispatch an ESR mode notification to all active interfaces */
void iwl_mld_handle_emlsr_mode_notif(struct iwl_mld *mld,
				     struct iwl_rx_packet *pkt)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_emlsr_mode_notif,
						pkt->data);
}
389 
390 static void
391 iwl_mld_vif_iter_disconnect_emlsr(void *data, u8 *mac,
392 				  struct ieee80211_vif *vif)
393 {
394 	if (!iwl_mld_vif_has_emlsr_cap(vif))
395 		return;
396 
397 	ieee80211_connection_loss(vif);
398 }
399 
/*
 * Handle an EMLSR transition-failure notification from the FW.
 *
 * If the FW reported an unknown link, all EMLSR-capable interfaces are
 * disconnected. A failed link *deactivation* also disconnects, since the
 * driver/FW state can no longer be trusted. A failed *activation* simply
 * exits EMLSR back to the link the FW says is still valid.
 */
void iwl_mld_handle_emlsr_trans_fail_notif(struct iwl_mld *mld,
					   struct iwl_rx_packet *pkt)
{
	const struct iwl_esr_trans_fail_notif *notif = (const void *)pkt->data;
	u32 fw_link_id = le32_to_cpu(notif->link_id);
	struct ieee80211_bss_conf *bss_conf =
		iwl_mld_fw_id_to_link_conf(mld, fw_link_id);

	IWL_DEBUG_EHT(mld,
		      "Failed to %s EMLSR on link %d (FW: %d), reason %d\n",
		      le32_to_cpu(notif->activation) ? "enter" : "exit",
		      bss_conf ? bss_conf->link_id : -1,
		      le32_to_cpu(notif->link_id),
		      le32_to_cpu(notif->err_code));

	if (IWL_FW_CHECK(mld, !bss_conf,
			 "FW reported failure to %sactivate EMLSR on a non-existing link: %d\n",
			 le32_to_cpu(notif->activation) ? "" : "de",
			 fw_link_id)) {
		ieee80211_iterate_active_interfaces_mtx(
			mld->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mld_vif_iter_disconnect_emlsr, NULL);
		return;
	}

	/* Disconnect if we failed to deactivate a link */
	if (!le32_to_cpu(notif->activation)) {
		ieee80211_connection_loss(bss_conf->vif);
		return;
	}

	/*
	 * We failed to activate the second link, go back to the link specified
	 * by the firmware as that is the one that is still valid now.
	 */
	iwl_mld_exit_emlsr(mld, bss_conf->vif, IWL_MLD_EMLSR_EXIT_FAIL_ENTRY,
			   bss_conf->link_id);
}
438 
439 /* Active non-station link tracking */
440 static void iwl_mld_count_non_bss_links(void *_data, u8 *mac,
441 					struct ieee80211_vif *vif)
442 {
443 	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
444 	int *count = _data;
445 
446 	if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION)
447 		return;
448 
449 	*count += iwl_mld_count_active_links(mld_vif->mld, vif);
450 }
451 
/* Iterator payload for applying/removing the NON_BSS block */
struct iwl_mld_update_emlsr_block_data {
	/* true to block EMLSR, false to unblock */
	bool block;
	/* last non-zero error from iwl_mld_block_emlsr_sync(), if any */
	int result;
};
456 
457 static void
458 iwl_mld_vif_iter_update_emlsr_non_bss_block(void *_data, u8 *mac,
459 					    struct ieee80211_vif *vif)
460 {
461 	struct iwl_mld_update_emlsr_block_data *data = _data;
462 	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
463 	int ret;
464 
465 	if (data->block) {
466 		ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif,
467 					       IWL_MLD_EMLSR_BLOCKED_NON_BSS,
468 					       iwl_mld_get_primary_link(vif));
469 		if (ret)
470 			data->result = ret;
471 	} else {
472 		iwl_mld_unblock_emlsr(mld_vif->mld, vif,
473 				      IWL_MLD_EMLSR_BLOCKED_NON_BSS);
474 	}
475 }
476 
/*
 * Re-evaluate the NON_BSS EMLSR block across all interfaces.
 *
 * @pending_link_changes: delta of non-station active links about to be
 *	applied (>0 means a non-BSS link is being activated, so counting
 *	can be skipped).
 *
 * Returns 0 on success or the last error from a synchronous block.
 */
int iwl_mld_emlsr_check_non_bss_block(struct iwl_mld *mld,
				      int pending_link_changes)
{
	/* An active link of a non-station vif blocks EMLSR. Upon activation
	 * block EMLSR on the bss vif. Upon deactivation, check if this link
	 * was the last non-station link active, and if so unblock the bss vif
	 */
	struct iwl_mld_update_emlsr_block_data block_data = {};
	int count = pending_link_changes;

	/* No need to count if we are activating a non-BSS link */
	if (count <= 0)
		ieee80211_iterate_active_interfaces_mtx(mld->hw,
							IEEE80211_IFACE_ITER_NORMAL,
							iwl_mld_count_non_bss_links,
							&count);

	/*
	 * We could skip updating it if the block change did not change (and
	 * pending_link_changes is non-zero).
	 */
	block_data.block = !!count;

	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_update_emlsr_non_bss_block,
						&block_data);

	return block_data.result;
}
507 
/* Minimum share (in percent) of traffic the secondary link must carry */
#define EMLSR_SEC_LINK_MIN_PERC 10
/* MPDU totals below which the secondary-link share is not enforced */
#define EMLSR_MIN_TX 3000
#define EMLSR_MIN_RX 400
511 
/*
 * Periodic throughput check for EMLSR.
 *
 * Sums TX/RX MPDU counters across all RX queues and links (clearing them
 * under each queue's lock). If total traffic is below the EMLSR entry
 * threshold, EMLSR is blocked on TPT. If EMLSR has been active for a full
 * counting window but the secondary link carries too small a share of the
 * traffic, EMLSR is exited. Otherwise the work re-arms itself for the
 * next window.
 */
void iwl_mld_emlsr_check_tpt(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
						   emlsr.check_tpt_wk.work);
	struct ieee80211_vif *vif =
		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
	struct iwl_mld *mld = mld_vif->mld;
	struct iwl_mld_sta *mld_sta;
	struct iwl_mld_link *sec_link;
	unsigned long total_tx = 0, total_rx = 0;
	unsigned long sec_link_tx = 0, sec_link_rx = 0;
	u8 sec_link_tx_perc, sec_link_rx_perc;
	s8 sec_link_id;

	if (!iwl_mld_vif_has_emlsr_cap(vif) || !mld_vif->ap_sta)
		return;

	mld_sta = iwl_mld_sta_from_mac80211(mld_vif->ap_sta);

	/* We only count for the AP sta in a MLO connection */
	if (!mld_sta->mpdu_counters)
		return;

	/* This wk should only run when the TPT blocker isn't set.
	 * When the blocker is set, the decision to remove it, as well as
	 * clearing the counters is done in DP (to avoid having a wk every
	 * 5 seconds when idle. When the blocker is unset, we are not idle anyway)
	 */
	if (WARN_ON(mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT))
		return;
	/*
	 * TPT is unblocked, need to check if the TPT criteria is still met.
	 *
	 * If EMLSR is active for at least 5 seconds, then we also
	 * need to check the secondary link requirements.
	 */
	if (iwl_mld_emlsr_active(vif) &&
	    time_is_before_jiffies(mld_vif->emlsr.last_entry_ts +
				   IWL_MLD_TPT_COUNT_WINDOW)) {
		sec_link_id = iwl_mld_get_other_link(vif, iwl_mld_get_primary_link(vif));
		sec_link = iwl_mld_link_dereference_check(mld_vif, sec_link_id);
		if (WARN_ON_ONCE(!sec_link))
			return;
		/* We need the FW ID here */
		sec_link_id = sec_link->fw_id;
	} else {
		/* EMLSR inactive or too fresh: skip the secondary-link check */
		sec_link_id = -1;
	}

	/* Sum up RX and TX MPDUs from the different queues/links */
	for (int q = 0; q < mld->trans->info.num_rxqs; q++) {
		struct iwl_mld_per_q_mpdu_counter *queue_counter =
			&mld_sta->mpdu_counters[q];

		spin_lock_bh(&queue_counter->lock);

		/* The link IDs that doesn't exist will contain 0 */
		for (int link = 0;
		     link < ARRAY_SIZE(queue_counter->per_link);
		     link++) {
			total_tx += queue_counter->per_link[link].tx;
			total_rx += queue_counter->per_link[link].rx;
		}

		if (sec_link_id != -1) {
			sec_link_tx += queue_counter->per_link[sec_link_id].tx;
			sec_link_rx += queue_counter->per_link[sec_link_id].rx;
		}

		/* Start the next counting window from zero */
		memset(queue_counter->per_link, 0,
		       sizeof(queue_counter->per_link));

		spin_unlock_bh(&queue_counter->lock);
	}

	IWL_DEBUG_EHT(mld, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n",
		      total_tx, total_rx);

	/* If we don't have enough MPDUs - exit EMLSR */
	if (total_tx < IWL_MLD_ENTER_EMLSR_TPT_THRESH &&
	    total_rx < IWL_MLD_ENTER_EMLSR_TPT_THRESH) {
		iwl_mld_block_emlsr(mld, vif, IWL_MLD_EMLSR_BLOCKED_TPT,
				    iwl_mld_get_primary_link(vif));
		return;
	}

	/* EMLSR is not active */
	if (sec_link_id == -1)
		goto schedule;

	IWL_DEBUG_EHT(mld, "Secondary Link %d: Tx MPDUs: %ld. Rx MPDUs: %ld\n",
		      sec_link_id, sec_link_tx, sec_link_rx);

	/* Calculate the percentage of the secondary link TX/RX */
	sec_link_tx_perc = total_tx ? sec_link_tx * 100 / total_tx : 0;
	sec_link_rx_perc = total_rx ? sec_link_rx * 100 / total_rx : 0;

	/*
	 * The TX/RX percentage is checked only if it exceeds the required
	 * minimum. In addition, RX is checked only if the TX check failed.
	 */
	if ((total_tx > EMLSR_MIN_TX &&
	     sec_link_tx_perc < EMLSR_SEC_LINK_MIN_PERC) ||
	    (total_rx > EMLSR_MIN_RX &&
	     sec_link_rx_perc < EMLSR_SEC_LINK_MIN_PERC)) {
		iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_LINK_USAGE,
				   iwl_mld_get_primary_link(vif));
		return;
	}

schedule:
	/* Check again when the next window ends  */
	wiphy_delayed_work_queue(mld_vif->mld->wiphy,
				 &mld_vif->emlsr.check_tpt_wk,
				 round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));
}
628 
629 void iwl_mld_emlsr_unblock_tpt_wk(struct wiphy *wiphy, struct wiphy_work *wk)
630 {
631 	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
632 						   emlsr.unblock_tpt_wk);
633 	struct ieee80211_vif *vif =
634 		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
635 
636 	iwl_mld_unblock_emlsr(mld_vif->mld, vif, IWL_MLD_EMLSR_BLOCKED_TPT);
637 }
638 
639 /*
640  * Link selection
641  */
642 
/*
 * Return the RSSI threshold (dBm) used for EMLSR decisions on @chandef.
 *
 * @low: select the low (exit) threshold instead of the high (entry) one.
 *
 * Returns S8_MAX (i.e. a threshold no link can meet) for unsupported
 * bands or channel widths.
 */
s8 iwl_mld_get_emlsr_rssi_thresh(struct iwl_mld *mld,
				 const struct cfg80211_chan_def *chandef,
				 bool low)
{
	if (WARN_ON(chandef->chan->band != NL80211_BAND_2GHZ &&
		    chandef->chan->band != NL80211_BAND_5GHZ &&
		    chandef->chan->band != NL80211_BAND_6GHZ))
		return S8_MAX;

#define RSSI_THRESHOLD(_low, _bw)			\
	(_low) ? IWL_MLD_LOW_RSSI_THRESH_##_bw##MHZ	\
	       : IWL_MLD_HIGH_RSSI_THRESH_##_bw##MHZ

	switch (chandef->width) {
	case NL80211_CHAN_WIDTH_20_NOHT:
	case NL80211_CHAN_WIDTH_20:
	/* 320 MHz has the same thresholds as 20 MHz */
	case NL80211_CHAN_WIDTH_320:
		return RSSI_THRESHOLD(low, 20);
	case NL80211_CHAN_WIDTH_40:
		return RSSI_THRESHOLD(low, 40);
	case NL80211_CHAN_WIDTH_80:
		return RSSI_THRESHOLD(low, 80);
	case NL80211_CHAN_WIDTH_160:
		return RSSI_THRESHOLD(low, 160);
	default:
		WARN_ON(1);
		return S8_MAX;
	}
#undef RSSI_THRESHOLD
}
674 
/*
 * Per-link EMLSR eligibility check.
 *
 * Returns a mask of IWL_MLD_EMLSR_EXIT_* reasons that make @link unfit
 * for EMLSR (BT coex on 2.4 GHz, RSSI below the entry threshold, active
 * CSA), or 0 if the link is acceptable. @primary only affects the debug
 * output.
 */
static u32
iwl_mld_emlsr_disallowed_with_link(struct iwl_mld *mld,
				   struct ieee80211_vif *vif,
				   struct iwl_mld_link_sel_data *link,
				   bool primary)
{
	struct wiphy *wiphy = mld->wiphy;
	struct ieee80211_bss_conf *conf;
	u32 ret = 0;

	conf = wiphy_dereference(wiphy, vif->link_conf[link->link_id]);
	if (WARN_ON_ONCE(!conf))
		return IWL_MLD_EMLSR_EXIT_INVALID;

	if (link->chandef->chan->band == NL80211_BAND_2GHZ && mld->bt_is_active)
		ret |= IWL_MLD_EMLSR_EXIT_BT_COEX;

	if (link->signal <
	    iwl_mld_get_emlsr_rssi_thresh(mld, link->chandef, false))
		ret |= IWL_MLD_EMLSR_EXIT_LOW_RSSI;

	if (conf->csa_active)
		ret |= IWL_MLD_EMLSR_EXIT_CSA;

	if (ret) {
		IWL_DEBUG_EHT(mld, "Link %d is not allowed for EMLSR as %s\n",
			      link->link_id, primary ? "primary" : "secondary");
		iwl_mld_print_emlsr_exit(mld, ret);
	}

	return ret;
}
707 
/*
 * Fill @data with selection info (chandef, signal, grade) for each usable
 * link that was seen in the last MLO scan.
 *
 * Returns the number of entries written; *best_link_idx is set to the
 * index of the highest-graded entry (only meaningful when the return
 * value is non-zero).
 */
static u8
iwl_mld_set_link_sel_data(struct iwl_mld *mld,
			  struct ieee80211_vif *vif,
			  struct iwl_mld_link_sel_data *data,
			  unsigned long usable_links,
			  u8 *best_link_idx)
{
	u8 n_data = 0;
	u16 max_grade = 0;
	unsigned long link_id;

	for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
		struct ieee80211_bss_conf *link_conf =
			link_conf_dereference_protected(vif, link_id);

		if (WARN_ON_ONCE(!link_conf))
			continue;

		/* Ignore any BSS that was not seen in the last MLO scan */
		if (ktime_before(link_conf->bss->ts_boottime,
				 mld->scan.last_mlo_scan_time))
			continue;

		data[n_data].link_id = link_id;
		data[n_data].chandef = &link_conf->chanreq.oper;
		data[n_data].signal = MBM_TO_DBM(link_conf->bss->signal);
		data[n_data].grade = iwl_mld_get_link_grade(mld, link_conf);

		/* n_data == 0 forces the first entry to seed max_grade */
		if (n_data == 0 || data[n_data].grade > max_grade) {
			max_grade = data[n_data].grade;
			*best_link_idx = n_data;
		}
		n_data++;
	}

	return n_data;
}
745 
746 static u32
747 iwl_mld_get_min_chan_load_thresh(struct ieee80211_chanctx_conf *chanctx)
748 {
749 	const struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(chanctx);
750 
751 	switch (phy->chandef.width) {
752 	case NL80211_CHAN_WIDTH_320:
753 	case NL80211_CHAN_WIDTH_160:
754 		return 5;
755 	case NL80211_CHAN_WIDTH_80:
756 		return 7;
757 	default:
758 		break;
759 	}
760 	return 10;
761 }
762 
/*
 * Decide whether the primary link's channel load justifies EMLSR with
 * link pair (@a = primary candidate, @b = secondary candidate).
 *
 * Requires the primary link to be active with a valid chanctx and its
 * measured load to be above the width-dependent minimum. Low-latency
 * vifs then always qualify; otherwise, when the primary is wider than
 * the secondary, a higher load is required the larger the width ratio.
 */
static bool
iwl_mld_channel_load_allows_emlsr(struct iwl_mld *mld,
				  struct ieee80211_vif *vif,
				  const struct iwl_mld_link_sel_data *a,
				  const struct iwl_mld_link_sel_data *b)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld_link *link_a =
		iwl_mld_link_dereference_check(mld_vif, a->link_id);
	struct ieee80211_chanctx_conf *chanctx_a = NULL;
	u32 bw_a, bw_b, ratio;
	u32 primary_load_perc;

	if (!link_a || !link_a->active) {
		IWL_DEBUG_EHT(mld, "Primary link is not active. Can't enter EMLSR\n");
		return false;
	}

	chanctx_a = wiphy_dereference(mld->wiphy, link_a->chan_ctx);

	if (WARN_ON(!chanctx_a))
		return false;

	primary_load_perc =
		iwl_mld_phy_from_mac80211(chanctx_a)->avg_channel_load_not_by_us;

	IWL_DEBUG_EHT(mld, "Average channel load not by us: %u\n", primary_load_perc);

	if (primary_load_perc < iwl_mld_get_min_chan_load_thresh(chanctx_a)) {
		IWL_DEBUG_EHT(mld, "Channel load is below the minimum threshold\n");
		return false;
	}

	if (iwl_mld_vif_low_latency(mld_vif)) {
		IWL_DEBUG_EHT(mld, "Low latency vif, EMLSR is allowed\n");
		return true;
	}

	/* Secondary at least as wide as the primary: always worthwhile */
	if (a->chandef->width <= b->chandef->width)
		return true;

	bw_a = cfg80211_chandef_get_width(a->chandef);
	bw_b = cfg80211_chandef_get_width(b->chandef);
	ratio = bw_a / bw_b;

	switch (ratio) {
	case 2:
		return primary_load_perc > 25;
	case 4:
		return primary_load_perc > 40;
	case 8:
	case 16:
		return primary_load_perc > 50;
	}

	return false;
}
820 
/*
 * Evaluate whether links @a and @b form a valid EMLSR pair.
 *
 * Returns 0 when the pair is usable, otherwise a mask of
 * IWL_MLD_EMLSR_EXIT_* reasons: per-link disqualifiers (checked first,
 * returned immediately), same-band operation without sufficient spectral
 * separation (only the 5 GHz UNII-1/UNII-2 split around 5330-5490 MHz is
 * tolerated), or insufficient channel load.
 */
VISIBLE_IF_IWLWIFI_KUNIT u32
iwl_mld_emlsr_pair_state(struct ieee80211_vif *vif,
			 struct iwl_mld_link_sel_data *a,
			 struct iwl_mld_link_sel_data *b)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld *mld = mld_vif->mld;
	u32 reason_mask = 0;

	/* Per-link considerations */
	reason_mask = iwl_mld_emlsr_disallowed_with_link(mld, vif, a, true);
	if (reason_mask)
		return reason_mask;

	reason_mask = iwl_mld_emlsr_disallowed_with_link(mld, vif, b, false);
	if (reason_mask)
		return reason_mask;

	if (a->chandef->chan->band == b->chandef->chan->band) {
		const struct cfg80211_chan_def *c_low = a->chandef;
		const struct cfg80211_chan_def *c_high = b->chandef;
		u32 c_low_upper_edge, c_high_lower_edge;

		if (c_low->chan->center_freq > c_high->chan->center_freq)
			swap(c_low, c_high);

		c_low_upper_edge = c_low->chan->center_freq +
				   cfg80211_chandef_get_width(c_low) / 2;
		c_high_lower_edge = c_high->chan->center_freq -
				    cfg80211_chandef_get_width(c_high) / 2;

		if (a->chandef->chan->band == NL80211_BAND_5GHZ &&
		    c_low_upper_edge <= 5330 && c_high_lower_edge >= 5490) {
			/* This case is fine - HW/FW can deal with it, there's
			 * enough separation between the two channels.
			 */
		} else {
			reason_mask |= IWL_MLD_EMLSR_EXIT_EQUAL_BAND;
		}
	}
	if (!iwl_mld_channel_load_allows_emlsr(mld, vif, a, b))
		reason_mask |= IWL_MLD_EMLSR_EXIT_CHAN_LOAD;

	if (reason_mask) {
		IWL_DEBUG_EHT(mld,
			      "Links %d and %d are not a valid pair for EMLSR\n",
			      a->link_id, b->link_id);
		IWL_DEBUG_EHT(mld, "Links bandwidth are: %d and %d\n",
			      nl80211_chan_width_to_mhz(a->chandef->width),
			      nl80211_chan_width_to_mhz(b->chandef->width));
		iwl_mld_print_emlsr_exit(mld, reason_mask);
	}

	return reason_mask;
}
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_emlsr_pair_state);
877 
/* Calculation is done with fixed-point with a scaling factor of 1/256 */
#define SCALE_FACTOR 256

/*
 * Returns the combined grade of two given links.
 * Returns 0 if EMLSR is not allowed with these 2 links.
 *
 * The links may be passed in either order; they are swapped so that the
 * higher-graded one becomes the primary, whose link ID is written to
 * *primary_id. The secondary's contribution is weighted by the primary's
 * channel load (fixed-point /256), so a busier primary makes EMLSR more
 * attractive.
 */
static
unsigned int iwl_mld_get_emlsr_grade(struct iwl_mld *mld,
				     struct ieee80211_vif *vif,
				     struct iwl_mld_link_sel_data *a,
				     struct iwl_mld_link_sel_data *b,
				     u8 *primary_id)
{
	struct ieee80211_bss_conf *primary_conf;
	struct wiphy *wiphy = ieee80211_vif_to_wdev(vif)->wiphy;
	unsigned int primary_load;

	lockdep_assert_wiphy(wiphy);

	/* a is always primary, b is always secondary */
	if (b->grade > a->grade)
		swap(a, b);

	*primary_id = a->link_id;

	if (iwl_mld_emlsr_pair_state(vif, a, b))
		return 0;

	primary_conf = wiphy_dereference(wiphy, vif->link_conf[*primary_id]);

	if (WARN_ON_ONCE(!primary_conf))
		return 0;

	primary_load = iwl_mld_get_chan_load(mld, primary_conf);

	/* The more the primary link is loaded, the more worthwhile EMLSR becomes */
	return a->grade + ((b->grade * primary_load) / SCALE_FACTOR);
}
917 
/*
 * Select and activate the best link (or EMLSR link pair) for @vif.
 *
 * Grades each usable link from the last MLO scan, then — when EMLSR is
 * possible and not blocked — searches all pairs for a combined grade
 * that beats the best single link (ties prefer the pair). The result is
 * stored in mld_vif->emlsr.selected_* and applied asynchronously via
 * ieee80211_set_active_links_async(). Bails out if the vif is not
 * authorized, has fewer than 2 usable links, or the MLO scan data is
 * older than 5 seconds.
 */
static void _iwl_mld_select_links(struct iwl_mld *mld,
				  struct ieee80211_vif *vif)
{
	struct iwl_mld_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
	struct iwl_mld_link_sel_data *best_link;
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int max_active_links = iwl_mld_max_active_links(mld, vif);
	u16 new_active, usable_links = ieee80211_vif_usable_links(vif);
	u8 best_idx, new_primary, n_data;
	u16 max_grade;

	lockdep_assert_wiphy(mld->wiphy);

	if (!mld_vif->authorized || hweight16(usable_links) <= 1)
		return;

	if (WARN(ktime_before(mld->scan.last_mlo_scan_time,
			      ktime_sub_ns(ktime_get_boottime_ns(),
					   5ULL * NSEC_PER_SEC)),
		"Last MLO scan was too long ago, can't select links\n"))
		return;

	/* The logic below is simple and not suited for more than 2 links */
	WARN_ON_ONCE(max_active_links > 2);

	n_data = iwl_mld_set_link_sel_data(mld, vif, data, usable_links,
					   &best_idx);

	if (!n_data) {
		IWL_DEBUG_EHT(mld,
			      "Couldn't find a valid grade for any link!\n");
		return;
	}

	/* Default to selecting the single best link */
	best_link = &data[best_idx];
	new_primary = best_link->link_id;
	new_active = BIT(best_link->link_id);
	max_grade = best_link->grade;

	/* If EMLSR is not possible, activate the best link */
	if (max_active_links == 1 || n_data == 1 ||
	    !iwl_mld_vif_has_emlsr_cap(vif) || !IWL_MLD_AUTO_EML_ENABLE ||
	    mld_vif->emlsr.blocked_reasons)
		goto set_active;

	/* Try to find the best link combination */
	for (u8 a = 0; a < n_data; a++) {
		for (u8 b = a + 1; b < n_data; b++) {
			u8 best_in_pair;
			u16 emlsr_grade =
				iwl_mld_get_emlsr_grade(mld, vif,
							&data[a], &data[b],
							&best_in_pair);

			/*
			 * Prefer (new) EMLSR combination to prefer EMLSR over
			 * a single link.
			 */
			if (emlsr_grade < max_grade)
				continue;

			max_grade = emlsr_grade;
			new_primary = best_in_pair;
			new_active = BIT(data[a].link_id) |
				     BIT(data[b].link_id);
		}
	}

set_active:
	IWL_DEBUG_EHT(mld, "Link selection result: 0x%x. Primary = %d\n",
		      new_active, new_primary);

	mld_vif->emlsr.selected_primary = new_primary;
	mld_vif->emlsr.selected_links = new_active;

	ieee80211_set_active_links_async(vif, new_active);
}
996 
997 static void iwl_mld_vif_iter_select_links(void *_data, u8 *mac,
998 					  struct ieee80211_vif *vif)
999 {
1000 	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
1001 	struct iwl_mld *mld = mld_vif->mld;
1002 
1003 	_iwl_mld_select_links(mld, vif);
1004 }
1005 
/* Run MLO link selection on every active interface. */
void iwl_mld_select_links(struct iwl_mld *mld)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_select_links,
						NULL);
}
1013 
1014 static void iwl_mld_emlsr_check_bt_iter(void *_data, u8 *mac,
1015 					struct ieee80211_vif *vif)
1016 {
1017 	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
1018 	struct iwl_mld *mld = mld_vif->mld;
1019 	struct ieee80211_bss_conf *link;
1020 	unsigned int link_id;
1021 
1022 	if (!iwl_mld_vif_has_emlsr_cap(vif))
1023 		return;
1024 
1025 	if (!mld->bt_is_active) {
1026 		iwl_mld_retry_emlsr(mld, vif);
1027 		return;
1028 	}
1029 
1030 	/* BT is turned ON but we are not in EMLSR, nothing to do */
1031 	if (!iwl_mld_emlsr_active(vif))
1032 		return;
1033 
1034 	/* In EMLSR and BT is turned ON */
1035 
1036 	for_each_vif_active_link(vif, link, link_id) {
1037 		if (WARN_ON(!link->chanreq.oper.chan))
1038 			continue;
1039 
1040 		if (link->chanreq.oper.chan->band == NL80211_BAND_2GHZ) {
1041 			iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BT_COEX,
1042 					   iwl_mld_get_primary_link(vif));
1043 			return;
1044 		}
1045 	}
1046 }
1047 
/* Re-evaluate EMLSR on all active interfaces against the current BT state. */
void iwl_mld_emlsr_check_bt(struct iwl_mld *mld)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_emlsr_check_bt_iter,
						NULL);
}
1055 
/* Arguments for the channel-load interface iteration */
struct iwl_mld_chan_load_data {
	/* PHY whose channel load was just updated */
	struct iwl_mld_phy *phy;
	/* avg_channel_load_not_by_us value from before the update */
	u32 prev_chan_load_not_by_us;
};
1060 
/*
 * Per-vif callback reacting to a channel-load update on one PHY.
 *
 * Acts only on vifs whose primary link uses the updated channel context.
 * If the vif is in EMLSR and the load generated by others on the primary
 * channel dropped below the exit threshold, leave EMLSR (per the grading
 * logic, a lightly-loaded primary makes EMLSR less worthwhile).  If the
 * vif is not in EMLSR, retry entering it whenever the load crossed
 * upwards over one of the interesting thresholds.
 */
static void iwl_mld_chan_load_update_iter(void *_data, u8 *mac,
					  struct ieee80211_vif *vif)
{
	struct iwl_mld_chan_load_data *data = _data;
	const struct iwl_mld_phy *phy = data->phy;
	/* The phy is embedded as the chanctx drv_priv; recover the chanctx */
	struct ieee80211_chanctx_conf *chanctx =
		container_of((const void *)phy, struct ieee80211_chanctx_conf,
			     drv_priv);
	struct iwl_mld *mld = iwl_mld_vif_from_mac80211(vif)->mld;
	struct ieee80211_bss_conf *prim_link;
	unsigned int prim_link_id;

	prim_link_id = iwl_mld_get_primary_link(vif);
	prim_link = link_conf_dereference_protected(vif, prim_link_id);

	if (WARN_ON(!prim_link))
		return;

	/* Only care about the chanctx the primary link is on */
	if (chanctx != rcu_access_pointer(prim_link->chanctx_conf))
		return;

	if (iwl_mld_emlsr_active(vif)) {
		int chan_load = iwl_mld_get_chan_load_by_others(mld, prim_link,
								true);

		/* negative return: no valid load information — skip */
		if (chan_load < 0)
			return;

		/* chan_load is in range [0,255] */
		if (chan_load < NORMALIZE_PERCENT_TO_255(IWL_MLD_EXIT_EMLSR_CHAN_LOAD))
			iwl_mld_exit_emlsr(mld, vif,
					   IWL_MLD_EMLSR_EXIT_CHAN_LOAD,
					   prim_link_id);
	} else {
		u32 old_chan_load = data->prev_chan_load_not_by_us;
		u32 new_chan_load = phy->avg_channel_load_not_by_us;
		u32 min_thresh = iwl_mld_get_min_chan_load_thresh(chanctx);

/* True only when the load moved from at/below the threshold to above it */
#define THRESHOLD_CROSSED(threshold) \
	(old_chan_load <= (threshold) && new_chan_load > (threshold))

		if (THRESHOLD_CROSSED(min_thresh) || THRESHOLD_CROSSED(25) ||
		    THRESHOLD_CROSSED(40) || THRESHOLD_CROSSED(50))
			iwl_mld_retry_emlsr(mld, vif);
#undef THRESHOLD_CROSSED
	}
}
1108 
1109 void iwl_mld_emlsr_check_chan_load(struct ieee80211_hw *hw,
1110 				   struct iwl_mld_phy *phy,
1111 				   u32 prev_chan_load_not_by_us)
1112 {
1113 	struct iwl_mld_chan_load_data data = {
1114 		.phy = phy,
1115 		.prev_chan_load_not_by_us = prev_chan_load_not_by_us,
1116 	};
1117 
1118 	ieee80211_iterate_active_interfaces_mtx(hw,
1119 						IEEE80211_IFACE_ITER_NORMAL,
1120 						iwl_mld_chan_load_update_iter,
1121 						&data);
1122 }
1123 
1124 void iwl_mld_retry_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif)
1125 {
1126 	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
1127 
1128 	if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif) ||
1129 	    iwl_mld_emlsr_active(vif) || mld_vif->emlsr.blocked_reasons)
1130 		return;
1131 
1132 	iwl_mld_int_mlo_scan(mld, vif);
1133 }
1134 
1135 static void iwl_mld_ignore_tpt_iter(void *data, u8 *mac,
1136 				    struct ieee80211_vif *vif)
1137 {
1138 	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
1139 	struct iwl_mld *mld = mld_vif->mld;
1140 	struct iwl_mld_sta *mld_sta;
1141 	bool *start = (void *)data;
1142 
1143 	/* check_tpt_wk is only used when TPT block isn't set */
1144 	if (mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT ||
1145 	    !IWL_MLD_AUTO_EML_ENABLE || !mld_vif->ap_sta)
1146 		return;
1147 
1148 	mld_sta = iwl_mld_sta_from_mac80211(mld_vif->ap_sta);
1149 
1150 	/* We only count for the AP sta in a MLO connection */
1151 	if (!mld_sta->mpdu_counters)
1152 		return;
1153 
1154 	if (*start) {
1155 		wiphy_delayed_work_cancel(mld_vif->mld->wiphy,
1156 					  &mld_vif->emlsr.check_tpt_wk);
1157 		IWL_DEBUG_EHT(mld, "TPT check disabled\n");
1158 		return;
1159 	}
1160 
1161 	/* Clear the counters so we start from the beginning */
1162 	for (int q = 0; q < mld->trans->info.num_rxqs; q++) {
1163 		struct iwl_mld_per_q_mpdu_counter *queue_counter =
1164 			&mld_sta->mpdu_counters[q];
1165 
1166 		spin_lock_bh(&queue_counter->lock);
1167 
1168 		memset(queue_counter->per_link, 0,
1169 		       sizeof(queue_counter->per_link));
1170 
1171 		spin_unlock_bh(&queue_counter->lock);
1172 	}
1173 
1174 	/* Schedule the check in 5 seconds */
1175 	wiphy_delayed_work_queue(mld_vif->mld->wiphy,
1176 				 &mld_vif->emlsr.check_tpt_wk,
1177 				 round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));
1178 	IWL_DEBUG_EHT(mld, "TPT check enabled\n");
1179 }
1180 
1181 void iwl_mld_start_ignoring_tpt_updates(struct iwl_mld *mld)
1182 {
1183 	bool start = true;
1184 
1185 	ieee80211_iterate_active_interfaces_mtx(mld->hw,
1186 						IEEE80211_IFACE_ITER_NORMAL,
1187 						iwl_mld_ignore_tpt_iter,
1188 						&start);
1189 }
1190 
1191 void iwl_mld_stop_ignoring_tpt_updates(struct iwl_mld *mld)
1192 {
1193 	bool start = false;
1194 
1195 	ieee80211_iterate_active_interfaces_mtx(mld->hw,
1196 						IEEE80211_IFACE_ITER_NORMAL,
1197 						iwl_mld_ignore_tpt_iter,
1198 						&start);
1199 }
1200