// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2024-2025 Intel Corporation
 */
#include "mlo.h"
#include "phy.h"

/* Block reasons helper */
#define HANDLE_EMLSR_BLOCKED_REASONS(HOW)	\
	HOW(PREVENTION)			\
	HOW(WOWLAN)			\
	HOW(ROC)			\
	HOW(NON_BSS)			\
	HOW(TMP_NON_BSS)		\
	HOW(TPT)

static const char *
iwl_mld_get_emlsr_blocked_string(enum iwl_mld_emlsr_blocked blocked)
{
	/* Using a switch without "default" will warn about missing entries */
	switch (blocked) {
#define REASON_CASE(x) case IWL_MLD_EMLSR_BLOCKED_##x: return #x;
	HANDLE_EMLSR_BLOCKED_REASONS(REASON_CASE)
#undef REASON_CASE
	}

	return "ERROR";
}

static void iwl_mld_print_emlsr_blocked(struct iwl_mld *mld, u32 mask)
{
#define NAME_FMT(x) "%s"
#define NAME_PR(x) (mask & IWL_MLD_EMLSR_BLOCKED_##x) ? "[" #x "]" : "",
	IWL_DEBUG_INFO(mld,
		       "EMLSR blocked = " HANDLE_EMLSR_BLOCKED_REASONS(NAME_FMT)
		       " (0x%x)\n",
		       HANDLE_EMLSR_BLOCKED_REASONS(NAME_PR)
		       mask);
#undef NAME_FMT
#undef NAME_PR
}

/* Exit reasons helper */
#define HANDLE_EMLSR_EXIT_REASONS(HOW)	\
	HOW(BLOCK)			\
	HOW(MISSED_BEACON)		\
	HOW(FAIL_ENTRY)			\
	HOW(CSA)			\
	HOW(EQUAL_BAND)			\
	HOW(LOW_RSSI)			\
	HOW(LINK_USAGE)			\
	HOW(BT_COEX)			\
	HOW(CHAN_LOAD)			\
	HOW(RFI)			\
	HOW(FW_REQUEST)			\
	HOW(INVALID)

static const char *
iwl_mld_get_emlsr_exit_string(enum iwl_mld_emlsr_exit exit)
{
	/* Using a switch without "default" will warn about missing entries */
	switch (exit) {
#define REASON_CASE(x) case IWL_MLD_EMLSR_EXIT_##x: return #x;
	HANDLE_EMLSR_EXIT_REASONS(REASON_CASE)
#undef REASON_CASE
	}

	return "ERROR";
}

static void iwl_mld_print_emlsr_exit(struct iwl_mld *mld, u32 mask)
{
#define NAME_FMT(x) "%s"
#define NAME_PR(x) (mask & IWL_MLD_EMLSR_EXIT_##x) ? "[" #x "]" : "",
	IWL_DEBUG_INFO(mld,
		       "EMLSR exit = " HANDLE_EMLSR_EXIT_REASONS(NAME_FMT)
		       " (0x%x)\n",
		       HANDLE_EMLSR_EXIT_REASONS(NAME_PR)
		       mask);
#undef NAME_FMT
#undef NAME_PR
}

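/*
 * Delayed work: the EMLSR "prevention" period has elapsed, so lift the
 * PREVENTION block and let EMLSR be considered again.
 */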
void iwl_mld_emlsr_prevent_done_wk(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
						   emlsr.prevent_done_wk.work);
	struct ieee80211_vif *vif =
		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);

	if (WARN_ON(!(mld_vif->emlsr.blocked_reasons &
		      IWL_MLD_EMLSR_BLOCKED_PREVENTION)))
		return;

	iwl_mld_unblock_emlsr(mld_vif->mld, vif,
			      IWL_MLD_EMLSR_BLOCKED_PREVENTION);
}

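/*
 * Delayed work: the temporary non-BSS block has timed out, so lift the
 * TMP_NON_BSS block and let EMLSR be considered again.
 */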
void iwl_mld_emlsr_tmp_non_bss_done_wk(struct wiphy *wiphy,
				       struct wiphy_work *wk)
{
	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
						   emlsr.tmp_non_bss_done_wk.work);
	struct ieee80211_vif *vif =
		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);

	if (WARN_ON(!(mld_vif->emlsr.blocked_reasons &
		      IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS)))
		return;

	iwl_mld_unblock_emlsr(mld_vif->mld, vif,
			      IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS);
}

#define IWL_MLD_TRIGGER_LINK_SEL_TIME	(HZ * IWL_MLD_TRIGGER_LINK_SEL_TIME_SEC)
#define IWL_MLD_SCAN_EXPIRE_TIME	(HZ * IWL_MLD_SCAN_EXPIRE_TIME_SEC)

/* Exit reasons that can cause longer EMLSR prevention */
#define IWL_MLD_PREVENT_EMLSR_REASONS	(IWL_MLD_EMLSR_EXIT_MISSED_BEACON	| \
					 IWL_MLD_EMLSR_EXIT_LINK_USAGE		| \
					 IWL_MLD_EMLSR_EXIT_FW_REQUEST)
#define IWL_MLD_PREVENT_EMLSR_TIMEOUT	(HZ * 400)

#define IWL_MLD_EMLSR_PREVENT_SHORT	(HZ * 300)
#define IWL_MLD_EMLSR_PREVENT_LONG	(HZ * 600)

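/*
 * Track repeated EMLSR exits and escalate the prevention period: the first
 * exit prevents EMLSR for IWL_MLD_TRIGGER_LINK_SEL_TIME; for the reasons in
 * IWL_MLD_PREVENT_EMLSR_REASONS, a second exit within the timeout extends
 * this to 300 seconds and a third to 600 seconds.
 */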
static void iwl_mld_check_emlsr_prevention(struct iwl_mld *mld,
					   struct iwl_mld_vif *mld_vif,
					   enum iwl_mld_emlsr_exit reason)
{
	unsigned long delay;

	/*
	 * Reset the counter if more than 400 seconds have passed since the
	 * previous exit, or if we exited due to a different reason.
	 * This also resets the counter after the long prevention is done.
	 */
	if (time_after(jiffies, mld_vif->emlsr.last_exit_ts +
				IWL_MLD_PREVENT_EMLSR_TIMEOUT) ||
	    mld_vif->emlsr.last_exit_reason != reason)
		mld_vif->emlsr.exit_repeat_count = 0;

	mld_vif->emlsr.last_exit_reason = reason;
	mld_vif->emlsr.last_exit_ts = jiffies;
	mld_vif->emlsr.exit_repeat_count++;

	/*
	 * Do not add a prevention when the reason was a block. For a block,
	 * EMLSR will be enabled again on unblock.
	 */
	if (reason == IWL_MLD_EMLSR_EXIT_BLOCK)
		return;

	/* Set prevention for a minimum of 30 seconds */
	mld_vif->emlsr.blocked_reasons |= IWL_MLD_EMLSR_BLOCKED_PREVENTION;
	delay = IWL_MLD_TRIGGER_LINK_SEL_TIME;

	/* Handle repeats for reasons that can cause long prevention */
	if (mld_vif->emlsr.exit_repeat_count > 1 &&
	    reason & IWL_MLD_PREVENT_EMLSR_REASONS) {
		if (mld_vif->emlsr.exit_repeat_count == 2)
			delay = IWL_MLD_EMLSR_PREVENT_SHORT;
		else
			delay = IWL_MLD_EMLSR_PREVENT_LONG;

		/*
		 * The timeouts are chosen so that this will not happen, i.e.
		 * IWL_MLD_EMLSR_PREVENT_LONG > IWL_MLD_PREVENT_EMLSR_TIMEOUT
		 */
		WARN_ON(mld_vif->emlsr.exit_repeat_count > 3);
	}

	IWL_DEBUG_INFO(mld,
		       "Preventing EMLSR for %ld seconds due to %u exits with the reason = %s (0x%x)\n",
		       delay / HZ, mld_vif->emlsr.exit_repeat_count,
		       iwl_mld_get_emlsr_exit_string(reason), reason);

	wiphy_delayed_work_queue(mld->wiphy,
				 &mld_vif->emlsr.prevent_done_wk, delay);
}

static void iwl_mld_clear_avg_chan_load_iter(struct ieee80211_hw *hw,
					     struct ieee80211_chanctx_conf *ctx,
					     void *dat)
{
	struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);

	/* It is ok to do it for all chanctx (and not only for the ones that
	 * belong to the EMLSR vif) since EMLSR is not allowed if there is
	 * another vif.
	 */
	phy->avg_channel_load_not_by_us = 0;
}

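/*
 * Leave EMLSR and keep only @link_to_keep active. Also updates the
 * prevention state and clears the stale channel load averages. When @sync
 * is true, returns the result of ieee80211_set_active_links(), otherwise 0.
 */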
static int _iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			       enum iwl_mld_emlsr_exit exit, u8 link_to_keep,
			       bool sync)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	u16 new_active_links;
	int ret = 0;

	lockdep_assert_wiphy(mld->wiphy);

	/* On entry failure we need to exit anyway, even if EMLSR was entered
	 * from debugfs.
	 */
	if (exit != IWL_MLD_EMLSR_EXIT_FAIL_ENTRY && !IWL_MLD_AUTO_EML_ENABLE)
		return 0;

	/* Ignore exit request if EMLSR is not active */
	if (!iwl_mld_emlsr_active(vif))
		return 0;

	if (WARN_ON(!ieee80211_vif_is_mld(vif) || !mld_vif->authorized))
		return 0;

	if (WARN_ON(!(vif->active_links & BIT(link_to_keep))))
		link_to_keep = __ffs(vif->active_links);

	new_active_links = BIT(link_to_keep);
	IWL_DEBUG_INFO(mld,
		       "Exiting EMLSR. reason = %s (0x%x). Current active links=0x%x, new active links = 0x%x\n",
		       iwl_mld_get_emlsr_exit_string(exit), exit,
		       vif->active_links, new_active_links);

	if (sync)
		ret = ieee80211_set_active_links(vif, new_active_links);
	else
		ieee80211_set_active_links_async(vif, new_active_links);

	/* Update latest exit reason and check EMLSR prevention */
	iwl_mld_check_emlsr_prevention(mld, mld_vif, exit);

	/* channel_load_not_by_us is invalid when in EMLSR.
	 * Clear it so wrong values won't be used.
	 */
	ieee80211_iter_chan_contexts_atomic(mld->hw,
					    iwl_mld_clear_avg_chan_load_iter,
					    NULL);

	return ret;
}

void iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			enum iwl_mld_emlsr_exit exit, u8 link_to_keep)
{
	_iwl_mld_exit_emlsr(mld, vif, exit, link_to_keep, false);
}

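/*
 * Add @reason to the EMLSR block mask; if it was not already set, exit
 * EMLSR with IWL_MLD_EMLSR_EXIT_BLOCK. A TPT block also cancels the
 * periodic throughput check.
 */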
static int _iwl_mld_emlsr_block(struct iwl_mld *mld, struct ieee80211_vif *vif,
				enum iwl_mld_emlsr_blocked reason,
				u8 link_to_keep, bool sync)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	lockdep_assert_wiphy(mld->wiphy);

	if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif))
		return 0;

	if (mld_vif->emlsr.blocked_reasons & reason)
		return 0;

	mld_vif->emlsr.blocked_reasons |= reason;

	IWL_DEBUG_INFO(mld,
		       "Blocking EMLSR mode. reason = %s (0x%x)\n",
		       iwl_mld_get_emlsr_blocked_string(reason), reason);
	iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons);

	if (reason == IWL_MLD_EMLSR_BLOCKED_TPT)
		wiphy_delayed_work_cancel(mld_vif->mld->wiphy,
					  &mld_vif->emlsr.check_tpt_wk);

	return _iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BLOCK,
				   link_to_keep, sync);
}

void iwl_mld_block_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			 enum iwl_mld_emlsr_blocked reason, u8 link_to_keep)
{
	_iwl_mld_emlsr_block(mld, vif, reason, link_to_keep, false);
}

int iwl_mld_block_emlsr_sync(struct iwl_mld *mld, struct ieee80211_vif *vif,
			     enum iwl_mld_emlsr_blocked reason, u8 link_to_keep)
{
	return _iwl_mld_emlsr_block(mld, vif, reason, link_to_keep, true);
}

#define IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS_TIMEOUT (10 * HZ)

static void iwl_mld_vif_iter_emlsr_block_tmp_non_bss(void *_data, u8 *mac,
						     struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int ret;

	if (!iwl_mld_vif_has_emlsr_cap(vif))
		return;

	ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif,
				       IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS,
				       iwl_mld_get_primary_link(vif));
	if (ret)
		return;

	wiphy_delayed_work_queue(mld_vif->mld->wiphy,
				 &mld_vif->emlsr.tmp_non_bss_done_wk,
				 IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS_TIMEOUT);
}

void iwl_mld_emlsr_block_tmp_non_bss(struct iwl_mld *mld)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_emlsr_block_tmp_non_bss,
						NULL);
}

static void _iwl_mld_select_links(struct iwl_mld *mld,
				  struct ieee80211_vif *vif);

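/*
 * Remove @reason from the EMLSR block mask. Clearing a TPT block re-arms
 * the throughput check; once no blocked reasons remain, an internal MLO
 * scan is triggered so link selection can consider EMLSR again.
 */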
void iwl_mld_unblock_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			   enum iwl_mld_emlsr_blocked reason)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	lockdep_assert_wiphy(mld->wiphy);

	if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif))
		return;

	if (!(mld_vif->emlsr.blocked_reasons & reason))
		return;

	mld_vif->emlsr.blocked_reasons &= ~reason;

	IWL_DEBUG_INFO(mld,
		       "Unblocking EMLSR mode. reason = %s (0x%x)\n",
		       iwl_mld_get_emlsr_blocked_string(reason), reason);
	iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons);

	if (reason == IWL_MLD_EMLSR_BLOCKED_TPT)
		wiphy_delayed_work_queue(mld_vif->mld->wiphy,
					 &mld_vif->emlsr.check_tpt_wk,
					 round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));

	if (mld_vif->emlsr.blocked_reasons)
		return;

	IWL_DEBUG_INFO(mld, "EMLSR is unblocked\n");
	iwl_mld_int_mlo_scan(mld, vif);
}

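/*
 * Per-vif handler for the FW ESR_MODE_NOTIF: parse the (version-dependent)
 * notification and exit EMLSR when the FW recommends leaving it; any other
 * action is unexpected at this point and only warned about.
 */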
static void
iwl_mld_vif_iter_emlsr_mode_notif(void *data, u8 *mac,
				  struct ieee80211_vif *vif)
{
	const struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	enum iwl_mvm_fw_esr_recommendation action;
	const struct iwl_esr_mode_notif *notif = NULL;

	if (iwl_fw_lookup_notif_ver(mld_vif->mld->fw, DATA_PATH_GROUP,
				    ESR_MODE_NOTIF, 0) > 1) {
		notif = (void *)data;
		action = le32_to_cpu(notif->action);
	} else {
		const struct iwl_esr_mode_notif_v1 *notif_v1 = (void *)data;

		action = le32_to_cpu(notif_v1->action);
	}

	if (!iwl_mld_vif_has_emlsr_cap(vif))
		return;

	switch (action) {
	case ESR_RECOMMEND_LEAVE:
		if (notif)
			IWL_DEBUG_INFO(mld_vif->mld,
				       "FW recommend leave reason = 0x%x\n",
				       le32_to_cpu(notif->leave_reason_mask));

		iwl_mld_exit_emlsr(mld_vif->mld, vif,
				   IWL_MLD_EMLSR_EXIT_FW_REQUEST,
				   iwl_mld_get_primary_link(vif));
		break;
	case ESR_FORCE_LEAVE:
		if (notif)
			IWL_DEBUG_INFO(mld_vif->mld,
				       "FW force leave reason = 0x%x\n",
				       le32_to_cpu(notif->leave_reason_mask));
		fallthrough;
	case ESR_RECOMMEND_ENTER:
	default:
		IWL_WARN(mld_vif->mld, "Unexpected EMLSR notification: %d\n",
			 action);
	}
}

void iwl_mld_handle_emlsr_mode_notif(struct iwl_mld *mld,
				     struct iwl_rx_packet *pkt)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_emlsr_mode_notif,
						pkt->data);
}

static void
iwl_mld_vif_iter_disconnect_emlsr(void *data, u8 *mac,
				  struct ieee80211_vif *vif)
{
	if (!iwl_mld_vif_has_emlsr_cap(vif))
		return;

	ieee80211_connection_loss(vif);
}

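/*
 * Handle the FW notification that activating or deactivating an EMLSR link
 * failed: report a connection loss when deactivation failed (or the link is
 * unknown), and fall back to the FW-reported link when activation failed.
 */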
void iwl_mld_handle_emlsr_trans_fail_notif(struct iwl_mld *mld,
					   struct iwl_rx_packet *pkt)
{
	const struct iwl_esr_trans_fail_notif *notif = (const void *)pkt->data;
	u32 fw_link_id = le32_to_cpu(notif->link_id);
	struct ieee80211_bss_conf *bss_conf =
		iwl_mld_fw_id_to_link_conf(mld, fw_link_id);

	IWL_DEBUG_INFO(mld, "Failed to %s EMLSR on link %d (FW: %d), reason %d\n",
		       le32_to_cpu(notif->activation) ? "enter" : "exit",
		       bss_conf ? bss_conf->link_id : -1,
		       le32_to_cpu(notif->link_id),
		       le32_to_cpu(notif->err_code));

	if (IWL_FW_CHECK(mld, !bss_conf,
			 "FW reported failure to %sactivate EMLSR on a non-existing link: %d\n",
			 le32_to_cpu(notif->activation) ? "" : "de",
			 fw_link_id)) {
		ieee80211_iterate_active_interfaces_mtx(
			mld->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mld_vif_iter_disconnect_emlsr, NULL);
		return;
	}

	/* Disconnect if we failed to deactivate a link */
	if (!le32_to_cpu(notif->activation)) {
		ieee80211_connection_loss(bss_conf->vif);
		return;
	}

	/*
	 * We failed to activate the second link, go back to the link specified
	 * by the firmware as that is the one that is still valid now.
	 */
	iwl_mld_exit_emlsr(mld, bss_conf->vif, IWL_MLD_EMLSR_EXIT_FAIL_ENTRY,
			   bss_conf->link_id);
}

/* Active non-station link tracking */
static void iwl_mld_count_non_bss_links(void *_data, u8 *mac,
					struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int *count = _data;

	if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION)
		return;

	*count += iwl_mld_count_active_links(mld_vif->mld, vif);
}

struct iwl_mld_update_emlsr_block_data {
	bool block;
	int result;
};

static void
iwl_mld_vif_iter_update_emlsr_non_bss_block(void *_data, u8 *mac,
					    struct ieee80211_vif *vif)
{
	struct iwl_mld_update_emlsr_block_data *data = _data;
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int ret;

	if (data->block) {
		ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif,
					       IWL_MLD_EMLSR_BLOCKED_NON_BSS,
					       iwl_mld_get_primary_link(vif));
		if (ret)
			data->result = ret;
	} else {
		iwl_mld_unblock_emlsr(mld_vif->mld, vif,
				      IWL_MLD_EMLSR_BLOCKED_NON_BSS);
	}
}

int iwl_mld_emlsr_check_non_bss_block(struct iwl_mld *mld,
				      int pending_link_changes)
{
	/* An active link of a non-station vif blocks EMLSR. Upon activation
	 * block EMLSR on the bss vif. Upon deactivation, check if this link
	 * was the last active non-station link, and if so unblock the bss vif.
	 */
	struct iwl_mld_update_emlsr_block_data block_data = {};
	int count = pending_link_changes;

	/* No need to count if we are activating a non-BSS link */
	if (count <= 0)
		ieee80211_iterate_active_interfaces_mtx(mld->hw,
							IEEE80211_IFACE_ITER_NORMAL,
							iwl_mld_count_non_bss_links,
							&count);

	/*
	 * We could skip the update if the blocked state did not change (and
	 * pending_link_changes is non-zero).
	 */
	block_data.block = !!count;

	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_update_emlsr_non_bss_block,
						&block_data);

	return block_data.result;
}

#define EMLSR_SEC_LINK_MIN_PERC 10
#define EMLSR_MIN_TX 3000
#define EMLSR_MIN_RX 400

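/*
 * Periodic throughput check, re-armed every IWL_MLD_TPT_COUNT_WINDOW while
 * the TPT block is not set: block EMLSR when the total MPDU count is too
 * low to justify it, and exit EMLSR when the secondary link carries less
 * than EMLSR_SEC_LINK_MIN_PERC percent of the traffic.
 */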
void iwl_mld_emlsr_check_tpt(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
						   emlsr.check_tpt_wk.work);
	struct ieee80211_vif *vif =
		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
	struct iwl_mld *mld = mld_vif->mld;
	struct iwl_mld_sta *mld_sta;
	struct iwl_mld_link *sec_link;
	unsigned long total_tx = 0, total_rx = 0;
	unsigned long sec_link_tx = 0, sec_link_rx = 0;
	u8 sec_link_tx_perc, sec_link_rx_perc;
	s8 sec_link_id;

	if (!iwl_mld_vif_has_emlsr_cap(vif) || !mld_vif->ap_sta)
		return;

	mld_sta = iwl_mld_sta_from_mac80211(mld_vif->ap_sta);

	/* We only count for the AP sta in an MLO connection */
	if (!mld_sta->mpdu_counters)
		return;

	/* This work should only run when the TPT blocker isn't set.
	 * When the blocker is set, the decision to remove it, as well as
	 * clearing the counters, is done in the data path (to avoid running
	 * a work every 5 seconds when idle; when the blocker is unset, we
	 * are not idle anyway).
	 */
	if (WARN_ON(mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT))
		return;
	/*
	 * TPT is unblocked, so check whether the TPT criteria are still met.
	 *
	 * If EMLSR has been active for at least 5 seconds, then we also
	 * need to check the secondary link requirements.
	 */
	if (iwl_mld_emlsr_active(vif) &&
	    time_is_before_jiffies(mld_vif->emlsr.last_entry_ts +
				   IWL_MLD_TPT_COUNT_WINDOW)) {
		sec_link_id = iwl_mld_get_other_link(vif, iwl_mld_get_primary_link(vif));
		sec_link = iwl_mld_link_dereference_check(mld_vif, sec_link_id);
		if (WARN_ON_ONCE(!sec_link))
			return;
		/* We need the FW ID here */
		sec_link_id = sec_link->fw_id;
	} else {
		sec_link_id = -1;
	}

	/* Sum up RX and TX MPDUs from the different queues/links */
	for (int q = 0; q < mld->trans->info.num_rxqs; q++) {
		struct iwl_mld_per_q_mpdu_counter *queue_counter =
			&mld_sta->mpdu_counters[q];

		spin_lock_bh(&queue_counter->lock);

		/* Link IDs that don't exist will contain 0 */
		for (int link = 0;
		     link < ARRAY_SIZE(queue_counter->per_link);
		     link++) {
			total_tx += queue_counter->per_link[link].tx;
			total_rx += queue_counter->per_link[link].rx;
		}

		if (sec_link_id != -1) {
			sec_link_tx += queue_counter->per_link[sec_link_id].tx;
			sec_link_rx += queue_counter->per_link[sec_link_id].rx;
		}

		memset(queue_counter->per_link, 0,
		       sizeof(queue_counter->per_link));

		spin_unlock_bh(&queue_counter->lock);
	}

	IWL_DEBUG_INFO(mld, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n",
		       total_tx, total_rx);

	/* If we don't have enough MPDUs, block EMLSR */
	if (total_tx < IWL_MLD_ENTER_EMLSR_TPT_THRESH &&
	    total_rx < IWL_MLD_ENTER_EMLSR_TPT_THRESH) {
		iwl_mld_block_emlsr(mld, vif, IWL_MLD_EMLSR_BLOCKED_TPT,
				    iwl_mld_get_primary_link(vif));
		return;
	}

	/* EMLSR is not active */
	if (sec_link_id == -1)
		return;

	IWL_DEBUG_INFO(mld, "Secondary Link %d: Tx MPDUs: %ld. Rx MPDUs: %ld\n",
		       sec_link_id, sec_link_tx, sec_link_rx);

	/* Calculate the percentage of the secondary link TX/RX */
	sec_link_tx_perc = total_tx ? sec_link_tx * 100 / total_tx : 0;
	sec_link_rx_perc = total_rx ? sec_link_rx * 100 / total_rx : 0;

	/*
	 * The secondary link TX/RX percentage is checked only if the total
	 * TX/RX exceeds the required minimum. In addition, RX is checked
	 * only if the TX check did not already trigger an exit.
	 */
	if ((total_tx > EMLSR_MIN_TX &&
	     sec_link_tx_perc < EMLSR_SEC_LINK_MIN_PERC) ||
	    (total_rx > EMLSR_MIN_RX &&
	     sec_link_rx_perc < EMLSR_SEC_LINK_MIN_PERC)) {
		iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_LINK_USAGE,
				   iwl_mld_get_primary_link(vif));
		return;
	}

	/* Check again when the next window ends */
	wiphy_delayed_work_queue(mld_vif->mld->wiphy,
				 &mld_vif->emlsr.check_tpt_wk,
				 round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));
}

void iwl_mld_emlsr_unblock_tpt_wk(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
						   emlsr.unblock_tpt_wk);
	struct ieee80211_vif *vif =
		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);

	iwl_mld_unblock_emlsr(mld_vif->mld, vif, IWL_MLD_EMLSR_BLOCKED_TPT);
}

/*
 * Link selection
 */

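/*
 * Return the RSSI threshold (low or high, per @low) that a link is compared
 * against for EMLSR, based on its channel bandwidth. Returns S8_MAX for an
 * unexpected band or width, so that the link is treated as not good enough.
 */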
s8 iwl_mld_get_emlsr_rssi_thresh(struct iwl_mld *mld,
				 const struct cfg80211_chan_def *chandef,
				 bool low)
{
	if (WARN_ON(chandef->chan->band != NL80211_BAND_2GHZ &&
		    chandef->chan->band != NL80211_BAND_5GHZ &&
		    chandef->chan->band != NL80211_BAND_6GHZ))
		return S8_MAX;

#define RSSI_THRESHOLD(_low, _bw)			\
	(_low) ? IWL_MLD_LOW_RSSI_THRESH_##_bw##MHZ	\
	       : IWL_MLD_HIGH_RSSI_THRESH_##_bw##MHZ

	switch (chandef->width) {
	case NL80211_CHAN_WIDTH_20_NOHT:
	case NL80211_CHAN_WIDTH_20:
	/* 320 MHz has the same thresholds as 20 MHz */
	case NL80211_CHAN_WIDTH_320:
		return RSSI_THRESHOLD(low, 20);
	case NL80211_CHAN_WIDTH_40:
		return RSSI_THRESHOLD(low, 40);
	case NL80211_CHAN_WIDTH_80:
		return RSSI_THRESHOLD(low, 80);
	case NL80211_CHAN_WIDTH_160:
		return RSSI_THRESHOLD(low, 160);
	default:
		WARN_ON(1);
		return S8_MAX;
	}
#undef RSSI_THRESHOLD
}

#define IWL_MLD_BT_COEX_DISABLE_EMLSR_RSSI_THRESH	-69
#define IWL_MLD_BT_COEX_ENABLE_EMLSR_RSSI_THRESH	-63
#define IWL_MLD_BT_COEX_WIFI_LOSS_THRESH		7

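/*
 * Check whether BT coexistence allows EMLSR on a 2.4 GHz link: pick the
 * expected Wi-Fi loss from the last BT notification (mid/high vs. low RSSI
 * table, depending on the link RSSI and the entry/exit threshold) and
 * compare it against IWL_MLD_BT_COEX_WIFI_LOSS_THRESH.
 */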
VISIBLE_IF_IWLWIFI_KUNIT
bool
iwl_mld_bt_allows_emlsr(struct iwl_mld *mld, struct ieee80211_bss_conf *link,
			bool check_entry)
{
	int bt_penalty, rssi_thresh;
	s32 link_rssi;

	if (WARN_ON_ONCE(!link->bss))
		return false;

	link_rssi = MBM_TO_DBM(link->bss->signal);
	rssi_thresh = check_entry ?
		      IWL_MLD_BT_COEX_ENABLE_EMLSR_RSSI_THRESH :
		      IWL_MLD_BT_COEX_DISABLE_EMLSR_RSSI_THRESH;
	/* No valid RSSI - treat it as low RSSI */
	if (!link_rssi)
		link_rssi = rssi_thresh - 1;

	if (link_rssi > rssi_thresh)
		bt_penalty = max(mld->last_bt_notif.wifi_loss_mid_high_rssi[PHY_BAND_24][0],
				 mld->last_bt_notif.wifi_loss_mid_high_rssi[PHY_BAND_24][1]);
	else
		bt_penalty = max(mld->last_bt_notif.wifi_loss_low_rssi[PHY_BAND_24][0],
				 mld->last_bt_notif.wifi_loss_low_rssi[PHY_BAND_24][1]);

	IWL_DEBUG_EHT(mld, "BT penalty for link-id %0X is %d\n",
		      link->link_id, bt_penalty);
	return bt_penalty < IWL_MLD_BT_COEX_WIFI_LOSS_THRESH;
}
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_bt_allows_emlsr);

static u32
iwl_mld_emlsr_disallowed_with_link(struct iwl_mld *mld,
				   struct ieee80211_vif *vif,
				   struct iwl_mld_link_sel_data *link,
				   bool primary)
{
	struct wiphy *wiphy = mld->wiphy;
	struct ieee80211_bss_conf *conf;
	u32 ret = 0;

	conf = wiphy_dereference(wiphy, vif->link_conf[link->link_id]);
	if (WARN_ON_ONCE(!conf))
		return IWL_MLD_EMLSR_EXIT_INVALID;

	if (link->chandef->chan->band == NL80211_BAND_2GHZ &&
	    !iwl_mld_bt_allows_emlsr(mld, conf, true))
		ret |= IWL_MLD_EMLSR_EXIT_BT_COEX;

	if (link->signal <
	    iwl_mld_get_emlsr_rssi_thresh(mld, link->chandef, false))
		ret |= IWL_MLD_EMLSR_EXIT_LOW_RSSI;

	if (conf->csa_active)
		ret |= IWL_MLD_EMLSR_EXIT_CSA;

	if (ret) {
		IWL_DEBUG_INFO(mld,
			       "Link %d is not allowed for EMLSR as %s\n",
			       link->link_id,
			       primary ? "primary" : "secondary");
		iwl_mld_print_emlsr_exit(mld, ret);
	}

	return ret;
}

static u8
iwl_mld_set_link_sel_data(struct iwl_mld *mld,
			  struct ieee80211_vif *vif,
			  struct iwl_mld_link_sel_data *data,
			  unsigned long usable_links,
			  u8 *best_link_idx)
{
	u8 n_data = 0;
	u16 max_grade = 0;
	unsigned long link_id;

	/*
	 * TODO: don't select links that weren't discovered in the last scan
	 * This requires mac80211 (or cfg80211) changes to forward/track when
	 * a BSS was last updated. cfg80211 already tracks this information but
	 * it is not exposed within the kernel.
	 */
	for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
		struct ieee80211_bss_conf *link_conf =
			link_conf_dereference_protected(vif, link_id);

		if (WARN_ON_ONCE(!link_conf))
			continue;

		/* Ignore any BSS that was not seen in the last MLO scan */
		if (ktime_before(link_conf->bss->ts_boottime,
				 mld->scan.last_mlo_scan_time))
			continue;

		data[n_data].link_id = link_id;
		data[n_data].chandef = &link_conf->chanreq.oper;
		data[n_data].signal = MBM_TO_DBM(link_conf->bss->signal);
		data[n_data].grade = iwl_mld_get_link_grade(mld, link_conf);

		if (n_data == 0 || data[n_data].grade > max_grade) {
			max_grade = data[n_data].grade;
			*best_link_idx = n_data;
		}
		n_data++;
	}

	return n_data;
}

static u32
iwl_mld_get_min_chan_load_thresh(struct ieee80211_chanctx_conf *chanctx)
{
	const struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(chanctx);

	switch (phy->chandef.width) {
	case NL80211_CHAN_WIDTH_320:
	case NL80211_CHAN_WIDTH_160:
		return 5;
	case NL80211_CHAN_WIDTH_80:
		return 7;
	default:
		break;
	}
	return 10;
}

static bool
iwl_mld_channel_load_allows_emlsr(struct iwl_mld *mld,
				  struct ieee80211_vif *vif,
				  const struct iwl_mld_link_sel_data *a,
				  const struct iwl_mld_link_sel_data *b)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld_link *link_a =
		iwl_mld_link_dereference_check(mld_vif, a->link_id);
	struct ieee80211_chanctx_conf *chanctx_a = NULL;
	u32 bw_a, bw_b, ratio;
	u32 primary_load_perc;

	if (!link_a || !link_a->active) {
		IWL_DEBUG_EHT(mld, "Primary link is not active. Can't enter EMLSR\n");
		return false;
	}

	chanctx_a = wiphy_dereference(mld->wiphy, link_a->chan_ctx);

	if (WARN_ON(!chanctx_a))
		return false;

	primary_load_perc =
		iwl_mld_phy_from_mac80211(chanctx_a)->avg_channel_load_not_by_us;

	IWL_DEBUG_EHT(mld, "Average channel load not by us: %u\n", primary_load_perc);

	if (primary_load_perc < iwl_mld_get_min_chan_load_thresh(chanctx_a)) {
		IWL_DEBUG_EHT(mld, "Channel load is below the minimum threshold\n");
		return false;
	}

	if (iwl_mld_vif_low_latency(mld_vif)) {
		IWL_DEBUG_EHT(mld, "Low latency vif, EMLSR is allowed\n");
		return true;
	}

	if (a->chandef->width <= b->chandef->width)
		return true;

	bw_a = cfg80211_chandef_get_width(a->chandef);
	bw_b = cfg80211_chandef_get_width(b->chandef);
	ratio = bw_a / bw_b;

	switch (ratio) {
	case 2:
		return primary_load_perc > 25;
	case 4:
		return primary_load_perc > 40;
	case 8:
	case 16:
		return primary_load_perc > 50;
	}

	return false;
}

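/*
 * Return the mask of reasons for which the given pair of links must not be
 * used in EMLSR together (0 if the pair is allowed): per-link restrictions
 * on both links, same-band operation (except sufficiently separated 5 GHz
 * channels), and insufficient channel load on the primary.
 */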
VISIBLE_IF_IWLWIFI_KUNIT u32
iwl_mld_emlsr_pair_state(struct ieee80211_vif *vif,
			 struct iwl_mld_link_sel_data *a,
			 struct iwl_mld_link_sel_data *b)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld *mld = mld_vif->mld;
	u32 reason_mask = 0;

	/* Per-link considerations */
	reason_mask = iwl_mld_emlsr_disallowed_with_link(mld, vif, a, true);
	if (reason_mask)
		return reason_mask;

	reason_mask = iwl_mld_emlsr_disallowed_with_link(mld, vif, b, false);
	if (reason_mask)
		return reason_mask;

	if (a->chandef->chan->band == b->chandef->chan->band) {
		const struct cfg80211_chan_def *c_low = a->chandef;
		const struct cfg80211_chan_def *c_high = b->chandef;
		u32 c_low_upper_edge, c_high_lower_edge;

		if (c_low->chan->center_freq > c_high->chan->center_freq)
			swap(c_low, c_high);

		c_low_upper_edge = c_low->chan->center_freq +
				   cfg80211_chandef_get_width(c_low) / 2;
		c_high_lower_edge = c_high->chan->center_freq -
				    cfg80211_chandef_get_width(c_high) / 2;

		if (a->chandef->chan->band == NL80211_BAND_5GHZ &&
		    c_low_upper_edge <= 5330 && c_high_lower_edge >= 5490) {
			/* This case is fine - HW/FW can deal with it, there's
			 * enough separation between the two channels.
			 */
		} else {
			reason_mask |= IWL_MLD_EMLSR_EXIT_EQUAL_BAND;
		}
	}
	if (!iwl_mld_channel_load_allows_emlsr(mld, vif, a, b))
		reason_mask |= IWL_MLD_EMLSR_EXIT_CHAN_LOAD;

	if (reason_mask) {
		IWL_DEBUG_INFO(mld,
			       "Links %d and %d are not a valid pair for EMLSR\n",
			       a->link_id, b->link_id);
		IWL_DEBUG_INFO(mld,
			       "Link bandwidths are: %d and %d\n",
			       nl80211_chan_width_to_mhz(a->chandef->width),
			       nl80211_chan_width_to_mhz(b->chandef->width));
		iwl_mld_print_emlsr_exit(mld, reason_mask);
	}

	return reason_mask;
}
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_emlsr_pair_state);

/* Calculation is done with fixed-point with a scaling factor of 1/256 */
#define SCALE_FACTOR 256

/*
 * Returns the combined grade of two given links.
 * Returns 0 if EMLSR is not allowed with these 2 links.
 */
static
unsigned int iwl_mld_get_emlsr_grade(struct iwl_mld *mld,
				     struct ieee80211_vif *vif,
				     struct iwl_mld_link_sel_data *a,
				     struct iwl_mld_link_sel_data *b,
				     u8 *primary_id)
{
	struct ieee80211_bss_conf *primary_conf;
	struct wiphy *wiphy = ieee80211_vif_to_wdev(vif)->wiphy;
	unsigned int primary_load;

	lockdep_assert_wiphy(wiphy);

	/* a is always primary, b is always secondary */
	if (b->grade > a->grade)
		swap(a, b);

	*primary_id = a->link_id;

	if (iwl_mld_emlsr_pair_state(vif, a, b))
		return 0;

	primary_conf = wiphy_dereference(wiphy, vif->link_conf[*primary_id]);

	if (WARN_ON_ONCE(!primary_conf))
		return 0;

	primary_load = iwl_mld_get_chan_load(mld, primary_conf);

	/* The more the primary link is loaded, the more worthwhile EMLSR becomes */
	return a->grade + ((b->grade * primary_load) / SCALE_FACTOR);
}

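/*
 * Select the active link(s) for this vif based on the last MLO scan:
 * default to the single best-graded link, and move to an EMLSR pair when
 * the pair's combined grade is at least as good and EMLSR is not blocked.
 */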
static void _iwl_mld_select_links(struct iwl_mld *mld,
				  struct ieee80211_vif *vif)
{
	struct iwl_mld_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
	struct iwl_mld_link_sel_data *best_link;
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int max_active_links = iwl_mld_max_active_links(mld, vif);
	u16 new_active, usable_links = ieee80211_vif_usable_links(vif);
	u8 best_idx, new_primary, n_data;
	u16 max_grade;

	lockdep_assert_wiphy(mld->wiphy);

	if (!mld_vif->authorized || hweight16(usable_links) <= 1)
		return;

	if (WARN(ktime_before(mld->scan.last_mlo_scan_time,
			      ktime_sub_ns(ktime_get_boottime_ns(),
					   5ULL * NSEC_PER_SEC)),
		"Last MLO scan was too long ago, can't select links\n"))
		return;

	/* The logic below is simple and not suited for more than 2 links */
	WARN_ON_ONCE(max_active_links > 2);

	n_data = iwl_mld_set_link_sel_data(mld, vif, data, usable_links,
					   &best_idx);

	if (!n_data) {
		IWL_DEBUG_EHT(mld,
			      "Couldn't find a valid grade for any link!\n");
		return;
	}

	/* Default to selecting the single best link */
	best_link = &data[best_idx];
	new_primary = best_link->link_id;
	new_active = BIT(best_link->link_id);
	max_grade = best_link->grade;

	/* If EMLSR is not possible, activate the best link */
	if (max_active_links == 1 || n_data == 1 ||
	    !iwl_mld_vif_has_emlsr_cap(vif) || !IWL_MLD_AUTO_EML_ENABLE ||
	    mld_vif->emlsr.blocked_reasons)
		goto set_active;

	/* Try to find the best link combination */
	for (u8 a = 0; a < n_data; a++) {
		for (u8 b = a + 1; b < n_data; b++) {
			u8 best_in_pair;
			u16 emlsr_grade =
				iwl_mld_get_emlsr_grade(mld, vif,
							&data[a], &data[b],
							&best_in_pair);

			/*
			 * On a tie, prefer the (new) EMLSR combination, so
			 * that EMLSR is preferred over a single link.
			 */
			if (emlsr_grade < max_grade)
				continue;

			max_grade = emlsr_grade;
			new_primary = best_in_pair;
			new_active = BIT(data[a].link_id) |
				     BIT(data[b].link_id);
		}
	}

set_active:
	IWL_DEBUG_INFO(mld, "Link selection result: 0x%x. Primary = %d\n",
		       new_active, new_primary);

	mld_vif->emlsr.selected_primary = new_primary;
	mld_vif->emlsr.selected_links = new_active;

	ieee80211_set_active_links_async(vif, new_active);
}

static void iwl_mld_vif_iter_select_links(void *_data, u8 *mac,
					  struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld *mld = mld_vif->mld;

	_iwl_mld_select_links(mld, vif);
}

void iwl_mld_select_links(struct iwl_mld *mld)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_select_links,
						NULL);
}

static void iwl_mld_emlsr_check_bt_iter(void *_data, u8 *mac,
					struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	const struct iwl_bt_coex_profile_notif zero_notif = {};
	struct iwl_mld *mld = mld_vif->mld;
	struct ieee80211_bss_conf *link;
	unsigned int link_id;
	const struct iwl_bt_coex_profile_notif *notif = &mld->last_bt_notif;

	if (!iwl_mld_vif_has_emlsr_cap(vif))
		return;

	/* zeroed structure means that BT is OFF */
	if (!memcmp(notif, &zero_notif, sizeof(*notif))) {
		iwl_mld_retry_emlsr(mld, vif);
		return;
	}

	for_each_vif_active_link(vif, link, link_id) {
		bool emlsr_active, emlsr_allowed;

		if (WARN_ON(!link->chanreq.oper.chan))
			continue;

		if (link->chanreq.oper.chan->band != NL80211_BAND_2GHZ)
			continue;

		emlsr_active = iwl_mld_emlsr_active(vif);
		emlsr_allowed = iwl_mld_bt_allows_emlsr(mld, link,
							!emlsr_active);
		if (emlsr_allowed && !emlsr_active) {
			iwl_mld_retry_emlsr(mld, vif);
			return;
		}

		if (!emlsr_allowed && emlsr_active) {
			iwl_mld_exit_emlsr(mld, vif,
					   IWL_MLD_EMLSR_EXIT_BT_COEX,
					   iwl_mld_get_primary_link(vif));
			return;
		}
	}
}

void iwl_mld_emlsr_check_bt(struct iwl_mld *mld)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_emlsr_check_bt_iter,
						NULL);
}

struct iwl_mld_chan_load_data {
	struct iwl_mld_phy *phy;
	u32 prev_chan_load_not_by_us;
};

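/*
 * Per-vif handler for a channel load update on the primary link's channel
 * context: exit EMLSR when the load drops below IWL_MLD_EXIT_EMLSR_CHAN_LOAD,
 * or retry EMLSR when the load crosses one of the entry thresholds upwards.
 */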
static void iwl_mld_chan_load_update_iter(void *_data, u8 *mac,
					  struct ieee80211_vif *vif)
{
	struct iwl_mld_chan_load_data *data = _data;
	const struct iwl_mld_phy *phy = data->phy;
	struct ieee80211_chanctx_conf *chanctx =
		container_of((const void *)phy, struct ieee80211_chanctx_conf,
			     drv_priv);
	struct iwl_mld *mld = iwl_mld_vif_from_mac80211(vif)->mld;
	struct ieee80211_bss_conf *prim_link;
	unsigned int prim_link_id;

	prim_link_id = iwl_mld_get_primary_link(vif);
	prim_link = link_conf_dereference_protected(vif, prim_link_id);

	if (WARN_ON(!prim_link))
		return;

	if (chanctx != rcu_access_pointer(prim_link->chanctx_conf))
		return;

	if (iwl_mld_emlsr_active(vif)) {
		int chan_load = iwl_mld_get_chan_load_by_others(mld, prim_link,
								true);

		if (chan_load < 0)
			return;

		/* chan_load is in range [0,255] */
		if (chan_load < NORMALIZE_PERCENT_TO_255(IWL_MLD_EXIT_EMLSR_CHAN_LOAD))
			iwl_mld_exit_emlsr(mld, vif,
					   IWL_MLD_EMLSR_EXIT_CHAN_LOAD,
					   prim_link_id);
	} else {
		u32 old_chan_load = data->prev_chan_load_not_by_us;
		u32 new_chan_load = phy->avg_channel_load_not_by_us;
		u32 min_thresh = iwl_mld_get_min_chan_load_thresh(chanctx);

#define THRESHOLD_CROSSED(threshold) \
	(old_chan_load <= (threshold) && new_chan_load > (threshold))

		if (THRESHOLD_CROSSED(min_thresh) || THRESHOLD_CROSSED(25) ||
		    THRESHOLD_CROSSED(40) || THRESHOLD_CROSSED(50))
			iwl_mld_retry_emlsr(mld, vif);
#undef THRESHOLD_CROSSED
	}
}

void iwl_mld_emlsr_check_chan_load(struct ieee80211_hw *hw,
				   struct iwl_mld_phy *phy,
				   u32 prev_chan_load_not_by_us)
{
	struct iwl_mld_chan_load_data data = {
		.phy = phy,
		.prev_chan_load_not_by_us = prev_chan_load_not_by_us,
	};

	ieee80211_iterate_active_interfaces_mtx(hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_chan_load_update_iter,
						&data);
}

void iwl_mld_retry_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif) ||
	    iwl_mld_emlsr_active(vif) || mld_vif->emlsr.blocked_reasons)
		return;

	iwl_mld_int_mlo_scan(mld, vif);
}

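/*
 * Start or stop ignoring throughput updates for EMLSR: when starting,
 * cancel the pending TPT check; when stopping, clear the MPDU counters and
 * re-arm the check so the next window starts from a clean state.
 */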
static void iwl_mld_ignore_tpt_iter(void *data, u8 *mac,
				    struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld *mld = mld_vif->mld;
	struct iwl_mld_sta *mld_sta;
	bool *start = (void *)data;

	/* check_tpt_wk is only used when the TPT block isn't set */
	if (mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT ||
	    !IWL_MLD_AUTO_EML_ENABLE || !mld_vif->ap_sta)
		return;

	mld_sta = iwl_mld_sta_from_mac80211(mld_vif->ap_sta);

	/* We only count for the AP sta in an MLO connection */
	if (!mld_sta->mpdu_counters)
		return;

	if (*start) {
		wiphy_delayed_work_cancel(mld_vif->mld->wiphy,
					  &mld_vif->emlsr.check_tpt_wk);
		IWL_DEBUG_EHT(mld, "TPT check disabled\n");
		return;
	}

	/* Clear the counters so we start from the beginning */
	for (int q = 0; q < mld->trans->info.num_rxqs; q++) {
		struct iwl_mld_per_q_mpdu_counter *queue_counter =
			&mld_sta->mpdu_counters[q];

		spin_lock_bh(&queue_counter->lock);

		memset(queue_counter->per_link, 0,
		       sizeof(queue_counter->per_link));

		spin_unlock_bh(&queue_counter->lock);
	}

	/* Schedule the check in 5 seconds */
	wiphy_delayed_work_queue(mld_vif->mld->wiphy,
				 &mld_vif->emlsr.check_tpt_wk,
				 round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));
	IWL_DEBUG_EHT(mld, "TPT check enabled\n");
}

void iwl_mld_start_ignoring_tpt_updates(struct iwl_mld *mld)
{
	bool start = true;

	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_ignore_tpt_iter,
						&start);
}

void iwl_mld_stop_ignoring_tpt_updates(struct iwl_mld *mld)
{
	bool start = false;

	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_ignore_tpt_iter,
						&start);
}