// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2024-2025 Intel Corporation
 */
#include "mld.h"
#include "iface.h"
#include "low_latency.h"
#include "hcmd.h"
#include "power.h"
#include "mlo.h"

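/*
 * Low-latency detection: vo/vi packets are counted per MAC and per Rx
 * queue. The counters are evaluated at most once per MLD_LL_PERIOD, in
 * windows that restart every MLD_LL_ACTIVE_WK_PERIOD, and a MAC is
 * marked low-latency once a window crosses MLD_LL_ENABLE_THRESH packets.
 */
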
#define MLD_LL_WK_INTERVAL_MSEC 500
#define MLD_LL_PERIOD (HZ * MLD_LL_WK_INTERVAL_MSEC / 1000)
#define MLD_LL_ACTIVE_WK_PERIOD (HZ * 10)

/* packets/MLD_LL_ACTIVE_WK_PERIOD (10 seconds) */
#define MLD_LL_ENABLE_THRESH 100

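/* Sum the vo/vi packet counters of each MAC over all Rx queues and update
 * its low-latency verdict; returns true if any MAC is in low latency.
 */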
static bool iwl_mld_calc_low_latency(struct iwl_mld *mld,
				     unsigned long timestamp)
{
	struct iwl_mld_low_latency *ll = &mld->low_latency;
	bool global_low_latency = false;
	u8 num_rx_q = mld->trans->num_rx_queues;

	for (int mac_id = 0; mac_id < NUM_MAC_INDEX_DRIVER; mac_id++) {
		u32 total_vo_vi_pkts = 0;
		bool ll_period_expired;

		/* If it's not initialized yet, it means we have not yet
		 * received/transmitted any vo/vi packet on this MAC.
		 */
		if (!ll->window_start[mac_id])
			continue;

		ll_period_expired =
			time_after(timestamp, ll->window_start[mac_id] +
				   MLD_LL_ACTIVE_WK_PERIOD);

		if (ll_period_expired)
			ll->window_start[mac_id] = timestamp;

		for (int q = 0; q < num_rx_q; q++) {
			struct iwl_mld_low_latency_packets_counters *counters =
				&mld->low_latency.pkts_counters[q];

			spin_lock_bh(&counters->lock);

			total_vo_vi_pkts += counters->vo_vi[mac_id];

			if (ll_period_expired)
				counters->vo_vi[mac_id] = 0;

			spin_unlock_bh(&counters->lock);
		}

		/* Enable immediately when there are enough packets, but
		 * defer disabling: only disable if the low-latency period
		 * expired with the count below the threshold.
		 */
		if (total_vo_vi_pkts > MLD_LL_ENABLE_THRESH)
			mld->low_latency.result[mac_id] = true;
		else if (ll_period_expired)
			mld->low_latency.result[mac_id] = false;

		global_low_latency |= mld->low_latency.result[mac_id];
	}

	return global_low_latency;
}

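/* Interface iterator: apply the calculated per-MAC verdict to the vif if
 * its LOW_LATENCY_TRAFFIC state changed.
 */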
static void iwl_mld_low_latency_iter(void *_data, u8 *mac,
				     struct ieee80211_vif *vif)
{
	struct iwl_mld *mld = _data;
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	bool prev = mld_vif->low_latency_causes & LOW_LATENCY_TRAFFIC;
	bool low_latency;

	if (WARN_ON(mld_vif->fw_id >= ARRAY_SIZE(mld->low_latency.result)))
		return;

	low_latency = mld->low_latency.result[mld_vif->fw_id];

	if (prev != low_latency)
		iwl_mld_vif_update_low_latency(mld, vif, low_latency,
					       LOW_LATENCY_TRAFFIC);
}

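/* Periodic worker: recalculate the low-latency state and push the result
 * to all active interfaces.
 */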
static void iwl_mld_low_latency_wk(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mld *mld = container_of(wk, struct iwl_mld,
					   low_latency.work.work);
	unsigned long timestamp = jiffies;
	bool low_latency_active;

	if (mld->fw_status.in_hw_restart)
		return;

	/* It is assumed that the work was scheduled only after verifying
	 * that at least MLD_LL_PERIOD has passed since the last update.
	 */

	low_latency_active = iwl_mld_calc_low_latency(mld, timestamp);

	/* Update the timestamp now after the low-latency calculation */
	mld->low_latency.timestamp = timestamp;

	/* If low-latency is active we need to force re-evaluation after
	 * 10 seconds, so that we can disable low-latency when
	 * the low-latency traffic ends.
	 *
	 * Otherwise, we don't need to run the work because there is nothing to
	 * disable.
	 *
	 * Note that this has no impact on the regular scheduling of the
	 * updates triggered by traffic - those happen whenever the
	 * MLD_LL_PERIOD timeout expires.
	 */
	if (low_latency_active)
		wiphy_delayed_work_queue(mld->wiphy, &mld->low_latency.work,
					 MLD_LL_ACTIVE_WK_PERIOD);

	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_low_latency_iter, mld);
}

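/* Allocate the per-Rx-queue packet counters and set up the delayed work.
 * Called from op mode init; see the WARN_ON_ONCE(!pkts_counters) in
 * iwl_mld_low_latency_update_counters().
 */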
int iwl_mld_low_latency_init(struct iwl_mld *mld)
{
	struct iwl_mld_low_latency *ll = &mld->low_latency;
	unsigned long ts = jiffies;

	ll->pkts_counters = kcalloc(mld->trans->num_rx_queues,
				    sizeof(*ll->pkts_counters), GFP_KERNEL);
	if (!ll->pkts_counters)
		return -ENOMEM;

	for (int q = 0; q < mld->trans->num_rx_queues; q++)
		spin_lock_init(&ll->pkts_counters[q].lock);

	wiphy_delayed_work_init(&ll->work, iwl_mld_low_latency_wk);

	ll->timestamp = ts;

	/* The low-latency window_start will be initialized per-MAC on
	 * the first vo/vi packet received/transmitted.
	 */

	return 0;
}

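/* Free the packet counters allocated in iwl_mld_low_latency_init() */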
void iwl_mld_low_latency_free(struct iwl_mld *mld)
{
	struct iwl_mld_low_latency *ll = &mld->low_latency;

	kfree(ll->pkts_counters);
	ll->pkts_counters = NULL;
}

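/* Reset the detection state (timestamp, windows, verdicts and counters),
 * e.g. when cleaning up for a firmware restart.
 */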
void iwl_mld_low_latency_restart_cleanup(struct iwl_mld *mld)
{
	struct iwl_mld_low_latency *ll = &mld->low_latency;

	ll->timestamp = jiffies;

	memset(ll->window_start, 0, sizeof(ll->window_start));
	memset(ll->result, 0, sizeof(ll->result));

	for (int q = 0; q < mld->trans->num_rx_queues; q++)
		memset(ll->pkts_counters[q].vo_vi, 0,
		       sizeof(ll->pkts_counters[q].vo_vi));
}

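/* Tell the firmware to enable/disable low latency on the given MAC */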
static int iwl_mld_send_low_latency_cmd(struct iwl_mld *mld, bool low_latency,
					u16 mac_id)
{
	struct iwl_mac_low_latency_cmd cmd = {
		.mac_id = cpu_to_le32(mac_id)
	};
	u16 cmd_id = WIDE_ID(MAC_CONF_GROUP, LOW_LATENCY_CMD);
	int ret;

	if (low_latency) {
		/* Currently we don't care about the direction */
		cmd.low_latency_rx = 1;
		cmd.low_latency_tx = 1;
	}

	ret = iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd);
	if (ret)
		IWL_ERR(mld, "Failed to send low latency command\n");

	return ret;
}

static void iwl_mld_vif_set_low_latency(struct iwl_mld_vif *mld_vif, bool set,
					enum iwl_mld_low_latency_cause cause)
{
	if (set)
		mld_vif->low_latency_causes |= cause;
	else
		mld_vif->low_latency_causes &= ~cause;
}

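/* Update the given cause bit for the vif and, if the aggregated
 * low-latency state changed, notify the firmware. The state is reverted
 * if the command fails.
 */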
void iwl_mld_vif_update_low_latency(struct iwl_mld *mld,
				    struct ieee80211_vif *vif,
				    bool low_latency,
				    enum iwl_mld_low_latency_cause cause)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	bool prev;

	prev = iwl_mld_vif_low_latency(mld_vif);
	iwl_mld_vif_set_low_latency(mld_vif, low_latency, cause);

	low_latency = iwl_mld_vif_low_latency(mld_vif);
	if (low_latency == prev)
		return;

	if (iwl_mld_send_low_latency_cmd(mld, low_latency, mld_vif->fw_id)) {
		/* revert to previous low-latency state */
		iwl_mld_vif_set_low_latency(mld_vif, prev, cause);
		return;
	}

	if (low_latency)
		iwl_mld_leave_omi_bw_reduction(mld);

	/* Power save settings depend on low latency only for P2P client */
	if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_P2P_CLIENT)
		iwl_mld_update_mac_power(mld, vif, false);

	if (low_latency)
		iwl_mld_retry_emlsr(mld, vif);
}

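/* Check whether this is a QoS data frame whose TID maps to the voice or
 * video access category.
 */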
static bool iwl_mld_is_vo_vi_pkt(struct ieee80211_hdr *hdr)
{
	u8 tid;
	static const u8 tid_to_mac80211_ac[] = {
		IEEE80211_AC_BE,
		IEEE80211_AC_BK,
		IEEE80211_AC_BK,
		IEEE80211_AC_BE,
		IEEE80211_AC_VI,
		IEEE80211_AC_VI,
		IEEE80211_AC_VO,
		IEEE80211_AC_VO,
	};

	if (!hdr || !ieee80211_is_data_qos(hdr->frame_control))
		return false;

	tid = ieee80211_get_tid(hdr);
	if (tid >= IWL_MAX_TID_COUNT)
		return false;

	/* IEEE80211_AC_VO is 0 and IEEE80211_AC_VI is 1, so this matches
	 * exactly the voice and video access categories.
	 */
	return tid_to_mac80211_ac[tid] <= IEEE80211_AC_VI;
}

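/* Count a potential vo/vi packet (Rx or Tx) per MAC and per queue, and
 * schedule the work if at least MLD_LL_PERIOD passed since the last
 * evaluation.
 */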
void iwl_mld_low_latency_update_counters(struct iwl_mld *mld,
					 struct ieee80211_hdr *hdr,
					 struct ieee80211_sta *sta,
					 u8 queue)
{
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(mld_sta->vif);
	struct iwl_mld_low_latency_packets_counters *counters;
	/* window_start == 0 means "not initialized", so avoid storing 0 */
	unsigned long ts = jiffies ? jiffies : 1;
	u8 fw_id = mld_vif->fw_id;

	/* we should have failed op mode init if NULL */
	if (WARN_ON_ONCE(!mld->low_latency.pkts_counters))
		return;

	if (WARN_ON_ONCE(fw_id >= ARRAY_SIZE(counters->vo_vi) ||
			 queue >= mld->trans->num_rx_queues))
		return;

	if (mld->low_latency.stopped)
		return;

	if (!iwl_mld_is_vo_vi_pkt(hdr))
		return;

	counters = &mld->low_latency.pkts_counters[queue];

	spin_lock_bh(&counters->lock);
	counters->vo_vi[fw_id]++;
	spin_unlock_bh(&counters->lock);

	/* Initialize the window_start on the first vo/vi packet */
	if (!mld->low_latency.window_start[fw_id])
		mld->low_latency.window_start[fw_id] = ts;

	if (time_is_before_jiffies(mld->low_latency.timestamp + MLD_LL_PERIOD))
		wiphy_delayed_work_queue(mld->wiphy, &mld->low_latency.work,
					 0);
}

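/* Stop counting and cancel the pending work; counting resumes in
 * iwl_mld_low_latency_restart().
 */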
void iwl_mld_low_latency_stop(struct iwl_mld *mld)
{
	lockdep_assert_wiphy(mld->wiphy);

	mld->low_latency.stopped = true;

	wiphy_delayed_work_cancel(mld->wiphy, &mld->low_latency.work);
}

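/* Resume detection after iwl_mld_low_latency_stop(): reset the windows
 * and counters, and re-evaluate soon if low latency was active so it can
 * be disabled even without new traffic.
 */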
void iwl_mld_low_latency_restart(struct iwl_mld *mld)
{
	struct iwl_mld_low_latency *ll = &mld->low_latency;
	bool low_latency = false;
	unsigned long ts = jiffies;

	lockdep_assert_wiphy(mld->wiphy);

	ll->timestamp = ts;
	mld->low_latency.stopped = false;

	for (int mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
		ll->window_start[mac] = 0;
		low_latency |= ll->result[mac];

		for (int q = 0; q < mld->trans->num_rx_queues; q++) {
			spin_lock_bh(&ll->pkts_counters[q].lock);
			ll->pkts_counters[q].vo_vi[mac] = 0;
			spin_unlock_bh(&ll->pkts_counters[q].lock);
		}
	}

	/* if low latency is active, force re-evaluation to cover the case of
	 * no traffic.
	 */
	if (low_latency)
		wiphy_delayed_work_queue(mld->wiphy, &ll->work, MLD_LL_PERIOD);
}