xref: /linux/drivers/net/wireless/intel/iwlwifi/mld/low_latency.c (revision fd04fbee7f0f8ec986772d41a1e1717f5bcf941c)
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2024-2025 Intel Corporation
 */
#include "mld.h"
#include "iface.h"
#include "low_latency.h"
#include "hcmd.h"
#include "power.h"

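/* Low-latency detection: vo/vi traffic is counted per FW MAC id and per
 * RX queue, and a delayed work periodically sums the counters to decide
 * whether to set or clear the LOW_LATENCY_TRAFFIC cause on each vif,
 * which is then reported to the FW via LOW_LATENCY_CMD.
 */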
#define MLD_LL_WK_INTERVAL_MSEC 500
#define MLD_LL_PERIOD (HZ * MLD_LL_WK_INTERVAL_MSEC / 1000)
#define MLD_LL_ACTIVE_WK_PERIOD (HZ * 10)

/* packets accumulated within one MLD_LL_ACTIVE_WK_PERIOD window */
#define MLD_LL_ENABLE_THRESH 100

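/* Sum the vo/vi counters of all RX queues for each FW MAC id and decide,
 * with hysteresis, whether that MAC carries low-latency traffic: enter
 * low latency as soon as more than MLD_LL_ENABLE_THRESH packets were seen
 * in the current window, leave it only once a full MLD_LL_ACTIVE_WK_PERIOD
 * window ended below the threshold. Returns true if any MAC is in low
 * latency.
 */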
static bool iwl_mld_calc_low_latency(struct iwl_mld *mld,
				     unsigned long timestamp)
{
	struct iwl_mld_low_latency *ll = &mld->low_latency;
	bool global_low_latency = false;
	u8 num_rx_q = mld->trans->num_rx_queues;

	for (int mac_id = 0; mac_id < NUM_MAC_INDEX_DRIVER; mac_id++) {
		u32 total_vo_vi_pkts = 0;
		bool ll_period_expired;

		/* If it's not initialized yet, it means we have not yet
		 * received/transmitted any vo/vi packet on this MAC.
		 */
		if (!ll->window_start[mac_id])
			continue;

		ll_period_expired =
			time_after(timestamp, ll->window_start[mac_id] +
				   MLD_LL_ACTIVE_WK_PERIOD);

		if (ll_period_expired)
			ll->window_start[mac_id] = timestamp;

		for (int q = 0; q < num_rx_q; q++) {
			struct iwl_mld_low_latency_packets_counters *counters =
				&mld->low_latency.pkts_counters[q];

			spin_lock_bh(&counters->lock);

			total_vo_vi_pkts += counters->vo_vi[mac_id];

			if (ll_period_expired)
				counters->vo_vi[mac_id] = 0;

			spin_unlock_bh(&counters->lock);
		}

		/* Enable low latency immediately once enough packets were
		 * seen, but disable it only after the low-latency period
		 * expired with the count still below the threshold.
		 */
		if (total_vo_vi_pkts > MLD_LL_ENABLE_THRESH)
			mld->low_latency.result[mac_id] = true;
		else if (ll_period_expired)
			mld->low_latency.result[mac_id] = false;

		global_low_latency |= mld->low_latency.result[mac_id];
	}

	return global_low_latency;
}

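/* Interface iterator: sync the LOW_LATENCY_TRAFFIC cause of this vif with
 * the result computed for its FW MAC id.
 */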
static void iwl_mld_low_latency_iter(void *_data, u8 *mac,
				     struct ieee80211_vif *vif)
{
	struct iwl_mld *mld = _data;
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	bool prev = mld_vif->low_latency_causes & LOW_LATENCY_TRAFFIC;
	bool low_latency;

	if (WARN_ON(mld_vif->fw_id >= ARRAY_SIZE(mld->low_latency.result)))
		return;

	low_latency = mld->low_latency.result[mld_vif->fw_id];

	if (prev != low_latency)
		iwl_mld_vif_update_low_latency(mld, vif, low_latency,
					       LOW_LATENCY_TRAFFIC);
}

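/* Worker: re-evaluate the low-latency state and propagate changes to all
 * active interfaces. Re-arms itself while low latency is active so the
 * state can eventually be cleared when the traffic stops.
 */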
static void iwl_mld_low_latency_wk(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mld *mld = container_of(wk, struct iwl_mld,
					   low_latency.work.work);
	unsigned long timestamp = jiffies;
	bool low_latency_active;

	if (mld->fw_status.in_hw_restart)
		return;

	/* It is assumed that the work was scheduled only after checking
	 * that at least MLD_LL_PERIOD has passed since the last update.
	 */

	low_latency_active = iwl_mld_calc_low_latency(mld, timestamp);

	/* Update the timestamp now after the low-latency calculation */
	mld->low_latency.timestamp = timestamp;

	/* If low-latency is active we need to force re-evaluation after
	 * 10 seconds, so that we can disable low-latency when
	 * the low-latency traffic ends.
	 *
	 * Otherwise, we don't need to run the work because there is nothing to
	 * disable.
	 *
	 * Note that this has no impact on the regular scheduling of the
	 * updates triggered by traffic - those happen whenever the
	 * MLD_LL_PERIOD timeout expires.
	 */
	if (low_latency_active)
		wiphy_delayed_work_queue(mld->wiphy, &mld->low_latency.work,
					 MLD_LL_ACTIVE_WK_PERIOD);

	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_low_latency_iter, mld);
}

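/* Allocate the per-RX-queue vo/vi counters and initialize the low-latency
 * delayed work.
 */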
int iwl_mld_low_latency_init(struct iwl_mld *mld)
{
	struct iwl_mld_low_latency *ll = &mld->low_latency;
	unsigned long ts = jiffies;

	ll->pkts_counters = kcalloc(mld->trans->num_rx_queues,
				    sizeof(*ll->pkts_counters), GFP_KERNEL);
	if (!ll->pkts_counters)
		return -ENOMEM;

	for (int q = 0; q < mld->trans->num_rx_queues; q++)
		spin_lock_init(&ll->pkts_counters[q].lock);

	wiphy_delayed_work_init(&ll->work, iwl_mld_low_latency_wk);

	ll->timestamp = ts;

	/* The low-latency window_start will be initialized per-MAC on
	 * the first vo/vi packet received/transmitted.
	 */

	return 0;
}

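/* Free the counters allocated in iwl_mld_low_latency_init() */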
void iwl_mld_low_latency_free(struct iwl_mld *mld)
{
	struct iwl_mld_low_latency *ll = &mld->low_latency;

	kfree(ll->pkts_counters);
	ll->pkts_counters = NULL;
}

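/* Reset all low-latency bookkeeping as part of the restart cleanup */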
void iwl_mld_low_latency_restart_cleanup(struct iwl_mld *mld)
{
	struct iwl_mld_low_latency *ll = &mld->low_latency;

	ll->timestamp = jiffies;

	memset(ll->window_start, 0, sizeof(ll->window_start));
	memset(ll->result, 0, sizeof(ll->result));

	for (int q = 0; q < mld->trans->num_rx_queues; q++)
		memset(ll->pkts_counters[q].vo_vi, 0,
		       sizeof(ll->pkts_counters[q].vo_vi));
}

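/* Tell the FW to enable or disable low latency on the given FW MAC id */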
static int iwl_mld_send_low_latency_cmd(struct iwl_mld *mld, bool low_latency,
					u16 mac_id)
{
	struct iwl_mac_low_latency_cmd cmd = {
		.mac_id = cpu_to_le32(mac_id)
	};
	u16 cmd_id = WIDE_ID(MAC_CONF_GROUP, LOW_LATENCY_CMD);
	int ret;

	if (low_latency) {
		/* Currently we don't care about the direction */
		cmd.low_latency_rx = 1;
		cmd.low_latency_tx = 1;
	}

	ret = iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd);
	if (ret)
		IWL_ERR(mld, "Failed to send low latency command\n");

	return ret;
}

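/* Set or clear a single low-latency cause bit on the vif */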
static void iwl_mld_vif_set_low_latency(struct iwl_mld_vif *mld_vif, bool set,
					enum iwl_mld_low_latency_cause cause)
{
	if (set)
		mld_vif->low_latency_causes |= cause;
	else
		mld_vif->low_latency_causes &= ~cause;
}

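/* Update one low-latency cause for the vif. If the aggregated state
 * changed, notify the FW; on failure the cause is reverted. For a P2P
 * client vif, the power configuration is updated as well.
 */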
void iwl_mld_vif_update_low_latency(struct iwl_mld *mld,
				    struct ieee80211_vif *vif,
				    bool low_latency,
				    enum iwl_mld_low_latency_cause cause)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	bool prev;

	prev = iwl_mld_vif_low_latency(mld_vif);
	iwl_mld_vif_set_low_latency(mld_vif, low_latency, cause);

	low_latency = iwl_mld_vif_low_latency(mld_vif);
	if (low_latency == prev)
		return;

	if (iwl_mld_send_low_latency_cmd(mld, low_latency, mld_vif->fw_id)) {
		/* revert to previous low-latency state */
		iwl_mld_vif_set_low_latency(mld_vif, prev, cause);
		return;
	}

	if (low_latency)
		iwl_mld_leave_omi_bw_reduction(mld);

	if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_P2P_CLIENT)
		return;

	iwl_mld_update_mac_power(mld, vif, false);
}

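/* Return true for QoS data frames whose TID maps to the VO or VI AC */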
static bool iwl_mld_is_vo_vi_pkt(struct ieee80211_hdr *hdr)
{
	u8 tid;
	static const u8 tid_to_mac80211_ac[] = {
		IEEE80211_AC_BE,
		IEEE80211_AC_BK,
		IEEE80211_AC_BK,
		IEEE80211_AC_BE,
		IEEE80211_AC_VI,
		IEEE80211_AC_VI,
		IEEE80211_AC_VO,
		IEEE80211_AC_VO,
	};

	if (!hdr || !ieee80211_is_data_qos(hdr->frame_control))
		return false;

	tid = ieee80211_get_tid(hdr);
	if (tid >= IWL_MAX_TID_COUNT)
		return false;

	return tid_to_mac80211_ac[tid] < IEEE80211_AC_BE;
}

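/* Count an RX/TX'ed vo/vi packet for the vif's FW MAC id on the given
 * queue, open the per-MAC window on the first such packet (a value of 0
 * means the window was not started yet) and kick the worker if more than
 * MLD_LL_PERIOD has passed since the last evaluation.
 */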
void iwl_mld_low_latency_update_counters(struct iwl_mld *mld,
					 struct ieee80211_hdr *hdr,
					 struct ieee80211_sta *sta,
					 u8 queue)
{
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(mld_sta->vif);
	struct iwl_mld_low_latency_packets_counters *counters;
	unsigned long ts = jiffies ? jiffies : 1;
	u8 fw_id = mld_vif->fw_id;

	/* we should have failed op mode init if NULL */
	if (WARN_ON_ONCE(!mld->low_latency.pkts_counters))
		return;

	if (WARN_ON_ONCE(fw_id >= ARRAY_SIZE(counters->vo_vi) ||
			 queue >= mld->trans->num_rx_queues))
		return;

	if (mld->low_latency.stopped)
		return;

	if (!iwl_mld_is_vo_vi_pkt(hdr))
		return;

	counters = &mld->low_latency.pkts_counters[queue];

	spin_lock_bh(&counters->lock);
	counters->vo_vi[fw_id]++;
	spin_unlock_bh(&counters->lock);

	/* Initialize the window_start on the first vo/vi packet */
	if (!mld->low_latency.window_start[fw_id])
		mld->low_latency.window_start[fw_id] = ts;

	if (time_is_before_jiffies(mld->low_latency.timestamp + MLD_LL_PERIOD))
		wiphy_delayed_work_queue(mld->wiphy, &mld->low_latency.work,
					 0);
}

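/* Pause low-latency processing and cancel the pending work */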
void iwl_mld_low_latency_stop(struct iwl_mld *mld)
{
	lockdep_assert_wiphy(mld->wiphy);

	mld->low_latency.stopped = true;

	wiphy_delayed_work_cancel(mld->wiphy, &mld->low_latency.work);
}

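/* Resume low-latency processing: reset the windows and counters, and if
 * any MAC was in low latency, schedule the work so the state is
 * re-evaluated even if no new traffic arrives.
 */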
void iwl_mld_low_latency_restart(struct iwl_mld *mld)
{
	struct iwl_mld_low_latency *ll = &mld->low_latency;
	bool low_latency = false;
	unsigned long ts = jiffies;

	lockdep_assert_wiphy(mld->wiphy);

	ll->timestamp = ts;
	mld->low_latency.stopped = false;

	for (int mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
		ll->window_start[mac] = 0;
		low_latency |= ll->result[mac];

		for (int q = 0; q < mld->trans->num_rx_queues; q++) {
			spin_lock_bh(&ll->pkts_counters[q].lock);
			ll->pkts_counters[q].vo_vi[mac] = 0;
			spin_unlock_bh(&ll->pkts_counters[q].lock);
		}
	}

	/* if low latency is active, force re-evaluation to cover the case of
	 * no traffic.
	 */
	if (low_latency)
		wiphy_delayed_work_queue(mld->wiphy, &ll->work, MLD_LL_PERIOD);
}