1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2019-2020 Realtek Corporation
3 */
4 #include <linux/ip.h>
5 #include <linux/udp.h>
6
7 #include "cam.h"
8 #include "chan.h"
9 #include "coex.h"
10 #include "core.h"
11 #include "efuse.h"
12 #include "fw.h"
13 #include "mac.h"
14 #include "phy.h"
15 #include "ps.h"
16 #include "reg.h"
17 #include "sar.h"
18 #include "ser.h"
19 #include "txrx.h"
20 #include "util.h"
21 #include "wow.h"
22
23 static bool rtw89_disable_ps_mode;
24 module_param_named(disable_ps_mode, rtw89_disable_ps_mode, bool, 0644);
25 MODULE_PARM_DESC(disable_ps_mode, "Set Y to disable low power mode");
26
27 #define RTW89_DEF_CHAN(_freq, _hw_val, _flags, _band) \
28 { .center_freq = _freq, .hw_value = _hw_val, .flags = _flags, .band = _band, }
29 #define RTW89_DEF_CHAN_2G(_freq, _hw_val) \
30 RTW89_DEF_CHAN(_freq, _hw_val, 0, NL80211_BAND_2GHZ)
31 #define RTW89_DEF_CHAN_5G(_freq, _hw_val) \
32 RTW89_DEF_CHAN(_freq, _hw_val, 0, NL80211_BAND_5GHZ)
33 #define RTW89_DEF_CHAN_5G_NO_HT40MINUS(_freq, _hw_val) \
34 RTW89_DEF_CHAN(_freq, _hw_val, IEEE80211_CHAN_NO_HT40MINUS, NL80211_BAND_5GHZ)
35 #define RTW89_DEF_CHAN_6G(_freq, _hw_val) \
36 RTW89_DEF_CHAN(_freq, _hw_val, 0, NL80211_BAND_6GHZ)
37
38 static struct ieee80211_channel rtw89_channels_2ghz[] = {
39 RTW89_DEF_CHAN_2G(2412, 1),
40 RTW89_DEF_CHAN_2G(2417, 2),
41 RTW89_DEF_CHAN_2G(2422, 3),
42 RTW89_DEF_CHAN_2G(2427, 4),
43 RTW89_DEF_CHAN_2G(2432, 5),
44 RTW89_DEF_CHAN_2G(2437, 6),
45 RTW89_DEF_CHAN_2G(2442, 7),
46 RTW89_DEF_CHAN_2G(2447, 8),
47 RTW89_DEF_CHAN_2G(2452, 9),
48 RTW89_DEF_CHAN_2G(2457, 10),
49 RTW89_DEF_CHAN_2G(2462, 11),
50 RTW89_DEF_CHAN_2G(2467, 12),
51 RTW89_DEF_CHAN_2G(2472, 13),
52 RTW89_DEF_CHAN_2G(2484, 14),
53 };
54
55 static struct ieee80211_channel rtw89_channels_5ghz[] = {
56 RTW89_DEF_CHAN_5G(5180, 36),
57 RTW89_DEF_CHAN_5G(5200, 40),
58 RTW89_DEF_CHAN_5G(5220, 44),
59 RTW89_DEF_CHAN_5G(5240, 48),
60 RTW89_DEF_CHAN_5G(5260, 52),
61 RTW89_DEF_CHAN_5G(5280, 56),
62 RTW89_DEF_CHAN_5G(5300, 60),
63 RTW89_DEF_CHAN_5G(5320, 64),
64 RTW89_DEF_CHAN_5G(5500, 100),
65 RTW89_DEF_CHAN_5G(5520, 104),
66 RTW89_DEF_CHAN_5G(5540, 108),
67 RTW89_DEF_CHAN_5G(5560, 112),
68 RTW89_DEF_CHAN_5G(5580, 116),
69 RTW89_DEF_CHAN_5G(5600, 120),
70 RTW89_DEF_CHAN_5G(5620, 124),
71 RTW89_DEF_CHAN_5G(5640, 128),
72 RTW89_DEF_CHAN_5G(5660, 132),
73 RTW89_DEF_CHAN_5G(5680, 136),
74 RTW89_DEF_CHAN_5G(5700, 140),
75 RTW89_DEF_CHAN_5G(5720, 144),
76 RTW89_DEF_CHAN_5G(5745, 149),
77 RTW89_DEF_CHAN_5G(5765, 153),
78 RTW89_DEF_CHAN_5G(5785, 157),
79 RTW89_DEF_CHAN_5G(5805, 161),
80 RTW89_DEF_CHAN_5G_NO_HT40MINUS(5825, 165),
81 RTW89_DEF_CHAN_5G(5845, 169),
82 RTW89_DEF_CHAN_5G(5865, 173),
83 RTW89_DEF_CHAN_5G(5885, 177),
84 };
85
86 static_assert(RTW89_5GHZ_UNII4_START_INDEX + RTW89_5GHZ_UNII4_CHANNEL_NUM ==
87 ARRAY_SIZE(rtw89_channels_5ghz));
88
89 static struct ieee80211_channel rtw89_channels_6ghz[] = {
90 RTW89_DEF_CHAN_6G(5955, 1),
91 RTW89_DEF_CHAN_6G(5975, 5),
92 RTW89_DEF_CHAN_6G(5995, 9),
93 RTW89_DEF_CHAN_6G(6015, 13),
94 RTW89_DEF_CHAN_6G(6035, 17),
95 RTW89_DEF_CHAN_6G(6055, 21),
96 RTW89_DEF_CHAN_6G(6075, 25),
97 RTW89_DEF_CHAN_6G(6095, 29),
98 RTW89_DEF_CHAN_6G(6115, 33),
99 RTW89_DEF_CHAN_6G(6135, 37),
100 RTW89_DEF_CHAN_6G(6155, 41),
101 RTW89_DEF_CHAN_6G(6175, 45),
102 RTW89_DEF_CHAN_6G(6195, 49),
103 RTW89_DEF_CHAN_6G(6215, 53),
104 RTW89_DEF_CHAN_6G(6235, 57),
105 RTW89_DEF_CHAN_6G(6255, 61),
106 RTW89_DEF_CHAN_6G(6275, 65),
107 RTW89_DEF_CHAN_6G(6295, 69),
108 RTW89_DEF_CHAN_6G(6315, 73),
109 RTW89_DEF_CHAN_6G(6335, 77),
110 RTW89_DEF_CHAN_6G(6355, 81),
111 RTW89_DEF_CHAN_6G(6375, 85),
112 RTW89_DEF_CHAN_6G(6395, 89),
113 RTW89_DEF_CHAN_6G(6415, 93),
114 RTW89_DEF_CHAN_6G(6435, 97),
115 RTW89_DEF_CHAN_6G(6455, 101),
116 RTW89_DEF_CHAN_6G(6475, 105),
117 RTW89_DEF_CHAN_6G(6495, 109),
118 RTW89_DEF_CHAN_6G(6515, 113),
119 RTW89_DEF_CHAN_6G(6535, 117),
120 RTW89_DEF_CHAN_6G(6555, 121),
121 RTW89_DEF_CHAN_6G(6575, 125),
122 RTW89_DEF_CHAN_6G(6595, 129),
123 RTW89_DEF_CHAN_6G(6615, 133),
124 RTW89_DEF_CHAN_6G(6635, 137),
125 RTW89_DEF_CHAN_6G(6655, 141),
126 RTW89_DEF_CHAN_6G(6675, 145),
127 RTW89_DEF_CHAN_6G(6695, 149),
128 RTW89_DEF_CHAN_6G(6715, 153),
129 RTW89_DEF_CHAN_6G(6735, 157),
130 RTW89_DEF_CHAN_6G(6755, 161),
131 RTW89_DEF_CHAN_6G(6775, 165),
132 RTW89_DEF_CHAN_6G(6795, 169),
133 RTW89_DEF_CHAN_6G(6815, 173),
134 RTW89_DEF_CHAN_6G(6835, 177),
135 RTW89_DEF_CHAN_6G(6855, 181),
136 RTW89_DEF_CHAN_6G(6875, 185),
137 RTW89_DEF_CHAN_6G(6895, 189),
138 RTW89_DEF_CHAN_6G(6915, 193),
139 RTW89_DEF_CHAN_6G(6935, 197),
140 RTW89_DEF_CHAN_6G(6955, 201),
141 RTW89_DEF_CHAN_6G(6975, 205),
142 RTW89_DEF_CHAN_6G(6995, 209),
143 RTW89_DEF_CHAN_6G(7015, 213),
144 RTW89_DEF_CHAN_6G(7035, 217),
145 RTW89_DEF_CHAN_6G(7055, 221),
146 RTW89_DEF_CHAN_6G(7075, 225),
147 RTW89_DEF_CHAN_6G(7095, 229),
148 RTW89_DEF_CHAN_6G(7115, 233),
149 };
150
151 static struct ieee80211_rate rtw89_bitrates[] = {
152 { .bitrate = 10, .hw_value = 0x00, },
153 { .bitrate = 20, .hw_value = 0x01, },
154 { .bitrate = 55, .hw_value = 0x02, },
155 { .bitrate = 110, .hw_value = 0x03, },
156 { .bitrate = 60, .hw_value = 0x04, },
157 { .bitrate = 90, .hw_value = 0x05, },
158 { .bitrate = 120, .hw_value = 0x06, },
159 { .bitrate = 180, .hw_value = 0x07, },
160 { .bitrate = 240, .hw_value = 0x08, },
161 { .bitrate = 360, .hw_value = 0x09, },
162 { .bitrate = 480, .hw_value = 0x0a, },
163 { .bitrate = 540, .hw_value = 0x0b, },
164 };
165
166 static const struct ieee80211_iface_limit rtw89_iface_limits[] = {
167 {
168 .max = 1,
169 .types = BIT(NL80211_IFTYPE_STATION),
170 },
171 {
172 .max = 1,
173 .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
174 BIT(NL80211_IFTYPE_P2P_GO) |
175 BIT(NL80211_IFTYPE_AP),
176 },
177 };
178
179 static const struct ieee80211_iface_limit rtw89_iface_limits_mcc[] = {
180 {
181 .max = 1,
182 .types = BIT(NL80211_IFTYPE_STATION),
183 },
184 {
185 .max = 1,
186 .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
187 BIT(NL80211_IFTYPE_P2P_GO),
188 },
189 };
190
191 static const struct ieee80211_iface_combination rtw89_iface_combs[] = {
192 {
193 .limits = rtw89_iface_limits,
194 .n_limits = ARRAY_SIZE(rtw89_iface_limits),
195 .max_interfaces = RTW89_MAX_INTERFACE_NUM,
196 .num_different_channels = 1,
197 },
198 {
199 .limits = rtw89_iface_limits_mcc,
200 .n_limits = ARRAY_SIZE(rtw89_iface_limits_mcc),
201 .max_interfaces = RTW89_MAX_INTERFACE_NUM,
202 .num_different_channels = 2,
203 },
204 };
205
206 static const u8 rtw89_ext_capa_sta[] = {
207 [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
208 [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
209 [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
210 };
211
212 static const struct wiphy_iftype_ext_capab rtw89_iftypes_ext_capa[] = {
213 {
214 .iftype = NL80211_IFTYPE_STATION,
215 .extended_capabilities = rtw89_ext_capa_sta,
216 .extended_capabilities_mask = rtw89_ext_capa_sta,
217 .extended_capabilities_len = sizeof(rtw89_ext_capa_sta),
218 /* relevant only if EHT is supported */
219 .eml_capabilities = 0,
220 .mld_capa_and_ops = 0,
221 },
222 };
223
224 #define RTW89_6GHZ_SPAN_HEAD 6145
225 #define RTW89_6GHZ_SPAN_IDX(center_freq) \
226 ((((int)(center_freq) - RTW89_6GHZ_SPAN_HEAD) / 5) / 2)
227
228 #define RTW89_DECL_6GHZ_SPAN(center_freq, subband_l, subband_h) \
229 [RTW89_6GHZ_SPAN_IDX(center_freq)] = { \
230 .sar_subband_low = RTW89_SAR_6GHZ_ ## subband_l, \
231 .sar_subband_high = RTW89_SAR_6GHZ_ ## subband_h, \
232 .acpi_sar_subband_low = RTW89_ACPI_SAR_6GHZ_ ## subband_l, \
233 .acpi_sar_subband_high = RTW89_ACPI_SAR_6GHZ_ ## subband_h, \
234 .ant_gain_subband_low = RTW89_ANT_GAIN_6GHZ_ ## subband_l, \
235 .ant_gain_subband_high = RTW89_ANT_GAIN_6GHZ_ ## subband_h, \
236 }
237
238 /* Since 6GHz subbands are not edge aligned, some cases span two subbands.
239 * In the following, we describe each of them with rtw89_6ghz_span.
240 */
241 static const struct rtw89_6ghz_span rtw89_overlapping_6ghz[] = {
242 RTW89_DECL_6GHZ_SPAN(6145, SUBBAND_5_L, SUBBAND_5_H),
243 RTW89_DECL_6GHZ_SPAN(6165, SUBBAND_5_L, SUBBAND_5_H),
244 RTW89_DECL_6GHZ_SPAN(6185, SUBBAND_5_L, SUBBAND_5_H),
245 RTW89_DECL_6GHZ_SPAN(6505, SUBBAND_6, SUBBAND_7_L),
246 RTW89_DECL_6GHZ_SPAN(6525, SUBBAND_6, SUBBAND_7_L),
247 RTW89_DECL_6GHZ_SPAN(6545, SUBBAND_6, SUBBAND_7_L),
248 RTW89_DECL_6GHZ_SPAN(6665, SUBBAND_7_L, SUBBAND_7_H),
249 RTW89_DECL_6GHZ_SPAN(6705, SUBBAND_7_L, SUBBAND_7_H),
250 RTW89_DECL_6GHZ_SPAN(6825, SUBBAND_7_H, SUBBAND_8),
251 RTW89_DECL_6GHZ_SPAN(6865, SUBBAND_7_H, SUBBAND_8),
252 RTW89_DECL_6GHZ_SPAN(6875, SUBBAND_7_H, SUBBAND_8),
253 RTW89_DECL_6GHZ_SPAN(6885, SUBBAND_7_H, SUBBAND_8),
254 };
255
256 const struct rtw89_6ghz_span *
rtw89_get_6ghz_span(struct rtw89_dev * rtwdev,u32 center_freq)257 rtw89_get_6ghz_span(struct rtw89_dev *rtwdev, u32 center_freq)
258 {
259 int idx;
260
261 if (center_freq >= RTW89_6GHZ_SPAN_HEAD) {
262 idx = RTW89_6GHZ_SPAN_IDX(center_freq);
263 /* To decrease size of rtw89_overlapping_6ghz[],
264 * RTW89_6GHZ_SPAN_IDX() truncates the leading NULLs
265 * to make first span as index 0 of the table. So, if center
266 * frequency is less than the first one, it will get netative.
267 */
268 if (idx >= 0 && idx < ARRAY_SIZE(rtw89_overlapping_6ghz))
269 return &rtw89_overlapping_6ghz[idx];
270 }
271
272 return NULL;
273 }
274
rtw89_ra_report_to_bitrate(struct rtw89_dev * rtwdev,u8 rpt_rate,u16 * bitrate)275 bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate)
276 {
277 struct ieee80211_rate rate;
278
279 if (unlikely(rpt_rate >= ARRAY_SIZE(rtw89_bitrates))) {
280 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "invalid rpt rate %d\n", rpt_rate);
281 return false;
282 }
283
284 rate = rtw89_bitrates[rpt_rate];
285 *bitrate = rate.bitrate;
286
287 return true;
288 }
289
290 static const struct ieee80211_supported_band rtw89_sband_2ghz = {
291 .band = NL80211_BAND_2GHZ,
292 .channels = rtw89_channels_2ghz,
293 .n_channels = ARRAY_SIZE(rtw89_channels_2ghz),
294 .bitrates = rtw89_bitrates,
295 .n_bitrates = ARRAY_SIZE(rtw89_bitrates),
296 .ht_cap = {0},
297 .vht_cap = {0},
298 };
299
300 static const struct ieee80211_supported_band rtw89_sband_5ghz = {
301 .band = NL80211_BAND_5GHZ,
302 .channels = rtw89_channels_5ghz,
303 .n_channels = ARRAY_SIZE(rtw89_channels_5ghz),
304
305 /* 5G has no CCK rates, 1M/2M/5.5M/11M */
306 .bitrates = rtw89_bitrates + 4,
307 .n_bitrates = ARRAY_SIZE(rtw89_bitrates) - 4,
308 .ht_cap = {0},
309 .vht_cap = {0},
310 };
311
312 static const struct ieee80211_supported_band rtw89_sband_6ghz = {
313 .band = NL80211_BAND_6GHZ,
314 .channels = rtw89_channels_6ghz,
315 .n_channels = ARRAY_SIZE(rtw89_channels_6ghz),
316
317 /* 6G has no CCK rates, 1M/2M/5.5M/11M */
318 .bitrates = rtw89_bitrates + 4,
319 .n_bitrates = ARRAY_SIZE(rtw89_bitrates) - 4,
320 };
321
__rtw89_traffic_stats_accu(struct rtw89_traffic_stats * stats,struct sk_buff * skb,bool tx)322 static void __rtw89_traffic_stats_accu(struct rtw89_traffic_stats *stats,
323 struct sk_buff *skb, bool tx)
324 {
325 if (tx) {
326 stats->tx_cnt++;
327 stats->tx_unicast += skb->len;
328 } else {
329 stats->rx_cnt++;
330 stats->rx_unicast += skb->len;
331 }
332 }
333
rtw89_traffic_stats_accu(struct rtw89_dev * rtwdev,struct rtw89_vif * rtwvif,struct sk_buff * skb,bool accu_dev,bool tx)334 static void rtw89_traffic_stats_accu(struct rtw89_dev *rtwdev,
335 struct rtw89_vif *rtwvif,
336 struct sk_buff *skb,
337 bool accu_dev, bool tx)
338 {
339 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
340
341 if (!ieee80211_is_data(hdr->frame_control))
342 return;
343
344 if (is_broadcast_ether_addr(hdr->addr1) ||
345 is_multicast_ether_addr(hdr->addr1))
346 return;
347
348 if (accu_dev)
349 __rtw89_traffic_stats_accu(&rtwdev->stats, skb, tx);
350
351 if (rtwvif) {
352 __rtw89_traffic_stats_accu(&rtwvif->stats, skb, tx);
353 __rtw89_traffic_stats_accu(&rtwvif->stats_ps, skb, tx);
354 }
355 }
356
rtw89_get_default_chandef(struct cfg80211_chan_def * chandef)357 void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef)
358 {
359 cfg80211_chandef_create(chandef, &rtw89_channels_2ghz[0],
360 NL80211_CHAN_NO_HT);
361 }
362
rtw89_get_channel_params(const struct cfg80211_chan_def * chandef,struct rtw89_chan * chan)363 void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef,
364 struct rtw89_chan *chan)
365 {
366 struct ieee80211_channel *channel = chandef->chan;
367 enum nl80211_chan_width width = chandef->width;
368 u32 primary_freq, center_freq;
369 u8 center_chan;
370 u8 bandwidth = RTW89_CHANNEL_WIDTH_20;
371 u32 offset;
372 u8 band;
373
374 center_chan = channel->hw_value;
375 primary_freq = channel->center_freq;
376 center_freq = chandef->center_freq1;
377
378 switch (width) {
379 case NL80211_CHAN_WIDTH_20_NOHT:
380 case NL80211_CHAN_WIDTH_20:
381 bandwidth = RTW89_CHANNEL_WIDTH_20;
382 break;
383 case NL80211_CHAN_WIDTH_40:
384 bandwidth = RTW89_CHANNEL_WIDTH_40;
385 if (primary_freq > center_freq) {
386 center_chan -= 2;
387 } else {
388 center_chan += 2;
389 }
390 break;
391 case NL80211_CHAN_WIDTH_80:
392 case NL80211_CHAN_WIDTH_160:
393 bandwidth = nl_to_rtw89_bandwidth(width);
394 if (primary_freq > center_freq) {
395 offset = (primary_freq - center_freq - 10) / 20;
396 center_chan -= 2 + offset * 4;
397 } else {
398 offset = (center_freq - primary_freq - 10) / 20;
399 center_chan += 2 + offset * 4;
400 }
401 break;
402 default:
403 center_chan = 0;
404 break;
405 }
406
407 switch (channel->band) {
408 default:
409 case NL80211_BAND_2GHZ:
410 band = RTW89_BAND_2G;
411 break;
412 case NL80211_BAND_5GHZ:
413 band = RTW89_BAND_5G;
414 break;
415 case NL80211_BAND_6GHZ:
416 band = RTW89_BAND_6G;
417 break;
418 }
419
420 rtw89_chan_create(chan, center_chan, channel->hw_value, band, bandwidth);
421 }
422
__rtw89_core_set_chip_txpwr(struct rtw89_dev * rtwdev,const struct rtw89_chan * chan,enum rtw89_phy_idx phy_idx)423 static void __rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev,
424 const struct rtw89_chan *chan,
425 enum rtw89_phy_idx phy_idx)
426 {
427 const struct rtw89_chip_info *chip = rtwdev->chip;
428 bool entity_active;
429
430 entity_active = rtw89_get_entity_state(rtwdev, phy_idx);
431 if (!entity_active)
432 return;
433
434 chip->ops->set_txpwr(rtwdev, chan, phy_idx);
435 }
436
rtw89_core_set_chip_txpwr(struct rtw89_dev * rtwdev)437 void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev)
438 {
439 const struct rtw89_chan *chan;
440
441 chan = rtw89_mgnt_chan_get(rtwdev, 0);
442 __rtw89_core_set_chip_txpwr(rtwdev, chan, RTW89_PHY_0);
443
444 if (!rtwdev->support_mlo)
445 return;
446
447 chan = rtw89_mgnt_chan_get(rtwdev, 1);
448 __rtw89_core_set_chip_txpwr(rtwdev, chan, RTW89_PHY_1);
449 }
450
__rtw89_set_channel(struct rtw89_dev * rtwdev,const struct rtw89_chan * chan,enum rtw89_mac_idx mac_idx,enum rtw89_phy_idx phy_idx)451 static void __rtw89_set_channel(struct rtw89_dev *rtwdev,
452 const struct rtw89_chan *chan,
453 enum rtw89_mac_idx mac_idx,
454 enum rtw89_phy_idx phy_idx)
455 {
456 const struct rtw89_chip_info *chip = rtwdev->chip;
457 const struct rtw89_chan_rcd *chan_rcd;
458 struct rtw89_channel_help_params bak;
459 bool entity_active;
460
461 entity_active = rtw89_get_entity_state(rtwdev, phy_idx);
462
463 chan_rcd = rtw89_chan_rcd_get_by_chan(chan);
464
465 rtw89_chip_set_channel_prepare(rtwdev, &bak, chan, mac_idx, phy_idx);
466
467 chip->ops->set_channel(rtwdev, chan, mac_idx, phy_idx);
468
469 chip->ops->set_txpwr(rtwdev, chan, phy_idx);
470
471 rtw89_chip_set_channel_done(rtwdev, &bak, chan, mac_idx, phy_idx);
472
473 if (!entity_active || chan_rcd->band_changed) {
474 rtw89_btc_ntfy_switch_band(rtwdev, phy_idx, chan->band_type);
475 rtw89_chip_rfk_band_changed(rtwdev, phy_idx, chan);
476 }
477
478 rtw89_set_entity_state(rtwdev, phy_idx, true);
479 }
480
rtw89_set_channel(struct rtw89_dev * rtwdev)481 int rtw89_set_channel(struct rtw89_dev *rtwdev)
482 {
483 const struct rtw89_chan *chan;
484 enum rtw89_entity_mode mode;
485
486 mode = rtw89_entity_recalc(rtwdev);
487 if (mode < 0 || mode >= NUM_OF_RTW89_ENTITY_MODE) {
488 WARN(1, "Invalid ent mode: %d\n", mode);
489 return -EINVAL;
490 }
491
492 chan = rtw89_mgnt_chan_get(rtwdev, 0);
493 __rtw89_set_channel(rtwdev, chan, RTW89_MAC_0, RTW89_PHY_0);
494
495 if (!rtwdev->support_mlo)
496 return 0;
497
498 chan = rtw89_mgnt_chan_get(rtwdev, 1);
499 __rtw89_set_channel(rtwdev, chan, RTW89_MAC_1, RTW89_PHY_1);
500
501 return 0;
502 }
503
504 static enum rtw89_core_tx_type
rtw89_core_get_tx_type(struct rtw89_dev * rtwdev,struct sk_buff * skb)505 rtw89_core_get_tx_type(struct rtw89_dev *rtwdev,
506 struct sk_buff *skb)
507 {
508 struct ieee80211_hdr *hdr = (void *)skb->data;
509 __le16 fc = hdr->frame_control;
510
511 if (ieee80211_is_mgmt(fc) || ieee80211_is_nullfunc(fc))
512 return RTW89_CORE_TX_TYPE_MGMT;
513
514 return RTW89_CORE_TX_TYPE_DATA;
515 }
516
517 static void
rtw89_core_tx_update_ampdu_info(struct rtw89_dev * rtwdev,struct rtw89_core_tx_request * tx_req,enum btc_pkt_type pkt_type)518 rtw89_core_tx_update_ampdu_info(struct rtw89_dev *rtwdev,
519 struct rtw89_core_tx_request *tx_req,
520 enum btc_pkt_type pkt_type)
521 {
522 struct rtw89_sta_link *rtwsta_link = tx_req->rtwsta_link;
523 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
524 struct ieee80211_link_sta *link_sta;
525 struct sk_buff *skb = tx_req->skb;
526 struct rtw89_sta *rtwsta;
527 u8 ampdu_num;
528 u8 tid;
529
530 if (pkt_type == PACKET_EAPOL) {
531 desc_info->bk = true;
532 return;
533 }
534
535 if (!(IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU))
536 return;
537
538 if (!rtwsta_link) {
539 rtw89_warn(rtwdev, "cannot set ampdu info without sta\n");
540 return;
541 }
542
543 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
544 rtwsta = rtwsta_link->rtwsta;
545
546 rcu_read_lock();
547
548 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, false);
549 ampdu_num = (u8)((rtwsta->ampdu_params[tid].agg_num ?
550 rtwsta->ampdu_params[tid].agg_num :
551 4 << link_sta->ht_cap.ampdu_factor) - 1);
552
553 desc_info->agg_en = true;
554 desc_info->ampdu_density = link_sta->ht_cap.ampdu_density;
555 desc_info->ampdu_num = ampdu_num;
556
557 rcu_read_unlock();
558 }
559
560 static void
rtw89_core_tx_update_sec_key(struct rtw89_dev * rtwdev,struct rtw89_core_tx_request * tx_req)561 rtw89_core_tx_update_sec_key(struct rtw89_dev *rtwdev,
562 struct rtw89_core_tx_request *tx_req)
563 {
564 struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
565 const struct rtw89_chip_info *chip = rtwdev->chip;
566 const struct rtw89_sec_cam_entry *sec_cam;
567 struct ieee80211_tx_info *info;
568 struct ieee80211_key_conf *key;
569 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
570 struct sk_buff *skb = tx_req->skb;
571 u8 sec_type = RTW89_SEC_KEY_TYPE_NONE;
572 u8 sec_cam_idx;
573 u64 pn64;
574
575 info = IEEE80211_SKB_CB(skb);
576 key = info->control.hw_key;
577 sec_cam_idx = key->hw_key_idx;
578 sec_cam = cam_info->sec_entries[sec_cam_idx];
579 if (!sec_cam) {
580 rtw89_warn(rtwdev, "sec cam entry is empty\n");
581 return;
582 }
583
584 switch (key->cipher) {
585 case WLAN_CIPHER_SUITE_WEP40:
586 sec_type = RTW89_SEC_KEY_TYPE_WEP40;
587 break;
588 case WLAN_CIPHER_SUITE_WEP104:
589 sec_type = RTW89_SEC_KEY_TYPE_WEP104;
590 break;
591 case WLAN_CIPHER_SUITE_TKIP:
592 sec_type = RTW89_SEC_KEY_TYPE_TKIP;
593 break;
594 case WLAN_CIPHER_SUITE_CCMP:
595 sec_type = RTW89_SEC_KEY_TYPE_CCMP128;
596 break;
597 case WLAN_CIPHER_SUITE_CCMP_256:
598 sec_type = RTW89_SEC_KEY_TYPE_CCMP256;
599 break;
600 case WLAN_CIPHER_SUITE_GCMP:
601 sec_type = RTW89_SEC_KEY_TYPE_GCMP128;
602 break;
603 case WLAN_CIPHER_SUITE_GCMP_256:
604 sec_type = RTW89_SEC_KEY_TYPE_GCMP256;
605 break;
606 default:
607 rtw89_warn(rtwdev, "key cipher not supported %d\n", key->cipher);
608 return;
609 }
610
611 desc_info->sec_en = true;
612 desc_info->sec_keyid = key->keyidx;
613 desc_info->sec_type = sec_type;
614 desc_info->sec_cam_idx = sec_cam->sec_cam_idx;
615
616 if (!chip->hw_sec_hdr)
617 return;
618
619 pn64 = atomic64_inc_return(&key->tx_pn);
620 desc_info->sec_seq[0] = pn64;
621 desc_info->sec_seq[1] = pn64 >> 8;
622 desc_info->sec_seq[2] = pn64 >> 16;
623 desc_info->sec_seq[3] = pn64 >> 24;
624 desc_info->sec_seq[4] = pn64 >> 32;
625 desc_info->sec_seq[5] = pn64 >> 40;
626 desc_info->wp_offset = 1; /* in unit of 8 bytes for security header */
627 }
628
rtw89_core_get_mgmt_rate(struct rtw89_dev * rtwdev,struct rtw89_core_tx_request * tx_req,const struct rtw89_chan * chan)629 static u16 rtw89_core_get_mgmt_rate(struct rtw89_dev *rtwdev,
630 struct rtw89_core_tx_request *tx_req,
631 const struct rtw89_chan *chan)
632 {
633 struct sk_buff *skb = tx_req->skb;
634 struct rtw89_vif_link *rtwvif_link = tx_req->rtwvif_link;
635 struct rtw89_sta_link *rtwsta_link = tx_req->rtwsta_link;
636 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
637 struct ieee80211_vif *vif = tx_info->control.vif;
638 struct ieee80211_bss_conf *bss_conf;
639 u16 lowest_rate;
640 u16 rate;
641
642 if (tx_info->flags & IEEE80211_TX_CTL_NO_CCK_RATE ||
643 (vif && vif->p2p))
644 lowest_rate = RTW89_HW_RATE_OFDM6;
645 else if (chan->band_type == RTW89_BAND_2G)
646 lowest_rate = RTW89_HW_RATE_CCK1;
647 else
648 lowest_rate = RTW89_HW_RATE_OFDM6;
649
650 if (!rtwvif_link)
651 return lowest_rate;
652
653 rcu_read_lock();
654
655 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false);
656 if (!bss_conf->basic_rates || !rtwsta_link) {
657 rate = lowest_rate;
658 goto out;
659 }
660
661 rate = __ffs(bss_conf->basic_rates) + lowest_rate;
662
663 out:
664 rcu_read_unlock();
665
666 return rate;
667 }
668
rtw89_core_tx_get_mac_id(struct rtw89_dev * rtwdev,struct rtw89_core_tx_request * tx_req)669 static u8 rtw89_core_tx_get_mac_id(struct rtw89_dev *rtwdev,
670 struct rtw89_core_tx_request *tx_req)
671 {
672 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
673 struct rtw89_vif_link *rtwvif_link = tx_req->rtwvif_link;
674 struct rtw89_sta_link *rtwsta_link = tx_req->rtwsta_link;
675
676 if (desc_info->mlo && !desc_info->sw_mld) {
677 if (rtwsta_link)
678 return rtw89_sta_get_main_macid(rtwsta_link->rtwsta);
679 else
680 return rtw89_vif_get_main_macid(rtwvif_link->rtwvif);
681 }
682
683 if (!rtwsta_link)
684 return rtwvif_link->mac_id;
685
686 return rtwsta_link->mac_id;
687 }
688
rtw89_core_tx_update_llc_hdr(struct rtw89_dev * rtwdev,struct rtw89_tx_desc_info * desc_info,struct sk_buff * skb)689 static void rtw89_core_tx_update_llc_hdr(struct rtw89_dev *rtwdev,
690 struct rtw89_tx_desc_info *desc_info,
691 struct sk_buff *skb)
692 {
693 struct ieee80211_hdr *hdr = (void *)skb->data;
694 __le16 fc = hdr->frame_control;
695
696 desc_info->hdr_llc_len = ieee80211_hdrlen(fc);
697 desc_info->hdr_llc_len >>= 1; /* in unit of 2 bytes */
698 }
699
700 static void
rtw89_core_tx_update_mgmt_info(struct rtw89_dev * rtwdev,struct rtw89_core_tx_request * tx_req)701 rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
702 struct rtw89_core_tx_request *tx_req)
703 {
704 const struct rtw89_chip_info *chip = rtwdev->chip;
705 struct rtw89_vif_link *rtwvif_link = tx_req->rtwvif_link;
706 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
707 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
708 rtwvif_link->chanctx_idx);
709 struct sk_buff *skb = tx_req->skb;
710 u8 qsel, ch_dma;
711
712 qsel = rtw89_core_get_qsel_mgmt(rtwdev, tx_req);
713 ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
714
715 desc_info->qsel = qsel;
716 desc_info->ch_dma = ch_dma;
717 desc_info->port = desc_info->hiq ? rtwvif_link->port : 0;
718 desc_info->mac_id = rtw89_core_tx_get_mac_id(rtwdev, tx_req);
719 desc_info->hw_ssn_sel = RTW89_MGMT_HW_SSN_SEL;
720 desc_info->hw_seq_mode = RTW89_MGMT_HW_SEQ_MODE;
721
722 /* fixed data rate for mgmt frames */
723 desc_info->en_wd_info = true;
724 desc_info->use_rate = true;
725 desc_info->dis_data_fb = true;
726 desc_info->data_rate = rtw89_core_get_mgmt_rate(rtwdev, tx_req, chan);
727
728 if (chip->hw_mgmt_tx_encrypt && IEEE80211_SKB_CB(skb)->control.hw_key) {
729 rtw89_core_tx_update_sec_key(rtwdev, tx_req);
730 rtw89_core_tx_update_llc_hdr(rtwdev, desc_info, skb);
731 }
732
733 rtw89_debug(rtwdev, RTW89_DBG_TXRX,
734 "tx mgmt frame with rate 0x%x on channel %d (band %d, bw %d)\n",
735 desc_info->data_rate, chan->channel, chan->band_type,
736 chan->band_width);
737 }
738
739 static void
rtw89_core_tx_update_h2c_info(struct rtw89_dev * rtwdev,struct rtw89_core_tx_request * tx_req)740 rtw89_core_tx_update_h2c_info(struct rtw89_dev *rtwdev,
741 struct rtw89_core_tx_request *tx_req)
742 {
743 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
744
745 desc_info->is_bmc = false;
746 desc_info->wd_page = false;
747 desc_info->ch_dma = RTW89_DMA_H2C;
748 }
749
rtw89_core_get_no_ul_ofdma_htc(struct rtw89_dev * rtwdev,__le32 * htc,const struct rtw89_chan * chan)750 static void rtw89_core_get_no_ul_ofdma_htc(struct rtw89_dev *rtwdev, __le32 *htc,
751 const struct rtw89_chan *chan)
752 {
753 static const u8 rtw89_bandwidth_to_om[] = {
754 [RTW89_CHANNEL_WIDTH_20] = HTC_OM_CHANNEL_WIDTH_20,
755 [RTW89_CHANNEL_WIDTH_40] = HTC_OM_CHANNEL_WIDTH_40,
756 [RTW89_CHANNEL_WIDTH_80] = HTC_OM_CHANNEL_WIDTH_80,
757 [RTW89_CHANNEL_WIDTH_160] = HTC_OM_CHANNEL_WIDTH_160_OR_80_80,
758 [RTW89_CHANNEL_WIDTH_80_80] = HTC_OM_CHANNEL_WIDTH_160_OR_80_80,
759 };
760 const struct rtw89_chip_info *chip = rtwdev->chip;
761 struct rtw89_hal *hal = &rtwdev->hal;
762 u8 om_bandwidth;
763
764 if (!chip->dis_2g_40m_ul_ofdma ||
765 chan->band_type != RTW89_BAND_2G ||
766 chan->band_width != RTW89_CHANNEL_WIDTH_40)
767 return;
768
769 om_bandwidth = chan->band_width < ARRAY_SIZE(rtw89_bandwidth_to_om) ?
770 rtw89_bandwidth_to_om[chan->band_width] : 0;
771 *htc = le32_encode_bits(RTW89_HTC_VARIANT_HE, RTW89_HTC_MASK_VARIANT) |
772 le32_encode_bits(RTW89_HTC_VARIANT_HE_CID_OM, RTW89_HTC_MASK_CTL_ID) |
773 le32_encode_bits(hal->rx_nss - 1, RTW89_HTC_MASK_HTC_OM_RX_NSS) |
774 le32_encode_bits(om_bandwidth, RTW89_HTC_MASK_HTC_OM_CH_WIDTH) |
775 le32_encode_bits(1, RTW89_HTC_MASK_HTC_OM_UL_MU_DIS) |
776 le32_encode_bits(hal->tx_nss - 1, RTW89_HTC_MASK_HTC_OM_TX_NSTS) |
777 le32_encode_bits(0, RTW89_HTC_MASK_HTC_OM_ER_SU_DIS) |
778 le32_encode_bits(0, RTW89_HTC_MASK_HTC_OM_DL_MU_MIMO_RR) |
779 le32_encode_bits(0, RTW89_HTC_MASK_HTC_OM_UL_MU_DATA_DIS);
780 }
781
782 static bool
__rtw89_core_tx_check_he_qos_htc(struct rtw89_dev * rtwdev,struct rtw89_core_tx_request * tx_req,enum btc_pkt_type pkt_type)783 __rtw89_core_tx_check_he_qos_htc(struct rtw89_dev *rtwdev,
784 struct rtw89_core_tx_request *tx_req,
785 enum btc_pkt_type pkt_type)
786 {
787 struct rtw89_sta_link *rtwsta_link = tx_req->rtwsta_link;
788 struct sk_buff *skb = tx_req->skb;
789 struct ieee80211_hdr *hdr = (void *)skb->data;
790 struct ieee80211_link_sta *link_sta;
791 __le16 fc = hdr->frame_control;
792
793 /* AP IOT issue with EAPoL, ARP and DHCP */
794 if (pkt_type < PACKET_MAX)
795 return false;
796
797 if (!rtwsta_link)
798 return false;
799
800 rcu_read_lock();
801
802 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, false);
803 if (!link_sta->he_cap.has_he) {
804 rcu_read_unlock();
805 return false;
806 }
807
808 rcu_read_unlock();
809
810 if (!ieee80211_is_data_qos(fc))
811 return false;
812
813 if (skb_headroom(skb) < IEEE80211_HT_CTL_LEN)
814 return false;
815
816 if (rtwsta_link && rtwsta_link->ra_report.might_fallback_legacy)
817 return false;
818
819 return true;
820 }
821
822 static void
__rtw89_core_tx_adjust_he_qos_htc(struct rtw89_dev * rtwdev,struct rtw89_core_tx_request * tx_req)823 __rtw89_core_tx_adjust_he_qos_htc(struct rtw89_dev *rtwdev,
824 struct rtw89_core_tx_request *tx_req)
825 {
826 struct rtw89_sta_link *rtwsta_link = tx_req->rtwsta_link;
827 struct sk_buff *skb = tx_req->skb;
828 struct ieee80211_hdr *hdr = (void *)skb->data;
829 __le16 fc = hdr->frame_control;
830 void *data;
831 __le32 *htc;
832 u8 *qc;
833 int hdr_len;
834
835 hdr_len = ieee80211_has_a4(fc) ? 32 : 26;
836 data = skb_push(skb, IEEE80211_HT_CTL_LEN);
837 memmove(data, data + IEEE80211_HT_CTL_LEN, hdr_len);
838
839 hdr = data;
840 htc = data + hdr_len;
841 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_ORDER);
842 *htc = rtwsta_link->htc_template ? rtwsta_link->htc_template :
843 le32_encode_bits(RTW89_HTC_VARIANT_HE, RTW89_HTC_MASK_VARIANT) |
844 le32_encode_bits(RTW89_HTC_VARIANT_HE_CID_CAS, RTW89_HTC_MASK_CTL_ID);
845
846 qc = data + hdr_len - IEEE80211_QOS_CTL_LEN;
847 qc[0] |= IEEE80211_QOS_CTL_EOSP;
848 }
849
850 static void
rtw89_core_tx_update_he_qos_htc(struct rtw89_dev * rtwdev,struct rtw89_core_tx_request * tx_req,enum btc_pkt_type pkt_type)851 rtw89_core_tx_update_he_qos_htc(struct rtw89_dev *rtwdev,
852 struct rtw89_core_tx_request *tx_req,
853 enum btc_pkt_type pkt_type)
854 {
855 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
856 struct rtw89_vif_link *rtwvif_link = tx_req->rtwvif_link;
857
858 if (!__rtw89_core_tx_check_he_qos_htc(rtwdev, tx_req, pkt_type))
859 goto desc_bk;
860
861 __rtw89_core_tx_adjust_he_qos_htc(rtwdev, tx_req);
862
863 desc_info->pkt_size += IEEE80211_HT_CTL_LEN;
864 desc_info->a_ctrl_bsr = true;
865
866 desc_bk:
867 if (!rtwvif_link || rtwvif_link->last_a_ctrl == desc_info->a_ctrl_bsr)
868 return;
869
870 rtwvif_link->last_a_ctrl = desc_info->a_ctrl_bsr;
871 desc_info->bk = true;
872 }
873
rtw89_core_get_data_rate(struct rtw89_dev * rtwdev,struct rtw89_core_tx_request * tx_req)874 static u16 rtw89_core_get_data_rate(struct rtw89_dev *rtwdev,
875 struct rtw89_core_tx_request *tx_req)
876 {
877 struct rtw89_vif_link *rtwvif_link = tx_req->rtwvif_link;
878 struct rtw89_sta_link *rtwsta_link = tx_req->rtwsta_link;
879 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
880 struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif_link->rate_pattern;
881 enum rtw89_chanctx_idx idx = rtwvif_link->chanctx_idx;
882 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, idx);
883 struct ieee80211_link_sta *link_sta;
884 u16 lowest_rate;
885 u16 rate;
886
887 if (rate_pattern->enable)
888 return rate_pattern->rate;
889
890 if (vif->p2p)
891 lowest_rate = RTW89_HW_RATE_OFDM6;
892 else if (chan->band_type == RTW89_BAND_2G)
893 lowest_rate = RTW89_HW_RATE_CCK1;
894 else
895 lowest_rate = RTW89_HW_RATE_OFDM6;
896
897 if (!rtwsta_link)
898 return lowest_rate;
899
900 rcu_read_lock();
901
902 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, false);
903 if (!link_sta->supp_rates[chan->band_type]) {
904 rate = lowest_rate;
905 goto out;
906 }
907
908 rate = __ffs(link_sta->supp_rates[chan->band_type]) + lowest_rate;
909
910 out:
911 rcu_read_unlock();
912
913 return rate;
914 }
915
916 static void
rtw89_core_tx_update_data_info(struct rtw89_dev * rtwdev,struct rtw89_core_tx_request * tx_req)917 rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
918 struct rtw89_core_tx_request *tx_req)
919 {
920 struct rtw89_vif_link *rtwvif_link = tx_req->rtwvif_link;
921 struct rtw89_sta_link *rtwsta_link = tx_req->rtwsta_link;
922 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
923 struct sk_buff *skb = tx_req->skb;
924 u8 tid, tid_indicate;
925 u8 qsel, ch_dma;
926
927 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
928 tid_indicate = rtw89_core_get_tid_indicate(rtwdev, tid);
929 qsel = desc_info->hiq ? RTW89_TX_QSEL_B0_HI : rtw89_core_get_qsel(rtwdev, tid);
930 ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
931
932 desc_info->ch_dma = ch_dma;
933 desc_info->tid_indicate = tid_indicate;
934 desc_info->qsel = qsel;
935 desc_info->mac_id = rtw89_core_tx_get_mac_id(rtwdev, tx_req);
936 desc_info->port = desc_info->hiq ? rtwvif_link->port : 0;
937 desc_info->er_cap = rtwsta_link ? rtwsta_link->er_cap : false;
938 desc_info->stbc = rtwsta_link ? rtwsta_link->ra.stbc_cap : false;
939 desc_info->ldpc = rtwsta_link ? rtwsta_link->ra.ldpc_cap : false;
940
941 /* enable wd_info for AMPDU */
942 desc_info->en_wd_info = true;
943
944 if (IEEE80211_SKB_CB(skb)->control.hw_key)
945 rtw89_core_tx_update_sec_key(rtwdev, tx_req);
946
947 desc_info->data_retry_lowest_rate = rtw89_core_get_data_rate(rtwdev, tx_req);
948 }
949
950 static enum btc_pkt_type
rtw89_core_tx_btc_spec_pkt_notify(struct rtw89_dev * rtwdev,struct rtw89_core_tx_request * tx_req)951 rtw89_core_tx_btc_spec_pkt_notify(struct rtw89_dev *rtwdev,
952 struct rtw89_core_tx_request *tx_req)
953 {
954 struct wiphy *wiphy = rtwdev->hw->wiphy;
955 struct sk_buff *skb = tx_req->skb;
956 struct udphdr *udphdr;
957
958 if (IEEE80211_SKB_CB(skb)->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
959 wiphy_work_queue(wiphy, &rtwdev->btc.eapol_notify_work);
960 return PACKET_EAPOL;
961 }
962
963 if (skb->protocol == htons(ETH_P_ARP)) {
964 wiphy_work_queue(wiphy, &rtwdev->btc.arp_notify_work);
965 return PACKET_ARP;
966 }
967
968 if (skb->protocol == htons(ETH_P_IP) &&
969 ip_hdr(skb)->protocol == IPPROTO_UDP) {
970 udphdr = udp_hdr(skb);
971 if (((udphdr->source == htons(67) && udphdr->dest == htons(68)) ||
972 (udphdr->source == htons(68) && udphdr->dest == htons(67))) &&
973 skb->len > 282) {
974 wiphy_work_queue(wiphy, &rtwdev->btc.dhcp_notify_work);
975 return PACKET_DHCP;
976 }
977 }
978
979 if (skb->protocol == htons(ETH_P_IP) &&
980 ip_hdr(skb)->protocol == IPPROTO_ICMP) {
981 wiphy_work_queue(wiphy, &rtwdev->btc.icmp_notify_work);
982 return PACKET_ICMP;
983 }
984
985 return PACKET_MAX;
986 }
987
988 static void
rtw89_core_tx_wake(struct rtw89_dev * rtwdev,struct rtw89_core_tx_request * tx_req)989 rtw89_core_tx_wake(struct rtw89_dev *rtwdev,
990 struct rtw89_core_tx_request *tx_req)
991 {
992 const struct rtw89_chip_info *chip = rtwdev->chip;
993
994 if (!RTW89_CHK_FW_FEATURE(TX_WAKE, &rtwdev->fw))
995 return;
996
997 switch (chip->chip_id) {
998 case RTL8852BT:
999 if (test_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags))
1000 goto notify;
1001 break;
1002 case RTL8852C:
1003 if (test_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags))
1004 goto notify;
1005 break;
1006 default:
1007 if (test_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags) &&
1008 tx_req->tx_type == RTW89_CORE_TX_TYPE_MGMT)
1009 goto notify;
1010 break;
1011 }
1012
1013 return;
1014
1015 notify:
1016 rtw89_mac_notify_wake(rtwdev);
1017 }
1018
1019 static void
rtw89_core_tx_update_desc_info(struct rtw89_dev * rtwdev,struct rtw89_core_tx_request * tx_req)1020 rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev,
1021 struct rtw89_core_tx_request *tx_req)
1022 {
1023 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
1024 struct sk_buff *skb = tx_req->skb;
1025 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1026 struct ieee80211_hdr *hdr = (void *)skb->data;
1027 struct rtw89_addr_cam_entry *addr_cam;
1028 enum rtw89_core_tx_type tx_type;
1029 enum btc_pkt_type pkt_type;
1030 bool upd_wlan_hdr = false;
1031 bool is_bmc;
1032 u16 seq;
1033
1034 if (tx_req->sta)
1035 desc_info->mlo = tx_req->sta->mlo;
1036 else if (tx_req->vif)
1037 desc_info->mlo = ieee80211_vif_is_mld(tx_req->vif);
1038
1039 seq = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
1040 if (tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD) {
1041 tx_type = rtw89_core_get_tx_type(rtwdev, skb);
1042 tx_req->tx_type = tx_type;
1043
1044 addr_cam = rtw89_get_addr_cam_of(tx_req->rtwvif_link,
1045 tx_req->rtwsta_link);
1046 if (addr_cam->valid && desc_info->mlo)
1047 upd_wlan_hdr = true;
1048 }
1049 is_bmc = (is_broadcast_ether_addr(hdr->addr1) ||
1050 is_multicast_ether_addr(hdr->addr1));
1051
1052 desc_info->seq = seq;
1053 desc_info->pkt_size = skb->len;
1054 desc_info->is_bmc = is_bmc;
1055 desc_info->wd_page = true;
1056 desc_info->hiq = info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM;
1057 desc_info->upd_wlan_hdr = upd_wlan_hdr;
1058
1059 switch (tx_req->tx_type) {
1060 case RTW89_CORE_TX_TYPE_MGMT:
1061 rtw89_core_tx_update_mgmt_info(rtwdev, tx_req);
1062 break;
1063 case RTW89_CORE_TX_TYPE_DATA:
1064 rtw89_core_tx_update_data_info(rtwdev, tx_req);
1065 pkt_type = rtw89_core_tx_btc_spec_pkt_notify(rtwdev, tx_req);
1066 rtw89_core_tx_update_he_qos_htc(rtwdev, tx_req, pkt_type);
1067 rtw89_core_tx_update_ampdu_info(rtwdev, tx_req, pkt_type);
1068 rtw89_core_tx_update_llc_hdr(rtwdev, desc_info, skb);
1069 break;
1070 case RTW89_CORE_TX_TYPE_FWCMD:
1071 rtw89_core_tx_update_h2c_info(rtwdev, tx_req);
1072 break;
1073 }
1074 }
1075
rtw89_core_tx_kick_off(struct rtw89_dev * rtwdev,u8 qsel)1076 void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel)
1077 {
1078 u8 ch_dma;
1079
1080 ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
1081
1082 rtw89_hci_tx_kick_off(rtwdev, ch_dma);
1083 }
1084
rtw89_core_tx_kick_off_and_wait(struct rtw89_dev * rtwdev,struct sk_buff * skb,int qsel,unsigned int timeout)1085 int rtw89_core_tx_kick_off_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
1086 int qsel, unsigned int timeout)
1087 {
1088 struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
1089 struct rtw89_tx_wait_info *wait;
1090 unsigned long time_left;
1091 int ret = 0;
1092
1093 wait = kzalloc(sizeof(*wait), GFP_KERNEL);
1094 if (!wait) {
1095 rtw89_core_tx_kick_off(rtwdev, qsel);
1096 return 0;
1097 }
1098
1099 init_completion(&wait->completion);
1100 rcu_assign_pointer(skb_data->wait, wait);
1101
1102 rtw89_core_tx_kick_off(rtwdev, qsel);
1103 time_left = wait_for_completion_timeout(&wait->completion,
1104 msecs_to_jiffies(timeout));
1105 if (time_left == 0)
1106 ret = -ETIMEDOUT;
1107 else if (!wait->tx_done)
1108 ret = -EAGAIN;
1109
1110 rcu_assign_pointer(skb_data->wait, NULL);
1111 kfree_rcu(wait, rcu_head);
1112
1113 return ret;
1114 }
1115
rtw89_h2c_tx(struct rtw89_dev * rtwdev,struct sk_buff * skb,bool fwdl)1116 int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
1117 struct sk_buff *skb, bool fwdl)
1118 {
1119 struct rtw89_core_tx_request tx_req = {0};
1120 u32 cnt;
1121 int ret;
1122
1123 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) {
1124 rtw89_debug(rtwdev, RTW89_DBG_FW,
1125 "ignore h2c due to power is off with firmware state=%d\n",
1126 test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags));
1127 dev_kfree_skb(skb);
1128 return 0;
1129 }
1130
1131 tx_req.skb = skb;
1132 tx_req.tx_type = RTW89_CORE_TX_TYPE_FWCMD;
1133 if (fwdl)
1134 tx_req.desc_info.fw_dl = true;
1135
1136 rtw89_core_tx_update_desc_info(rtwdev, &tx_req);
1137
1138 if (!fwdl)
1139 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "H2C: ", skb->data, skb->len);
1140
1141 cnt = rtw89_hci_check_and_reclaim_tx_resource(rtwdev, RTW89_TXCH_CH12);
1142 if (cnt == 0) {
1143 rtw89_err(rtwdev, "no tx fwcmd resource\n");
1144 return -ENOSPC;
1145 }
1146
1147 ret = rtw89_hci_tx_write(rtwdev, &tx_req);
1148 if (ret) {
1149 rtw89_err(rtwdev, "failed to transmit skb to HCI\n");
1150 return ret;
1151 }
1152 rtw89_hci_tx_kick_off(rtwdev, RTW89_TXCH_CH12);
1153
1154 return 0;
1155 }
1156
rtw89_core_tx_write_link(struct rtw89_dev * rtwdev,struct rtw89_vif_link * rtwvif_link,struct rtw89_sta_link * rtwsta_link,struct sk_buff * skb,int * qsel,bool sw_mld)1157 static int rtw89_core_tx_write_link(struct rtw89_dev *rtwdev,
1158 struct rtw89_vif_link *rtwvif_link,
1159 struct rtw89_sta_link *rtwsta_link,
1160 struct sk_buff *skb, int *qsel, bool sw_mld)
1161 {
1162 struct ieee80211_sta *sta = rtwsta_link_to_sta_safe(rtwsta_link);
1163 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
1164 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
1165 struct rtw89_core_tx_request tx_req = {};
1166 int ret;
1167
1168 tx_req.skb = skb;
1169 tx_req.vif = vif;
1170 tx_req.sta = sta;
1171 tx_req.rtwvif_link = rtwvif_link;
1172 tx_req.rtwsta_link = rtwsta_link;
1173 tx_req.desc_info.sw_mld = sw_mld;
1174
1175 rtw89_traffic_stats_accu(rtwdev, rtwvif, skb, true, true);
1176 rtw89_wow_parse_akm(rtwdev, skb);
1177 rtw89_core_tx_update_desc_info(rtwdev, &tx_req);
1178 rtw89_core_tx_wake(rtwdev, &tx_req);
1179
1180 ret = rtw89_hci_tx_write(rtwdev, &tx_req);
1181 if (ret) {
1182 rtw89_err(rtwdev, "failed to transmit skb to HCI\n");
1183 return ret;
1184 }
1185
1186 if (qsel)
1187 *qsel = tx_req.desc_info.qsel;
1188
1189 return 0;
1190 }
1191
rtw89_core_tx_write(struct rtw89_dev * rtwdev,struct ieee80211_vif * vif,struct ieee80211_sta * sta,struct sk_buff * skb,int * qsel)1192 int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
1193 struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel)
1194 {
1195 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
1196 struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
1197 struct rtw89_sta_link *rtwsta_link = NULL;
1198 struct rtw89_vif_link *rtwvif_link;
1199
1200 if (rtwsta) {
1201 rtwsta_link = rtw89_get_designated_link(rtwsta);
1202 if (unlikely(!rtwsta_link)) {
1203 rtw89_err(rtwdev, "tx: find no sta designated link\n");
1204 return -ENOLINK;
1205 }
1206
1207 rtwvif_link = rtwsta_link->rtwvif_link;
1208 } else {
1209 rtwvif_link = rtw89_get_designated_link(rtwvif);
1210 if (unlikely(!rtwvif_link)) {
1211 rtw89_err(rtwdev, "tx: find no vif designated link\n");
1212 return -ENOLINK;
1213 }
1214 }
1215
1216 return rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, qsel, false);
1217 }
1218
rtw89_build_txwd_body0(struct rtw89_tx_desc_info * desc_info)1219 static __le32 rtw89_build_txwd_body0(struct rtw89_tx_desc_info *desc_info)
1220 {
1221 u32 dword = FIELD_PREP(RTW89_TXWD_BODY0_WP_OFFSET, desc_info->wp_offset) |
1222 FIELD_PREP(RTW89_TXWD_BODY0_WD_INFO_EN, desc_info->en_wd_info) |
1223 FIELD_PREP(RTW89_TXWD_BODY0_CHANNEL_DMA, desc_info->ch_dma) |
1224 FIELD_PREP(RTW89_TXWD_BODY0_HDR_LLC_LEN, desc_info->hdr_llc_len) |
1225 FIELD_PREP(RTW89_TXWD_BODY0_WD_PAGE, desc_info->wd_page) |
1226 FIELD_PREP(RTW89_TXWD_BODY0_FW_DL, desc_info->fw_dl) |
1227 FIELD_PREP(RTW89_TXWD_BODY0_HW_SSN_SEL, desc_info->hw_ssn_sel) |
1228 FIELD_PREP(RTW89_TXWD_BODY0_HW_SSN_MODE, desc_info->hw_seq_mode);
1229
1230 return cpu_to_le32(dword);
1231 }
1232
rtw89_build_txwd_body0_v1(struct rtw89_tx_desc_info * desc_info)1233 static __le32 rtw89_build_txwd_body0_v1(struct rtw89_tx_desc_info *desc_info)
1234 {
1235 u32 dword = FIELD_PREP(RTW89_TXWD_BODY0_WP_OFFSET_V1, desc_info->wp_offset) |
1236 FIELD_PREP(RTW89_TXWD_BODY0_WD_INFO_EN, desc_info->en_wd_info) |
1237 FIELD_PREP(RTW89_TXWD_BODY0_CHANNEL_DMA, desc_info->ch_dma) |
1238 FIELD_PREP(RTW89_TXWD_BODY0_HDR_LLC_LEN, desc_info->hdr_llc_len) |
1239 FIELD_PREP(RTW89_TXWD_BODY0_WD_PAGE, desc_info->wd_page) |
1240 FIELD_PREP(RTW89_TXWD_BODY0_FW_DL, desc_info->fw_dl);
1241
1242 return cpu_to_le32(dword);
1243 }
1244
rtw89_build_txwd_body1_v1(struct rtw89_tx_desc_info * desc_info)1245 static __le32 rtw89_build_txwd_body1_v1(struct rtw89_tx_desc_info *desc_info)
1246 {
1247 u32 dword = FIELD_PREP(RTW89_TXWD_BODY1_ADDR_INFO_NUM, desc_info->addr_info_nr) |
1248 FIELD_PREP(RTW89_TXWD_BODY1_SEC_KEYID, desc_info->sec_keyid) |
1249 FIELD_PREP(RTW89_TXWD_BODY1_SEC_TYPE, desc_info->sec_type);
1250
1251 return cpu_to_le32(dword);
1252 }
1253
rtw89_build_txwd_body2(struct rtw89_tx_desc_info * desc_info)1254 static __le32 rtw89_build_txwd_body2(struct rtw89_tx_desc_info *desc_info)
1255 {
1256 u32 dword = FIELD_PREP(RTW89_TXWD_BODY2_TID_INDICATE, desc_info->tid_indicate) |
1257 FIELD_PREP(RTW89_TXWD_BODY2_QSEL, desc_info->qsel) |
1258 FIELD_PREP(RTW89_TXWD_BODY2_TXPKT_SIZE, desc_info->pkt_size) |
1259 FIELD_PREP(RTW89_TXWD_BODY2_MACID, desc_info->mac_id);
1260
1261 return cpu_to_le32(dword);
1262 }
1263
rtw89_build_txwd_body3(struct rtw89_tx_desc_info * desc_info)1264 static __le32 rtw89_build_txwd_body3(struct rtw89_tx_desc_info *desc_info)
1265 {
1266 u32 dword = FIELD_PREP(RTW89_TXWD_BODY3_SW_SEQ, desc_info->seq) |
1267 FIELD_PREP(RTW89_TXWD_BODY3_AGG_EN, desc_info->agg_en) |
1268 FIELD_PREP(RTW89_TXWD_BODY3_BK, desc_info->bk);
1269
1270 return cpu_to_le32(dword);
1271 }
1272
rtw89_build_txwd_body4(struct rtw89_tx_desc_info * desc_info)1273 static __le32 rtw89_build_txwd_body4(struct rtw89_tx_desc_info *desc_info)
1274 {
1275 u32 dword = FIELD_PREP(RTW89_TXWD_BODY4_SEC_IV_L0, desc_info->sec_seq[0]) |
1276 FIELD_PREP(RTW89_TXWD_BODY4_SEC_IV_L1, desc_info->sec_seq[1]);
1277
1278 return cpu_to_le32(dword);
1279 }
1280
rtw89_build_txwd_body5(struct rtw89_tx_desc_info * desc_info)1281 static __le32 rtw89_build_txwd_body5(struct rtw89_tx_desc_info *desc_info)
1282 {
1283 u32 dword = FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H2, desc_info->sec_seq[2]) |
1284 FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H3, desc_info->sec_seq[3]) |
1285 FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H4, desc_info->sec_seq[4]) |
1286 FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H5, desc_info->sec_seq[5]);
1287
1288 return cpu_to_le32(dword);
1289 }
1290
rtw89_build_txwd_body7_v1(struct rtw89_tx_desc_info * desc_info)1291 static __le32 rtw89_build_txwd_body7_v1(struct rtw89_tx_desc_info *desc_info)
1292 {
1293 u32 dword = FIELD_PREP(RTW89_TXWD_BODY7_USE_RATE_V1, desc_info->use_rate) |
1294 FIELD_PREP(RTW89_TXWD_BODY7_DATA_RATE, desc_info->data_rate);
1295
1296 return cpu_to_le32(dword);
1297 }
1298
rtw89_build_txwd_info0(struct rtw89_tx_desc_info * desc_info)1299 static __le32 rtw89_build_txwd_info0(struct rtw89_tx_desc_info *desc_info)
1300 {
1301 u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_USE_RATE, desc_info->use_rate) |
1302 FIELD_PREP(RTW89_TXWD_INFO0_DATA_RATE, desc_info->data_rate) |
1303 FIELD_PREP(RTW89_TXWD_INFO0_DATA_STBC, desc_info->stbc) |
1304 FIELD_PREP(RTW89_TXWD_INFO0_DATA_LDPC, desc_info->ldpc) |
1305 FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb) |
1306 FIELD_PREP(RTW89_TXWD_INFO0_MULTIPORT_ID, desc_info->port);
1307
1308 return cpu_to_le32(dword);
1309 }
1310
rtw89_build_txwd_info0_v1(struct rtw89_tx_desc_info * desc_info)1311 static __le32 rtw89_build_txwd_info0_v1(struct rtw89_tx_desc_info *desc_info)
1312 {
1313 u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_DATA_STBC, desc_info->stbc) |
1314 FIELD_PREP(RTW89_TXWD_INFO0_DATA_LDPC, desc_info->ldpc) |
1315 FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb) |
1316 FIELD_PREP(RTW89_TXWD_INFO0_MULTIPORT_ID, desc_info->port) |
1317 FIELD_PREP(RTW89_TXWD_INFO0_DATA_ER, desc_info->er_cap) |
1318 FIELD_PREP(RTW89_TXWD_INFO0_DATA_BW_ER, 0);
1319
1320 return cpu_to_le32(dword);
1321 }
1322
rtw89_build_txwd_info1(struct rtw89_tx_desc_info * desc_info)1323 static __le32 rtw89_build_txwd_info1(struct rtw89_tx_desc_info *desc_info)
1324 {
1325 u32 dword = FIELD_PREP(RTW89_TXWD_INFO1_MAX_AGGNUM, desc_info->ampdu_num) |
1326 FIELD_PREP(RTW89_TXWD_INFO1_A_CTRL_BSR, desc_info->a_ctrl_bsr) |
1327 FIELD_PREP(RTW89_TXWD_INFO1_DATA_RTY_LOWEST_RATE,
1328 desc_info->data_retry_lowest_rate);
1329
1330 return cpu_to_le32(dword);
1331 }
1332
rtw89_build_txwd_info2(struct rtw89_tx_desc_info * desc_info)1333 static __le32 rtw89_build_txwd_info2(struct rtw89_tx_desc_info *desc_info)
1334 {
1335 u32 dword = FIELD_PREP(RTW89_TXWD_INFO2_AMPDU_DENSITY, desc_info->ampdu_density) |
1336 FIELD_PREP(RTW89_TXWD_INFO2_SEC_TYPE, desc_info->sec_type) |
1337 FIELD_PREP(RTW89_TXWD_INFO2_SEC_HW_ENC, desc_info->sec_en) |
1338 FIELD_PREP(RTW89_TXWD_INFO2_SEC_CAM_IDX, desc_info->sec_cam_idx);
1339
1340 return cpu_to_le32(dword);
1341 }
1342
rtw89_build_txwd_info2_v1(struct rtw89_tx_desc_info * desc_info)1343 static __le32 rtw89_build_txwd_info2_v1(struct rtw89_tx_desc_info *desc_info)
1344 {
1345 u32 dword = FIELD_PREP(RTW89_TXWD_INFO2_AMPDU_DENSITY, desc_info->ampdu_density) |
1346 FIELD_PREP(RTW89_TXWD_INFO2_FORCE_KEY_EN, desc_info->sec_en) |
1347 FIELD_PREP(RTW89_TXWD_INFO2_SEC_CAM_IDX, desc_info->sec_cam_idx);
1348
1349 return cpu_to_le32(dword);
1350 }
1351
rtw89_build_txwd_info4(struct rtw89_tx_desc_info * desc_info)1352 static __le32 rtw89_build_txwd_info4(struct rtw89_tx_desc_info *desc_info)
1353 {
1354 bool rts_en = !desc_info->is_bmc;
1355 u32 dword = FIELD_PREP(RTW89_TXWD_INFO4_RTS_EN, rts_en) |
1356 FIELD_PREP(RTW89_TXWD_INFO4_HW_RTS_EN, 1);
1357
1358 return cpu_to_le32(dword);
1359 }
1360
rtw89_core_fill_txdesc(struct rtw89_dev * rtwdev,struct rtw89_tx_desc_info * desc_info,void * txdesc)1361 void rtw89_core_fill_txdesc(struct rtw89_dev *rtwdev,
1362 struct rtw89_tx_desc_info *desc_info,
1363 void *txdesc)
1364 {
1365 struct rtw89_txwd_body *txwd_body = (struct rtw89_txwd_body *)txdesc;
1366 struct rtw89_txwd_info *txwd_info;
1367
1368 txwd_body->dword0 = rtw89_build_txwd_body0(desc_info);
1369 txwd_body->dword2 = rtw89_build_txwd_body2(desc_info);
1370 txwd_body->dword3 = rtw89_build_txwd_body3(desc_info);
1371
1372 if (!desc_info->en_wd_info)
1373 return;
1374
1375 txwd_info = (struct rtw89_txwd_info *)(txwd_body + 1);
1376 txwd_info->dword0 = rtw89_build_txwd_info0(desc_info);
1377 txwd_info->dword1 = rtw89_build_txwd_info1(desc_info);
1378 txwd_info->dword2 = rtw89_build_txwd_info2(desc_info);
1379 txwd_info->dword4 = rtw89_build_txwd_info4(desc_info);
1380
1381 }
1382 EXPORT_SYMBOL(rtw89_core_fill_txdesc);
1383
rtw89_core_fill_txdesc_v1(struct rtw89_dev * rtwdev,struct rtw89_tx_desc_info * desc_info,void * txdesc)1384 void rtw89_core_fill_txdesc_v1(struct rtw89_dev *rtwdev,
1385 struct rtw89_tx_desc_info *desc_info,
1386 void *txdesc)
1387 {
1388 struct rtw89_txwd_body_v1 *txwd_body = (struct rtw89_txwd_body_v1 *)txdesc;
1389 struct rtw89_txwd_info *txwd_info;
1390
1391 txwd_body->dword0 = rtw89_build_txwd_body0_v1(desc_info);
1392 txwd_body->dword1 = rtw89_build_txwd_body1_v1(desc_info);
1393 txwd_body->dword2 = rtw89_build_txwd_body2(desc_info);
1394 txwd_body->dword3 = rtw89_build_txwd_body3(desc_info);
1395 if (desc_info->sec_en) {
1396 txwd_body->dword4 = rtw89_build_txwd_body4(desc_info);
1397 txwd_body->dword5 = rtw89_build_txwd_body5(desc_info);
1398 }
1399 txwd_body->dword7 = rtw89_build_txwd_body7_v1(desc_info);
1400
1401 if (!desc_info->en_wd_info)
1402 return;
1403
1404 txwd_info = (struct rtw89_txwd_info *)(txwd_body + 1);
1405 txwd_info->dword0 = rtw89_build_txwd_info0_v1(desc_info);
1406 txwd_info->dword1 = rtw89_build_txwd_info1(desc_info);
1407 txwd_info->dword2 = rtw89_build_txwd_info2_v1(desc_info);
1408 txwd_info->dword4 = rtw89_build_txwd_info4(desc_info);
1409 }
1410 EXPORT_SYMBOL(rtw89_core_fill_txdesc_v1);
1411
rtw89_build_txwd_body0_v2(struct rtw89_tx_desc_info * desc_info)1412 static __le32 rtw89_build_txwd_body0_v2(struct rtw89_tx_desc_info *desc_info)
1413 {
1414 u32 dword = FIELD_PREP(BE_TXD_BODY0_WP_OFFSET_V1, desc_info->wp_offset) |
1415 FIELD_PREP(BE_TXD_BODY0_WDINFO_EN, desc_info->en_wd_info) |
1416 FIELD_PREP(BE_TXD_BODY0_CH_DMA, desc_info->ch_dma) |
1417 FIELD_PREP(BE_TXD_BODY0_HDR_LLC_LEN, desc_info->hdr_llc_len) |
1418 FIELD_PREP(BE_TXD_BODY0_WD_PAGE, desc_info->wd_page);
1419
1420 return cpu_to_le32(dword);
1421 }
1422
rtw89_build_txwd_body1_v2(struct rtw89_tx_desc_info * desc_info)1423 static __le32 rtw89_build_txwd_body1_v2(struct rtw89_tx_desc_info *desc_info)
1424 {
1425 u32 dword = FIELD_PREP(BE_TXD_BODY1_ADDR_INFO_NUM, desc_info->addr_info_nr) |
1426 FIELD_PREP(BE_TXD_BODY1_SEC_KEYID, desc_info->sec_keyid) |
1427 FIELD_PREP(BE_TXD_BODY1_SEC_TYPE, desc_info->sec_type);
1428
1429 return cpu_to_le32(dword);
1430 }
1431
rtw89_build_txwd_body2_v2(struct rtw89_tx_desc_info * desc_info)1432 static __le32 rtw89_build_txwd_body2_v2(struct rtw89_tx_desc_info *desc_info)
1433 {
1434 u32 dword = FIELD_PREP(BE_TXD_BODY2_TID_IND, desc_info->tid_indicate) |
1435 FIELD_PREP(BE_TXD_BODY2_QSEL, desc_info->qsel) |
1436 FIELD_PREP(BE_TXD_BODY2_TXPKTSIZE, desc_info->pkt_size) |
1437 FIELD_PREP(BE_TXD_BODY2_AGG_EN, desc_info->agg_en) |
1438 FIELD_PREP(BE_TXD_BODY2_BK, desc_info->bk) |
1439 FIELD_PREP(BE_TXD_BODY2_MACID, desc_info->mac_id);
1440
1441 return cpu_to_le32(dword);
1442 }
1443
rtw89_build_txwd_body3_v2(struct rtw89_tx_desc_info * desc_info)1444 static __le32 rtw89_build_txwd_body3_v2(struct rtw89_tx_desc_info *desc_info)
1445 {
1446 u32 dword = FIELD_PREP(BE_TXD_BODY3_WIFI_SEQ, desc_info->seq) |
1447 FIELD_PREP(BE_TXD_BODY3_MLO_FLAG, desc_info->mlo) |
1448 FIELD_PREP(BE_TXD_BODY3_IS_MLD_SW_EN, desc_info->sw_mld);
1449
1450 return cpu_to_le32(dword);
1451 }
1452
rtw89_build_txwd_body4_v2(struct rtw89_tx_desc_info * desc_info)1453 static __le32 rtw89_build_txwd_body4_v2(struct rtw89_tx_desc_info *desc_info)
1454 {
1455 u32 dword = FIELD_PREP(BE_TXD_BODY4_SEC_IV_L0, desc_info->sec_seq[0]) |
1456 FIELD_PREP(BE_TXD_BODY4_SEC_IV_L1, desc_info->sec_seq[1]);
1457
1458 return cpu_to_le32(dword);
1459 }
1460
rtw89_build_txwd_body5_v2(struct rtw89_tx_desc_info * desc_info)1461 static __le32 rtw89_build_txwd_body5_v2(struct rtw89_tx_desc_info *desc_info)
1462 {
1463 u32 dword = FIELD_PREP(BE_TXD_BODY5_SEC_IV_H2, desc_info->sec_seq[2]) |
1464 FIELD_PREP(BE_TXD_BODY5_SEC_IV_H3, desc_info->sec_seq[3]) |
1465 FIELD_PREP(BE_TXD_BODY5_SEC_IV_H4, desc_info->sec_seq[4]) |
1466 FIELD_PREP(BE_TXD_BODY5_SEC_IV_H5, desc_info->sec_seq[5]);
1467
1468 return cpu_to_le32(dword);
1469 }
1470
rtw89_build_txwd_body6_v2(struct rtw89_tx_desc_info * desc_info)1471 static __le32 rtw89_build_txwd_body6_v2(struct rtw89_tx_desc_info *desc_info)
1472 {
1473 u32 dword = FIELD_PREP(BE_TXD_BODY6_UPD_WLAN_HDR, desc_info->upd_wlan_hdr);
1474
1475 return cpu_to_le32(dword);
1476 }
1477
rtw89_build_txwd_body7_v2(struct rtw89_tx_desc_info * desc_info)1478 static __le32 rtw89_build_txwd_body7_v2(struct rtw89_tx_desc_info *desc_info)
1479 {
1480 u32 dword = FIELD_PREP(BE_TXD_BODY7_USERATE_SEL, desc_info->use_rate) |
1481 FIELD_PREP(BE_TXD_BODY7_DATA_ER, desc_info->er_cap) |
1482 FIELD_PREP(BE_TXD_BODY7_DATA_BW_ER, 0) |
1483 FIELD_PREP(BE_TXD_BODY7_DATARATE, desc_info->data_rate);
1484
1485 return cpu_to_le32(dword);
1486 }
1487
rtw89_build_txwd_info0_v2(struct rtw89_tx_desc_info * desc_info)1488 static __le32 rtw89_build_txwd_info0_v2(struct rtw89_tx_desc_info *desc_info)
1489 {
1490 u32 dword = FIELD_PREP(BE_TXD_INFO0_DATA_STBC, desc_info->stbc) |
1491 FIELD_PREP(BE_TXD_INFO0_DATA_LDPC, desc_info->ldpc) |
1492 FIELD_PREP(BE_TXD_INFO0_DISDATAFB, desc_info->dis_data_fb) |
1493 FIELD_PREP(BE_TXD_INFO0_MULTIPORT_ID, desc_info->port);
1494
1495 return cpu_to_le32(dword);
1496 }
1497
rtw89_build_txwd_info1_v2(struct rtw89_tx_desc_info * desc_info)1498 static __le32 rtw89_build_txwd_info1_v2(struct rtw89_tx_desc_info *desc_info)
1499 {
1500 u32 dword = FIELD_PREP(BE_TXD_INFO1_MAX_AGG_NUM, desc_info->ampdu_num) |
1501 FIELD_PREP(BE_TXD_INFO1_A_CTRL_BSR, desc_info->a_ctrl_bsr) |
1502 FIELD_PREP(BE_TXD_INFO1_DATA_RTY_LOWEST_RATE,
1503 desc_info->data_retry_lowest_rate);
1504
1505 return cpu_to_le32(dword);
1506 }
1507
rtw89_build_txwd_info2_v2(struct rtw89_tx_desc_info * desc_info)1508 static __le32 rtw89_build_txwd_info2_v2(struct rtw89_tx_desc_info *desc_info)
1509 {
1510 u32 dword = FIELD_PREP(BE_TXD_INFO2_AMPDU_DENSITY, desc_info->ampdu_density) |
1511 FIELD_PREP(BE_TXD_INFO2_FORCE_KEY_EN, desc_info->sec_en) |
1512 FIELD_PREP(BE_TXD_INFO2_SEC_CAM_IDX, desc_info->sec_cam_idx);
1513
1514 return cpu_to_le32(dword);
1515 }
1516
rtw89_build_txwd_info4_v2(struct rtw89_tx_desc_info * desc_info)1517 static __le32 rtw89_build_txwd_info4_v2(struct rtw89_tx_desc_info *desc_info)
1518 {
1519 bool rts_en = !desc_info->is_bmc;
1520 u32 dword = FIELD_PREP(BE_TXD_INFO4_RTS_EN, rts_en) |
1521 FIELD_PREP(BE_TXD_INFO4_HW_RTS_EN, 1);
1522
1523 return cpu_to_le32(dword);
1524 }
1525
rtw89_core_fill_txdesc_v2(struct rtw89_dev * rtwdev,struct rtw89_tx_desc_info * desc_info,void * txdesc)1526 void rtw89_core_fill_txdesc_v2(struct rtw89_dev *rtwdev,
1527 struct rtw89_tx_desc_info *desc_info,
1528 void *txdesc)
1529 {
1530 struct rtw89_txwd_body_v2 *txwd_body = txdesc;
1531 struct rtw89_txwd_info_v2 *txwd_info;
1532
1533 txwd_body->dword0 = rtw89_build_txwd_body0_v2(desc_info);
1534 txwd_body->dword1 = rtw89_build_txwd_body1_v2(desc_info);
1535 txwd_body->dword2 = rtw89_build_txwd_body2_v2(desc_info);
1536 txwd_body->dword3 = rtw89_build_txwd_body3_v2(desc_info);
1537 if (desc_info->sec_en) {
1538 txwd_body->dword4 = rtw89_build_txwd_body4_v2(desc_info);
1539 txwd_body->dword5 = rtw89_build_txwd_body5_v2(desc_info);
1540 }
1541 txwd_body->dword6 = rtw89_build_txwd_body6_v2(desc_info);
1542 txwd_body->dword7 = rtw89_build_txwd_body7_v2(desc_info);
1543
1544 if (!desc_info->en_wd_info)
1545 return;
1546
1547 txwd_info = (struct rtw89_txwd_info_v2 *)(txwd_body + 1);
1548 txwd_info->dword0 = rtw89_build_txwd_info0_v2(desc_info);
1549 txwd_info->dword1 = rtw89_build_txwd_info1_v2(desc_info);
1550 txwd_info->dword2 = rtw89_build_txwd_info2_v2(desc_info);
1551 txwd_info->dword4 = rtw89_build_txwd_info4_v2(desc_info);
1552 }
1553 EXPORT_SYMBOL(rtw89_core_fill_txdesc_v2);
1554
rtw89_build_txwd_fwcmd0_v1(struct rtw89_tx_desc_info * desc_info)1555 static __le32 rtw89_build_txwd_fwcmd0_v1(struct rtw89_tx_desc_info *desc_info)
1556 {
1557 u32 dword = FIELD_PREP(AX_RXD_RPKT_LEN_MASK, desc_info->pkt_size) |
1558 FIELD_PREP(AX_RXD_RPKT_TYPE_MASK, desc_info->fw_dl ?
1559 RTW89_CORE_RX_TYPE_FWDL :
1560 RTW89_CORE_RX_TYPE_H2C);
1561
1562 return cpu_to_le32(dword);
1563 }
1564
rtw89_core_fill_txdesc_fwcmd_v1(struct rtw89_dev * rtwdev,struct rtw89_tx_desc_info * desc_info,void * txdesc)1565 void rtw89_core_fill_txdesc_fwcmd_v1(struct rtw89_dev *rtwdev,
1566 struct rtw89_tx_desc_info *desc_info,
1567 void *txdesc)
1568 {
1569 struct rtw89_rxdesc_short *txwd_v1 = (struct rtw89_rxdesc_short *)txdesc;
1570
1571 txwd_v1->dword0 = rtw89_build_txwd_fwcmd0_v1(desc_info);
1572 }
1573 EXPORT_SYMBOL(rtw89_core_fill_txdesc_fwcmd_v1);
1574
1575 static __le32 rtw89_build_txwd_fwcmd0_v2(struct rtw89_tx_desc_info *desc_info)
1576 {
1577 u32 dword = FIELD_PREP(BE_RXD_RPKT_LEN_MASK, desc_info->pkt_size) |
1578 FIELD_PREP(BE_RXD_RPKT_TYPE_MASK, desc_info->fw_dl ?
1579 RTW89_CORE_RX_TYPE_FWDL :
1580 RTW89_CORE_RX_TYPE_H2C);
1581
1582 return cpu_to_le32(dword);
1583 }
1584
1585 void rtw89_core_fill_txdesc_fwcmd_v2(struct rtw89_dev *rtwdev,
1586 struct rtw89_tx_desc_info *desc_info,
1587 void *txdesc)
1588 {
1589 struct rtw89_rxdesc_short_v2 *txwd_v2 = (struct rtw89_rxdesc_short_v2 *)txdesc;
1590
1591 txwd_v2->dword0 = rtw89_build_txwd_fwcmd0_v2(desc_info);
1592 }
1593 EXPORT_SYMBOL(rtw89_core_fill_txdesc_fwcmd_v2);
1594
1595 static int rtw89_core_rx_process_mac_ppdu(struct rtw89_dev *rtwdev,
1596 struct sk_buff *skb,
1597 struct rtw89_rx_phy_ppdu *phy_ppdu)
1598 {
1599 const struct rtw89_chip_info *chip = rtwdev->chip;
1600 const struct rtw89_rxinfo *rxinfo = (const struct rtw89_rxinfo *)skb->data;
1601 const struct rtw89_rxinfo_user *user;
1602 enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
1603 int rx_cnt_size = RTW89_PPDU_MAC_RX_CNT_SIZE;
1604 bool rx_cnt_valid = false;
1605 bool invalid = false;
1606 u8 plcp_size = 0;
1607 u8 *phy_sts;
1608 u8 usr_num;
1609 int i;
1610
1611 if (chip_gen == RTW89_CHIP_BE) {
1612 invalid = le32_get_bits(rxinfo->w0, RTW89_RXINFO_W0_INVALID_V1);
1613 rx_cnt_size = RTW89_PPDU_MAC_RX_CNT_SIZE_V1;
1614 }
1615
1616 if (invalid)
1617 return -EINVAL;
1618
1619 rx_cnt_valid = le32_get_bits(rxinfo->w0, RTW89_RXINFO_W0_RX_CNT_VLD);
1620 if (chip_gen == RTW89_CHIP_BE) {
1621 plcp_size = le32_get_bits(rxinfo->w0, RTW89_RXINFO_W0_PLCP_LEN_V1) << 3;
1622 usr_num = le32_get_bits(rxinfo->w0, RTW89_RXINFO_W0_USR_NUM_V1);
1623 } else {
1624 plcp_size = le32_get_bits(rxinfo->w1, RTW89_RXINFO_W1_PLCP_LEN) << 3;
1625 usr_num = le32_get_bits(rxinfo->w0, RTW89_RXINFO_W0_USR_NUM);
1626 }
1627 if (usr_num > chip->ppdu_max_usr) {
1628 rtw89_warn(rtwdev, "Invalid user number (%d) in mac info\n",
1629 usr_num);
1630 return -EINVAL;
1631 }
1632
1633 for (i = 0; i < usr_num; i++) {
1634 user = &rxinfo->user[i];
1635 if (!le32_get_bits(user->w0, RTW89_RXINFO_USER_MAC_ID_VALID))
1636 continue;
1637 /* For WiFi 7 chips, RXWD.mac_id of PPDU status is not set
1638 * by hardware, so update mac_id by rxinfo_user[].mac_id.
1639 */
1640 if (chip_gen == RTW89_CHIP_BE)
1641 phy_ppdu->mac_id =
1642 le32_get_bits(user->w0, RTW89_RXINFO_USER_MACID);
1643 phy_ppdu->has_data =
1644 le32_get_bits(user->w0, RTW89_RXINFO_USER_DATA);
1645 phy_ppdu->has_bcn =
1646 le32_get_bits(user->w0, RTW89_RXINFO_USER_BCN);
1647 break;
1648 }
1649
1650 phy_sts = skb->data + RTW89_PPDU_MAC_INFO_SIZE;
1651 phy_sts += usr_num * RTW89_PPDU_MAC_INFO_USR_SIZE;
1652 /* 8-byte alignment */
1653 if (usr_num & BIT(0))
1654 phy_sts += RTW89_PPDU_MAC_INFO_USR_SIZE;
1655 if (rx_cnt_valid)
1656 phy_sts += rx_cnt_size;
1657 phy_sts += plcp_size;
1658
1659 if (phy_sts > skb->data + skb->len)
1660 return -EINVAL;
1661
1662 phy_ppdu->buf = phy_sts;
1663 phy_ppdu->len = skb->data + skb->len - phy_sts;
1664
1665 return 0;
1666 }
1667
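/* Map a hardware data rate to its number of spatial streams (1-based). */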
1668 static u8 rtw89_get_data_rate_nss(struct rtw89_dev *rtwdev, u16 data_rate)
1669 {
1670 u8 data_rate_mode;
1671
1672 data_rate_mode = rtw89_get_data_rate_mode(rtwdev, data_rate);
1673 switch (data_rate_mode) {
1674 case DATA_RATE_MODE_NON_HT:
1675 return 1;
1676 case DATA_RATE_MODE_HT:
1677 return rtw89_get_data_ht_nss(rtwdev, data_rate) + 1;
1678 case DATA_RATE_MODE_VHT:
1679 case DATA_RATE_MODE_HE:
1680 case DATA_RATE_MODE_EHT:
1681 return rtw89_get_data_nss(rtwdev, data_rate) + 1;
1682 default:
1683 rtw89_warn(rtwdev, "invalid RX rate mode %d\n", data_rate_mode);
1684 return 0;
1685 }
1686 }
1687
1688 static void rtw89_core_rx_process_phy_ppdu_iter(void *data,
1689 struct ieee80211_sta *sta)
1690 {
1691 struct rtw89_rx_phy_ppdu *phy_ppdu = (struct rtw89_rx_phy_ppdu *)data;
1692 struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
1693 struct rtw89_dev *rtwdev = rtwsta->rtwdev;
1694 struct rtw89_hal *hal = &rtwdev->hal;
1695 struct rtw89_sta_link *rtwsta_link;
1696 u8 ant_num = hal->ant_diversity ? 2 : rtwdev->chip->rf_path_num;
1697 u8 ant_pos = U8_MAX;
1698 u8 evm_pos = 0;
1699 int i;
1700
1701 rtwsta_link = rtw89_sta_get_link_inst(rtwsta, phy_ppdu->phy_idx);
1702 if (unlikely(!rtwsta_link))
1703 return;
1704
1705 if (rtwsta_link->mac_id != phy_ppdu->mac_id || !phy_ppdu->to_self)
1706 return;
1707
1708 if (hal->ant_diversity && hal->antenna_rx) {
1709 ant_pos = __ffs(hal->antenna_rx);
1710 evm_pos = ant_pos;
1711 }
1712
1713 ewma_rssi_add(&rtwsta_link->avg_rssi, phy_ppdu->rssi_avg);
1714
1715 if (ant_pos < ant_num) {
1716 ewma_rssi_add(&rtwsta_link->rssi[ant_pos], phy_ppdu->rssi[0]);
1717 } else {
1718 for (i = 0; i < rtwdev->chip->rf_path_num; i++)
1719 ewma_rssi_add(&rtwsta_link->rssi[i], phy_ppdu->rssi[i]);
1720 }
1721
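/* Track SNR/EVM only for OFDM PPDUs carrying data or a beacon; 1SS rates
 * update a dedicated EVM average, other rates update the min/max EVM
 * averages (per selected antenna when antenna diversity is enabled).
 */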
1722 if (phy_ppdu->ofdm.has && (phy_ppdu->has_data || phy_ppdu->has_bcn)) {
1723 ewma_snr_add(&rtwsta_link->avg_snr, phy_ppdu->ofdm.avg_snr);
1724 if (rtw89_get_data_rate_nss(rtwdev, phy_ppdu->rate) == 1) {
1725 ewma_evm_add(&rtwsta_link->evm_1ss, phy_ppdu->ofdm.evm_min);
1726 } else {
1727 ewma_evm_add(&rtwsta_link->evm_min[evm_pos],
1728 phy_ppdu->ofdm.evm_min);
1729 ewma_evm_add(&rtwsta_link->evm_max[evm_pos],
1730 phy_ppdu->ofdm.evm_max);
1731 }
1732 }
1733 }
1734
1735 #define VAR_LEN 0xff
1736 #define VAR_LEN_UNIT 8
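/* Per-IE payload lengths in bytes, indexed by PHY status IE type for each
 * chip generation. VAR_LEN entries take their length from the IE header,
 * in VAR_LEN_UNIT (8-byte) units.
 */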
1737 static u16 rtw89_core_get_phy_status_ie_len(struct rtw89_dev *rtwdev,
1738 const struct rtw89_phy_sts_iehdr *iehdr)
1739 {
1740 static const u8 physts_ie_len_tabs[RTW89_CHIP_GEN_NUM][32] = {
1741 [RTW89_CHIP_AX] = {
1742 16, 32, 24, 24, 8, 8, 8, 8, VAR_LEN, 8, VAR_LEN, 176, VAR_LEN,
1743 VAR_LEN, VAR_LEN, VAR_LEN, VAR_LEN, VAR_LEN, 16, 24, VAR_LEN,
1744 VAR_LEN, VAR_LEN, 0, 24, 24, 24, 24, 32, 32, 32, 32
1745 },
1746 [RTW89_CHIP_BE] = {
1747 32, 40, 24, 24, 8, 8, 8, 8, VAR_LEN, 8, VAR_LEN, 176, VAR_LEN,
1748 VAR_LEN, VAR_LEN, VAR_LEN, VAR_LEN, VAR_LEN, 88, 56, VAR_LEN,
1749 VAR_LEN, VAR_LEN, 0, 24, 24, 24, 24, 32, 32, 32, 32
1750 },
1751 };
1752 const u8 *physts_ie_len_tab;
1753 u16 ie_len;
1754 u8 ie;
1755
1756 physts_ie_len_tab = physts_ie_len_tabs[rtwdev->chip->chip_gen];
1757
1758 ie = le32_get_bits(iehdr->w0, RTW89_PHY_STS_IEHDR_TYPE);
1759 if (physts_ie_len_tab[ie] != VAR_LEN)
1760 ie_len = physts_ie_len_tab[ie];
1761 else
1762 ie_len = le32_get_bits(iehdr->w0, RTW89_PHY_STS_IEHDR_LEN) * VAR_LEN_UNIT;
1763
1764 return ie_len;
1765 }
1766
1767 static void rtw89_core_parse_phy_status_ie01_v2(struct rtw89_dev *rtwdev,
1768 const struct rtw89_phy_sts_iehdr *iehdr,
1769 struct rtw89_rx_phy_ppdu *phy_ppdu)
1770 {
1771 const struct rtw89_phy_sts_ie01_v2 *ie;
1772 u8 *rpl_fd = phy_ppdu->rpl_fd;
1773
1774 ie = (const struct rtw89_phy_sts_ie01_v2 *)iehdr;
1775 rpl_fd[RF_PATH_A] = le32_get_bits(ie->w8, RTW89_PHY_STS_IE01_V2_W8_RPL_FD_A);
1776 rpl_fd[RF_PATH_B] = le32_get_bits(ie->w8, RTW89_PHY_STS_IE01_V2_W8_RPL_FD_B);
1777 rpl_fd[RF_PATH_C] = le32_get_bits(ie->w9, RTW89_PHY_STS_IE01_V2_W9_RPL_FD_C);
1778 rpl_fd[RF_PATH_D] = le32_get_bits(ie->w9, RTW89_PHY_STS_IE01_V2_W9_RPL_FD_D);
1779
1780 phy_ppdu->bw_idx = le32_get_bits(ie->w5, RTW89_PHY_STS_IE01_V2_W5_BW_IDX);
1781 }
1782
1783 static void rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev,
1784 const struct rtw89_phy_sts_iehdr *iehdr,
1785 struct rtw89_rx_phy_ppdu *phy_ppdu)
1786 {
1787 const struct rtw89_phy_sts_ie01 *ie = (const struct rtw89_phy_sts_ie01 *)iehdr;
1788 s16 cfo;
1789 u32 t;
1790
1791 phy_ppdu->chan_idx = le32_get_bits(ie->w0, RTW89_PHY_STS_IE01_W0_CH_IDX);
1792
1793 if (rtwdev->hw->conf.flags & IEEE80211_CONF_MONITOR) {
1794 phy_ppdu->ldpc = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_LDPC);
1795 phy_ppdu->stbc = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_STBC);
1796 }
1797
1798 if (!phy_ppdu->hdr_2_en)
1799 phy_ppdu->rx_path_en =
1800 le32_get_bits(ie->w0, RTW89_PHY_STS_IE01_W0_RX_PATH_EN);
1801
1802 if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6)
1803 return;
1804
1805 if (!phy_ppdu->to_self)
1806 return;
1807
1808 phy_ppdu->rpl_avg = le32_get_bits(ie->w0, RTW89_PHY_STS_IE01_W0_RSSI_AVG_FD);
1809 phy_ppdu->ofdm.avg_snr = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_AVG_SNR);
1810 phy_ppdu->ofdm.evm_max = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_EVM_MAX);
1811 phy_ppdu->ofdm.evm_min = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_EVM_MIN);
1812 phy_ppdu->ofdm.has = true;
1813
1814 /* sign conversion for S(12,2) */
1815 if (rtwdev->chip->cfo_src_fd) {
1816 t = le32_get_bits(ie->w1, RTW89_PHY_STS_IE01_W1_FD_CFO);
1817 cfo = sign_extend32(t, 11);
1818 } else {
1819 t = le32_get_bits(ie->w1, RTW89_PHY_STS_IE01_W1_PREMB_CFO);
1820 cfo = sign_extend32(t, 11);
1821 }
1822
1823 rtw89_phy_cfo_parse(rtwdev, cfo, phy_ppdu);
1824
1825 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
1826 rtw89_core_parse_phy_status_ie01_v2(rtwdev, iehdr, phy_ppdu);
1827 }
1828
1829 static void rtw89_core_parse_phy_status_ie00(struct rtw89_dev *rtwdev,
1830 const struct rtw89_phy_sts_iehdr *iehdr,
1831 struct rtw89_rx_phy_ppdu *phy_ppdu)
1832 {
1833 const struct rtw89_phy_sts_ie00 *ie = (const struct rtw89_phy_sts_ie00 *)iehdr;
1834 u16 tmp_rpl;
1835
1836 tmp_rpl = le32_get_bits(ie->w0, RTW89_PHY_STS_IE00_W0_RPL);
1837 phy_ppdu->rpl_avg = tmp_rpl >> 1;
1838 }
1839
1840 static void rtw89_core_parse_phy_status_ie00_v2(struct rtw89_dev *rtwdev,
1841 const struct rtw89_phy_sts_iehdr *iehdr,
1842 struct rtw89_rx_phy_ppdu *phy_ppdu)
1843 {
1844 const struct rtw89_phy_sts_ie00_v2 *ie;
1845 u8 *rpl_path = phy_ppdu->rpl_path;
1846 u16 tmp_rpl[RF_PATH_MAX];
1847 u8 i;
1848
1849 ie = (const struct rtw89_phy_sts_ie00_v2 *)iehdr;
1850 tmp_rpl[RF_PATH_A] = le32_get_bits(ie->w4, RTW89_PHY_STS_IE00_V2_W4_RPL_TD_A);
1851 tmp_rpl[RF_PATH_B] = le32_get_bits(ie->w4, RTW89_PHY_STS_IE00_V2_W4_RPL_TD_B);
1852 tmp_rpl[RF_PATH_C] = le32_get_bits(ie->w4, RTW89_PHY_STS_IE00_V2_W4_RPL_TD_C);
1853 tmp_rpl[RF_PATH_D] = le32_get_bits(ie->w5, RTW89_PHY_STS_IE00_V2_W5_RPL_TD_D);
1854
1855 for (i = 0; i < RF_PATH_MAX; i++)
1856 rpl_path[i] = tmp_rpl[i] >> 1;
1857 }
1858
1859 static int rtw89_core_process_phy_status_ie(struct rtw89_dev *rtwdev,
1860 const struct rtw89_phy_sts_iehdr *iehdr,
1861 struct rtw89_rx_phy_ppdu *phy_ppdu)
1862 {
1863 u8 ie;
1864
1865 ie = le32_get_bits(iehdr->w0, RTW89_PHY_STS_IEHDR_TYPE);
1866
1867 switch (ie) {
1868 case RTW89_PHYSTS_IE00_CMN_CCK:
1869 rtw89_core_parse_phy_status_ie00(rtwdev, iehdr, phy_ppdu);
1870 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
1871 rtw89_core_parse_phy_status_ie00_v2(rtwdev, iehdr, phy_ppdu);
1872 break;
1873 case RTW89_PHYSTS_IE01_CMN_OFDM:
1874 rtw89_core_parse_phy_status_ie01(rtwdev, iehdr, phy_ppdu);
1875 break;
1876 default:
1877 break;
1878 }
1879
1880 return 0;
1881 }
1882
1883 static void rtw89_core_update_phy_ppdu_hdr_v2(struct rtw89_rx_phy_ppdu *phy_ppdu)
1884 {
1885 const struct rtw89_phy_sts_hdr_v2 *hdr = phy_ppdu->buf + PHY_STS_HDR_LEN;
1886
1887 phy_ppdu->rx_path_en = le32_get_bits(hdr->w0, RTW89_PHY_STS_HDR_V2_W0_PATH_EN);
1888 }
1889
1890 static void rtw89_core_update_phy_ppdu(struct rtw89_rx_phy_ppdu *phy_ppdu)
1891 {
1892 const struct rtw89_phy_sts_hdr *hdr = phy_ppdu->buf;
1893 u8 *rssi = phy_ppdu->rssi;
1894
1895 phy_ppdu->ie = le32_get_bits(hdr->w0, RTW89_PHY_STS_HDR_W0_IE_MAP);
1896 phy_ppdu->rssi_avg = le32_get_bits(hdr->w0, RTW89_PHY_STS_HDR_W0_RSSI_AVG);
1897 rssi[RF_PATH_A] = le32_get_bits(hdr->w1, RTW89_PHY_STS_HDR_W1_RSSI_A);
1898 rssi[RF_PATH_B] = le32_get_bits(hdr->w1, RTW89_PHY_STS_HDR_W1_RSSI_B);
1899 rssi[RF_PATH_C] = le32_get_bits(hdr->w1, RTW89_PHY_STS_HDR_W1_RSSI_C);
1900 rssi[RF_PATH_D] = le32_get_bits(hdr->w1, RTW89_PHY_STS_HDR_W1_RSSI_D);
1901
1902 phy_ppdu->hdr_2_en = le32_get_bits(hdr->w0, RTW89_PHY_STS_HDR_W0_HDR_2_EN);
1903 if (phy_ppdu->hdr_2_en)
1904 rtw89_core_update_phy_ppdu_hdr_v2(phy_ppdu);
1905 }
1906
1907 static int rtw89_core_rx_process_phy_ppdu(struct rtw89_dev *rtwdev,
1908 struct rtw89_rx_phy_ppdu *phy_ppdu)
1909 {
1910 const struct rtw89_phy_sts_hdr *hdr = phy_ppdu->buf;
1911 u32 len_from_header;
1912 bool physts_valid;
1913
1914 physts_valid = le32_get_bits(hdr->w0, RTW89_PHY_STS_HDR_W0_VALID);
1915 if (!physts_valid)
1916 return -EINVAL;
1917
1918 len_from_header = le32_get_bits(hdr->w0, RTW89_PHY_STS_HDR_W0_LEN) << 3;
1919
1920 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
1921 len_from_header += PHY_STS_HDR_LEN;
1922
1923 if (len_from_header != phy_ppdu->len) {
1924 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "phy ppdu len mismatch\n");
1925 return -EINVAL;
1926 }
1927 rtw89_core_update_phy_ppdu(phy_ppdu);
1928
1929 return 0;
1930 }
1931
1932 static int rtw89_core_rx_parse_phy_sts(struct rtw89_dev *rtwdev,
1933 struct rtw89_rx_phy_ppdu *phy_ppdu)
1934 {
1935 u16 ie_len;
1936 void *pos, *end;
1937
1938 /* mark invalid reports and bypass them */
1939 if (phy_ppdu->ie < RTW89_CCK_PKT)
1940 return -EINVAL;
1941
1942 pos = phy_ppdu->buf + PHY_STS_HDR_LEN;
1943 if (phy_ppdu->hdr_2_en)
1944 pos += PHY_STS_HDR_LEN;
1945 end = phy_ppdu->buf + phy_ppdu->len;
1946 while (pos < end) {
1947 const struct rtw89_phy_sts_iehdr *iehdr = pos;
1948
1949 ie_len = rtw89_core_get_phy_status_ie_len(rtwdev, iehdr);
1950 rtw89_core_process_phy_status_ie(rtwdev, iehdr, phy_ppdu);
1951 pos += ie_len;
1952 if (pos > end || ie_len == 0) {
1953 rtw89_debug(rtwdev, RTW89_DBG_TXRX,
1954 "phy status parse failed\n");
1955 return -EINVAL;
1956 }
1957 }
1958
1959 rtw89_chip_convert_rpl_to_rssi(rtwdev, phy_ppdu);
1960 rtw89_phy_antdiv_parse(rtwdev, phy_ppdu);
1961
1962 return 0;
1963 }
1964
1965 static void rtw89_core_rx_process_phy_sts(struct rtw89_dev *rtwdev,
1966 struct rtw89_rx_phy_ppdu *phy_ppdu)
1967 {
1968 int ret;
1969
1970 ret = rtw89_core_rx_parse_phy_sts(rtwdev, phy_ppdu);
1971 if (ret)
1972 rtw89_debug(rtwdev, RTW89_DBG_TXRX, "parse phy sts failed\n");
1973 else
1974 phy_ppdu->valid = true;
1975
1976 ieee80211_iterate_stations_atomic(rtwdev->hw,
1977 rtw89_core_rx_process_phy_ppdu_iter,
1978 phy_ppdu);
1979 }
1980
1981 static u8 rtw89_rxdesc_to_nl_he_gi(struct rtw89_dev *rtwdev,
1982 u8 desc_info_gi,
1983 bool rx_status)
1984 {
1985 switch (desc_info_gi) {
1986 case RTW89_GILTF_SGI_4XHE08:
1987 case RTW89_GILTF_2XHE08:
1988 case RTW89_GILTF_1XHE08:
1989 return NL80211_RATE_INFO_HE_GI_0_8;
1990 case RTW89_GILTF_2XHE16:
1991 case RTW89_GILTF_1XHE16:
1992 return NL80211_RATE_INFO_HE_GI_1_6;
1993 case RTW89_GILTF_LGI_4XHE32:
1994 return NL80211_RATE_INFO_HE_GI_3_2;
1995 default:
1996 rtw89_warn(rtwdev, "invalid gi_ltf=%d", desc_info_gi);
1997 if (rx_status)
1998 return NL80211_RATE_INFO_HE_GI_3_2;
1999 return U8_MAX;
2000 }
2001 }
2002
2003 static u8 rtw89_rxdesc_to_nl_eht_gi(struct rtw89_dev *rtwdev,
2004 u8 desc_info_gi,
2005 bool rx_status)
2006 {
2007 switch (desc_info_gi) {
2008 case RTW89_GILTF_SGI_4XHE08:
2009 case RTW89_GILTF_2XHE08:
2010 case RTW89_GILTF_1XHE08:
2011 return NL80211_RATE_INFO_EHT_GI_0_8;
2012 case RTW89_GILTF_2XHE16:
2013 case RTW89_GILTF_1XHE16:
2014 return NL80211_RATE_INFO_EHT_GI_1_6;
2015 case RTW89_GILTF_LGI_4XHE32:
2016 return NL80211_RATE_INFO_EHT_GI_3_2;
2017 default:
2018 rtw89_warn(rtwdev, "invalid gi_ltf=%d", desc_info_gi);
2019 if (rx_status)
2020 return NL80211_RATE_INFO_EHT_GI_3_2;
2021 return U8_MAX;
2022 }
2023 }
2024
2025 static u8 rtw89_rxdesc_to_nl_he_eht_gi(struct rtw89_dev *rtwdev,
2026 u8 desc_info_gi,
2027 bool rx_status, bool eht)
2028 {
2029 return eht ? rtw89_rxdesc_to_nl_eht_gi(rtwdev, desc_info_gi, rx_status) :
2030 rtw89_rxdesc_to_nl_he_gi(rtwdev, desc_info_gi, rx_status);
2031 }
2032
2033 static
2034 bool rtw89_check_rx_statu_gi_match(struct ieee80211_rx_status *status, u8 gi_ltf,
2035 bool eht)
2036 {
2037 if (eht)
2038 return status->eht.gi == gi_ltf;
2039
2040 return status->he_gi == gi_ltf;
2041 }
2042
2043 static bool rtw89_core_rx_ppdu_match(struct rtw89_dev *rtwdev,
2044 struct rtw89_rx_desc_info *desc_info,
2045 struct ieee80211_rx_status *status)
2046 {
2047 u8 band = desc_info->bb_sel ? RTW89_PHY_1 : RTW89_PHY_0;
2048 u8 data_rate_mode, bw, rate_idx = MASKBYTE0, gi_ltf;
2049 bool eht = false;
2050 u16 data_rate;
2051 bool ret;
2052
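/* A queued frame matches this PPDU status if it belongs to the same PPDU
 * (counter) and was received with the same rate index, GI and bandwidth.
 */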
2053 data_rate = desc_info->data_rate;
2054 data_rate_mode = rtw89_get_data_rate_mode(rtwdev, data_rate);
2055 if (data_rate_mode == DATA_RATE_MODE_NON_HT) {
2056 rate_idx = rtw89_get_data_not_ht_idx(rtwdev, data_rate);
2057 /* rate_idx is still hardware value here */
2058 } else if (data_rate_mode == DATA_RATE_MODE_HT) {
2059 rate_idx = rtw89_get_data_ht_mcs(rtwdev, data_rate);
2060 } else if (data_rate_mode == DATA_RATE_MODE_VHT ||
2061 data_rate_mode == DATA_RATE_MODE_HE ||
2062 data_rate_mode == DATA_RATE_MODE_EHT) {
2063 rate_idx = rtw89_get_data_mcs(rtwdev, data_rate);
2064 } else {
2065 rtw89_warn(rtwdev, "invalid RX rate mode %d\n", data_rate_mode);
2066 }
2067
2068 eht = data_rate_mode == DATA_RATE_MODE_EHT;
2069 bw = rtw89_hw_to_rate_info_bw(desc_info->bw);
2070 gi_ltf = rtw89_rxdesc_to_nl_he_eht_gi(rtwdev, desc_info->gi_ltf, false, eht);
2071 ret = rtwdev->ppdu_sts.curr_rx_ppdu_cnt[band] == desc_info->ppdu_cnt &&
2072 status->rate_idx == rate_idx &&
2073 rtw89_check_rx_statu_gi_match(status, gi_ltf, eht) &&
2074 status->bw == bw;
2075
2076 return ret;
2077 }
2078
2079 struct rtw89_vif_rx_stats_iter_data {
2080 struct rtw89_dev *rtwdev;
2081 struct rtw89_rx_phy_ppdu *phy_ppdu;
2082 struct rtw89_rx_desc_info *desc_info;
2083 struct sk_buff *skb;
2084 const u8 *bssid;
2085 };
2086
2087 static void rtw89_stats_trigger_frame(struct rtw89_dev *rtwdev,
2088 struct rtw89_vif_link *rtwvif_link,
2089 struct ieee80211_bss_conf *bss_conf,
2090 struct sk_buff *skb)
2091 {
2092 struct ieee80211_trigger *tf = (struct ieee80211_trigger *)skb->data;
2093 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
2094 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
2095 u8 *pos, *end, type, tf_bw;
2096 u16 aid, tf_rua;
2097
2098 if (!ether_addr_equal(bss_conf->bssid, tf->ta) ||
2099 rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION ||
2100 rtwvif_link->net_type == RTW89_NET_TYPE_NO_LINK)
2101 return;
2102
2103 type = le64_get_bits(tf->common_info, IEEE80211_TRIGGER_TYPE_MASK);
2104 if (type != IEEE80211_TRIGGER_TYPE_BASIC && type != IEEE80211_TRIGGER_TYPE_MU_BAR)
2105 return;
2106
2107 end = (u8 *)tf + skb->len;
2108 pos = tf->variable;
2109
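/* Walk the trigger frame's per-user info fields looking for our AID. */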
2110 while (end - pos >= RTW89_TF_BASIC_USER_INFO_SZ) {
2111 aid = RTW89_GET_TF_USER_INFO_AID12(pos);
2112 tf_rua = RTW89_GET_TF_USER_INFO_RUA(pos);
2113 tf_bw = le64_get_bits(tf->common_info, IEEE80211_TRIGGER_ULBW_MASK);
2114 rtw89_debug(rtwdev, RTW89_DBG_TXRX,
2115 "[TF] aid: %d, ul_mcs: %d, rua: %d, bw: %d\n",
2116 aid, RTW89_GET_TF_USER_INFO_UL_MCS(pos),
2117 tf_rua, tf_bw);
2118
2119 if (aid == RTW89_TF_PAD)
2120 break;
2121
2122 if (aid == vif->cfg.aid) {
2123 enum nl80211_he_ru_alloc rua;
2124
2125 rtwvif->stats.rx_tf_acc++;
2126 rtwdev->stats.rx_tf_acc++;
2127
2128 /* The following is only required for HE trigger frames, but we
2129 * cannot use the UL HE-SIG-A2 reserved subfield to identify them,
2130 * since some 11ax APs fill it with all 0s, which would be
2131 * misinterpreted as an EHT trigger frame.
2132 */
2133 if (bss_conf->eht_support)
2134 break;
2135
2136 rua = rtw89_he_rua_to_ru_alloc(tf_rua >> 1);
2137
2138 if (tf_bw == IEEE80211_TRIGGER_ULBW_160_80P80MHZ &&
2139 rua <= NL80211_RATE_INFO_HE_RU_ALLOC_106)
2140 rtwvif_link->pwr_diff_en = true;
2141 break;
2142 }
2143
2144 pos += RTW89_TF_BASIC_USER_INFO_SZ;
2145 }
2146 }
2147
2148 static void rtw89_cancel_6ghz_probe_work(struct wiphy *wiphy, struct wiphy_work *work)
2149 {
2150 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
2151 cancel_6ghz_probe_work);
2152 struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
2153 struct rtw89_pktofld_info *info;
2154
2155 lockdep_assert_wiphy(wiphy);
2156
2157 if (!rtwdev->scanning)
2158 return;
2159
2160 list_for_each_entry(info, &pkt_list[NL80211_BAND_6GHZ], list) {
2161 if (!info->cancel || !test_bit(info->id, rtwdev->pkt_offload))
2162 continue;
2163
2164 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
2165
2166 /* Don't delete/free info from pkt_list at this moment. Let it
2167 * be deleted/freed in rtw89_release_pkt_list() after scanning,
2168 * since pkt_list is still accessed in bottom half while scanning.
2169 */
2170 }
2171 }
2172
2173 static void rtw89_core_cancel_6ghz_probe_tx(struct rtw89_dev *rtwdev,
2174 struct sk_buff *skb)
2175 {
2176 struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
2177 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
2178 struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
2179 struct rtw89_pktofld_info *info;
2180 const u8 *ies = mgmt->u.beacon.variable, *ssid_ie;
2181 bool queue_work = false;
2182
2183 if (rx_status->band != NL80211_BAND_6GHZ)
2184 return;
2185
2186 if (unlikely(!(rtwdev->chip->support_bands & BIT(NL80211_BAND_6GHZ)))) {
2187 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "invalid rx on unsupported 6 GHz\n");
2188 return;
2189 }
2190
2191 ssid_ie = cfg80211_find_ie(WLAN_EID_SSID, ies, skb->len);
2192
2193 list_for_each_entry(info, &pkt_list[NL80211_BAND_6GHZ], list) {
2194 if (ether_addr_equal(info->bssid, mgmt->bssid)) {
2195 info->cancel = true;
2196 queue_work = true;
2197 continue;
2198 }
2199
2200 if (!ssid_ie || ssid_ie[1] != info->ssid_len || info->ssid_len == 0)
2201 continue;
2202
2203 if (memcmp(&ssid_ie[2], info->ssid, info->ssid_len) == 0) {
2204 info->cancel = true;
2205 queue_work = true;
2206 }
2207 }
2208
2209 if (queue_work)
2210 wiphy_work_queue(rtwdev->hw->wiphy, &rtwdev->cancel_6ghz_probe_work);
2211 }
2212
2213 static void rtw89_vif_sync_bcn_tsf(struct rtw89_vif_link *rtwvif_link,
2214 struct ieee80211_hdr *hdr, size_t len)
2215 {
2216 struct ieee80211_mgmt *mgmt = (typeof(mgmt))hdr;
2217
2218 if (len < offsetof(typeof(*mgmt), u.beacon.variable))
2219 return;
2220
2221 WRITE_ONCE(rtwvif_link->sync_bcn_tsf, le64_to_cpu(mgmt->u.beacon.timestamp));
2222 }
2223
2224 static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
2225 struct ieee80211_vif *vif)
2226 {
2227 struct rtw89_vif_rx_stats_iter_data *iter_data = data;
2228 struct rtw89_dev *rtwdev = iter_data->rtwdev;
2229 struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
2230 struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat;
2231 struct rtw89_rx_desc_info *desc_info = iter_data->desc_info;
2232 struct sk_buff *skb = iter_data->skb;
2233 struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
2234 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2235 struct rtw89_rx_phy_ppdu *phy_ppdu = iter_data->phy_ppdu;
2236 bool is_mld = ieee80211_vif_is_mld(vif);
2237 struct ieee80211_bss_conf *bss_conf;
2238 struct rtw89_vif_link *rtwvif_link;
2239 const u8 *bssid = iter_data->bssid;
2240
2241 if (rtwdev->scanning &&
2242 (ieee80211_is_beacon(hdr->frame_control) ||
2243 ieee80211_is_probe_resp(hdr->frame_control)))
2244 rtw89_core_cancel_6ghz_probe_tx(rtwdev, skb);
2245
2246 rcu_read_lock();
2247
2248 rtwvif_link = rtw89_vif_get_link_inst(rtwvif, desc_info->bb_sel);
2249 if (unlikely(!rtwvif_link))
2250 goto out;
2251
2252 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false);
2253 if (!bss_conf->bssid)
2254 goto out;
2255
2256 if (ieee80211_is_trigger(hdr->frame_control)) {
2257 rtw89_stats_trigger_frame(rtwdev, rtwvif_link, bss_conf, skb);
2258 goto out;
2259 }
2260
2261 if (!ether_addr_equal(bss_conf->bssid, bssid))
2262 goto out;
2263
2264 if (is_mld) {
2265 rx_status->link_valid = true;
2266 rx_status->link_id = rtwvif_link->link_id;
2267 }
2268
2269 if (ieee80211_is_beacon(hdr->frame_control)) {
2270 if (vif->type == NL80211_IFTYPE_STATION &&
2271 !test_bit(RTW89_FLAG_WOWLAN, rtwdev->flags)) {
2272 rtw89_vif_sync_bcn_tsf(rtwvif_link, hdr, skb->len);
2273 rtw89_fw_h2c_rssi_offload(rtwdev, phy_ppdu);
2274 }
2275 pkt_stat->beacon_nr++;
2276
2277 if (phy_ppdu) {
2278 ewma_rssi_add(&rtwdev->phystat.bcn_rssi, phy_ppdu->rssi_avg);
2279 if (!test_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags))
2280 rtwvif_link->bcn_bw_idx = phy_ppdu->bw_idx;
2281 }
2282
2283 pkt_stat->beacon_rate = desc_info->data_rate;
2284 }
2285
2286 if (!ether_addr_equal(bss_conf->addr, hdr->addr1))
2287 goto out;
2288
2289 if (desc_info->data_rate < RTW89_HW_RATE_NR)
2290 pkt_stat->rx_rate_cnt[desc_info->data_rate]++;
2291
2292 rtw89_traffic_stats_accu(rtwdev, rtwvif, skb, false, false);
2293
2294 out:
2295 rcu_read_unlock();
2296 }
2297
2298 static void rtw89_core_rx_stats(struct rtw89_dev *rtwdev,
2299 struct rtw89_rx_phy_ppdu *phy_ppdu,
2300 struct rtw89_rx_desc_info *desc_info,
2301 struct sk_buff *skb)
2302 {
2303 struct rtw89_vif_rx_stats_iter_data iter_data;
2304
2305 rtw89_traffic_stats_accu(rtwdev, NULL, skb, true, false);
2306
2307 iter_data.rtwdev = rtwdev;
2308 iter_data.phy_ppdu = phy_ppdu;
2309 iter_data.desc_info = desc_info;
2310 iter_data.skb = skb;
2311 iter_data.bssid = get_hdr_bssid((struct ieee80211_hdr *)skb->data);
2312 rtw89_iterate_vifs_bh(rtwdev, rtw89_vif_rx_stats_iter, &iter_data);
2313 }
2314
2315 static void rtw89_correct_cck_chan(struct rtw89_dev *rtwdev,
2316 struct ieee80211_rx_status *status)
2317 {
2318 const struct rtw89_chan_rcd *rcd =
2319 rtw89_chan_rcd_get(rtwdev, RTW89_CHANCTX_0);
2320 u16 chan = rcd->prev_primary_channel;
2321 u8 band = rtw89_hw_to_nl80211_band(rcd->prev_band_type);
2322
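/* CCK rates (below OFDM6) only exist on 2 GHz, so a legacy frame reported
 * with such a rate on another band was presumably received on the previous
 * 2 GHz channel; restore freq/band from the recorded previous channel.
 */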
2323 if (status->band != NL80211_BAND_2GHZ &&
2324 status->encoding == RX_ENC_LEGACY &&
2325 status->rate_idx < RTW89_HW_RATE_OFDM6) {
2326 status->freq = ieee80211_channel_to_frequency(chan, band);
2327 status->band = band;
2328 }
2329 }
2330
2331 static void rtw89_core_hw_to_sband_rate(struct ieee80211_rx_status *rx_status)
2332 {
2333 if (rx_status->band == NL80211_BAND_2GHZ ||
2334 rx_status->encoding != RX_ENC_LEGACY)
2335 return;
2336
2337 /* Some control frames' freq (ACKs in this case) is reported wrongly
2338 * due to FW notify timing; set the lowest rate to prevent overflow.
2339 */
2340 if (rx_status->rate_idx < RTW89_HW_RATE_OFDM6) {
2341 rx_status->rate_idx = 0;
2342 return;
2343 }
2344
2345 /* No 4 CCK rates for non-2G */
2346 rx_status->rate_idx -= 4;
2347 }
2348
2349 static
2350 void rtw89_core_update_rx_status_by_ppdu(struct rtw89_dev *rtwdev,
2351 struct ieee80211_rx_status *rx_status,
2352 struct rtw89_rx_phy_ppdu *phy_ppdu)
2353 {
2354 if (!(rtwdev->hw->conf.flags & IEEE80211_CONF_MONITOR))
2355 return;
2356
2357 if (!phy_ppdu)
2358 return;
2359
2360 if (phy_ppdu->ldpc)
2361 rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
2362 if (phy_ppdu->stbc)
2363 rx_status->enc_flags |= u8_encode_bits(1, RX_ENC_FLAG_STBC_MASK);
2364 }
2365
2366 static const u8 rx_status_bw_to_radiotap_eht_usig[] = {
2367 [RATE_INFO_BW_20] = IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_20MHZ,
2368 [RATE_INFO_BW_5] = U8_MAX,
2369 [RATE_INFO_BW_10] = U8_MAX,
2370 [RATE_INFO_BW_40] = IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_40MHZ,
2371 [RATE_INFO_BW_80] = IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_80MHZ,
2372 [RATE_INFO_BW_160] = IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_160MHZ,
2373 [RATE_INFO_BW_HE_RU] = U8_MAX,
2374 [RATE_INFO_BW_320] = IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_320MHZ_1,
2375 [RATE_INFO_BW_EHT_RU] = U8_MAX,
2376 };
2377
2378 static void rtw89_core_update_radiotap_eht(struct rtw89_dev *rtwdev,
2379 struct sk_buff *skb,
2380 struct ieee80211_rx_status *rx_status)
2381 {
2382 struct ieee80211_radiotap_eht_usig *usig;
2383 struct ieee80211_radiotap_eht *eht;
2384 struct ieee80211_radiotap_tlv *tlv;
2385 int eht_len = struct_size(eht, user_info, 1);
2386 int usig_len = sizeof(*usig);
2387 int len;
2388 u8 bw;
2389
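/* Reserve room for two radiotap TLVs, EHT (with one user info) and U-SIG,
 * each padded to a 4-byte boundary.
 */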
2390 len = sizeof(*tlv) + ALIGN(eht_len, 4) +
2391 sizeof(*tlv) + ALIGN(usig_len, 4);
2392
2393 rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
2394 skb_reset_mac_header(skb);
2395
2396 /* EHT */
2397 tlv = skb_push(skb, len);
2398 memset(tlv, 0, len);
2399 tlv->type = cpu_to_le16(IEEE80211_RADIOTAP_EHT);
2400 tlv->len = cpu_to_le16(eht_len);
2401
2402 eht = (struct ieee80211_radiotap_eht *)tlv->data;
2403 eht->known = cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_GI);
2404 eht->data[0] =
2405 le32_encode_bits(rx_status->eht.gi, IEEE80211_RADIOTAP_EHT_DATA0_GI);
2406
2407 eht->user_info[0] =
2408 cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN |
2409 IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O |
2410 IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN);
2411 eht->user_info[0] |=
2412 le32_encode_bits(rx_status->rate_idx, IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) |
2413 le32_encode_bits(rx_status->nss, IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O);
2414 if (rx_status->enc_flags & RX_ENC_FLAG_LDPC)
2415 eht->user_info[0] |=
2416 cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_CODING);
2417
2418 /* U-SIG */
2419 tlv = (void *)tlv + sizeof(*tlv) + ALIGN(eht_len, 4);
2420 tlv->type = cpu_to_le16(IEEE80211_RADIOTAP_EHT_USIG);
2421 tlv->len = cpu_to_le16(usig_len);
2422
2423 if (rx_status->bw >= ARRAY_SIZE(rx_status_bw_to_radiotap_eht_usig))
2424 return;
2425
2426 bw = rx_status_bw_to_radiotap_eht_usig[rx_status->bw];
2427 if (bw == U8_MAX)
2428 return;
2429
2430 usig = (struct ieee80211_radiotap_eht_usig *)tlv->data;
2431 usig->common =
2432 le32_encode_bits(1, IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN) |
2433 le32_encode_bits(bw, IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW);
2434 }
2435
2436 static void rtw89_core_update_radiotap(struct rtw89_dev *rtwdev,
2437 struct sk_buff *skb,
2438 struct ieee80211_rx_status *rx_status)
2439 {
2440 static const struct ieee80211_radiotap_he known_he = {
2441 .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
2442 IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN |
2443 IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
2444 IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
2445 .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
2446 };
2447 struct ieee80211_radiotap_he *he;
2448
2449 if (!(rtwdev->hw->conf.flags & IEEE80211_CONF_MONITOR))
2450 return;
2451
2452 if (rx_status->encoding == RX_ENC_HE) {
2453 rx_status->flag |= RX_FLAG_RADIOTAP_HE;
2454 he = skb_push(skb, sizeof(*he));
2455 *he = known_he;
2456 } else if (rx_status->encoding == RX_ENC_EHT) {
2457 rtw89_core_update_radiotap_eht(rtwdev, skb, rx_status);
2458 }
2459 }
2460
2461 static void rtw89_core_validate_rx_signal(struct ieee80211_rx_status *rx_status)
2462 {
2463 if (!rx_status->signal)
2464 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2465 }
2466
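/* While scanning, the reported RX frequency may not match the channel a
 * beacon/probe response was actually sent on; for chips that need it,
 * recover the channel from the frame's IEs.
 */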
2467 static void rtw89_core_update_rx_freq_from_ie(struct rtw89_dev *rtwdev,
2468 struct sk_buff *skb,
2469 struct ieee80211_rx_status *rx_status)
2470 {
2471 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
2472 size_t hdr_len, ielen;
2473 u8 *variable;
2474 int chan;
2475
2476 if (!rtwdev->chip->rx_freq_frome_ie)
2477 return;
2478
2479 if (!rtwdev->scanning)
2480 return;
2481
2482 if (ieee80211_is_beacon(mgmt->frame_control)) {
2483 variable = mgmt->u.beacon.variable;
2484 hdr_len = offsetof(struct ieee80211_mgmt,
2485 u.beacon.variable);
2486 } else if (ieee80211_is_probe_resp(mgmt->frame_control)) {
2487 variable = mgmt->u.probe_resp.variable;
2488 hdr_len = offsetof(struct ieee80211_mgmt,
2489 u.probe_resp.variable);
2490 } else {
2491 return;
2492 }
2493
2494 if (skb->len > hdr_len)
2495 ielen = skb->len - hdr_len;
2496 else
2497 return;
2498
2499 /* cfg80211_get_ies_channel_number() parses 2 GHz and 5 GHz IEs the same
2500 * way, so passing NL80211_BAND_2GHZ here also covers the 5 GHz case.
2501 */
2502 chan = cfg80211_get_ies_channel_number(variable, ielen, NL80211_BAND_2GHZ);
2503 if (chan == -1)
2504 return;
2505
2506 rx_status->band = chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G;
2507 rx_status->freq = ieee80211_channel_to_frequency(chan, rx_status->band);
2508 }
2509
2510 static void rtw89_core_correct_mcc_chan(struct rtw89_dev *rtwdev,
2511 struct rtw89_rx_desc_info *desc_info,
2512 struct ieee80211_rx_status *rx_status,
2513 struct rtw89_rx_phy_ppdu *phy_ppdu)
2514 {
2515 enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
2516 struct rtw89_vif_link *rtwvif_link;
2517 struct rtw89_sta_link *rtwsta_link;
2518 const struct rtw89_chan *chan;
2519 u8 mac_id = desc_info->mac_id;
2520 enum rtw89_entity_mode mode;
2521 enum nl80211_band band;
2522
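/* In MCC, links can operate on different channels, so report the frequency
 * of the channel context that the receiving link is on.
 */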
2523 mode = rtw89_get_entity_mode(rtwdev);
2524 if (likely(mode != RTW89_ENTITY_MODE_MCC))
2525 return;
2526
2527 if (chip_gen == RTW89_CHIP_BE && phy_ppdu)
2528 mac_id = phy_ppdu->mac_id;
2529
2530 rcu_read_lock();
2531
2532 rtwsta_link = rtw89_assoc_link_rcu_dereference(rtwdev, mac_id);
2533 if (!rtwsta_link)
2534 goto out;
2535
2536 rtwvif_link = rtwsta_link->rtwvif_link;
2537 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
2538 band = rtw89_hw_to_nl80211_band(chan->band_type);
2539 rx_status->freq = ieee80211_channel_to_frequency(chan->primary_channel, band);
2540
2541 out:
2542 rcu_read_unlock();
2543 }
2544
2545 static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
2546 struct rtw89_rx_phy_ppdu *phy_ppdu,
2547 struct rtw89_rx_desc_info *desc_info,
2548 struct sk_buff *skb_ppdu,
2549 struct ieee80211_rx_status *rx_status)
2550 {
2551 struct napi_struct *napi = &rtwdev->napi;
2552
2553 /* In low power mode, NAPI isn't scheduled; pass NULL so the frame goes to netif directly. */
2554 if (unlikely(!napi_is_scheduled(napi)))
2555 napi = NULL;
2556
2557 rtw89_core_hw_to_sband_rate(rx_status);
2558 rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu);
2559 rtw89_core_update_rx_status_by_ppdu(rtwdev, rx_status, phy_ppdu);
2560 rtw89_core_update_radiotap(rtwdev, skb_ppdu, rx_status);
2561 rtw89_core_validate_rx_signal(rx_status);
2562 rtw89_core_update_rx_freq_from_ie(rtwdev, skb_ppdu, rx_status);
2563 rtw89_core_correct_mcc_chan(rtwdev, desc_info, rx_status, phy_ppdu);
2564
2565 /* In low power mode, RX runs in thread context, so disable BH around the indication. */
2566 local_bh_disable();
2567 ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, napi);
2568 local_bh_enable();
2569 rtwdev->napi_budget_countdown--;
2570 }
2571
2572 static void rtw89_core_rx_pending_skb(struct rtw89_dev *rtwdev,
2573 struct rtw89_rx_phy_ppdu *phy_ppdu,
2574 struct rtw89_rx_desc_info *desc_info,
2575 struct sk_buff *skb)
2576 {
2577 u8 band = desc_info->bb_sel ? RTW89_PHY_1 : RTW89_PHY_0;
2578 int curr = rtwdev->ppdu_sts.curr_rx_ppdu_cnt[band];
2579 struct sk_buff *skb_ppdu = NULL, *tmp;
2580 struct ieee80211_rx_status *rx_status;
2581
2582 if (curr > RTW89_MAX_PPDU_CNT)
2583 return;
2584
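/* Hand any frames queued for this PPDU to mac80211, attaching the PHY
 * status to those that match this PPDU status report.
 */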
2585 skb_queue_walk_safe(&rtwdev->ppdu_sts.rx_queue[band], skb_ppdu, tmp) {
2586 skb_unlink(skb_ppdu, &rtwdev->ppdu_sts.rx_queue[band]);
2587 rx_status = IEEE80211_SKB_RXCB(skb_ppdu);
2588 if (rtw89_core_rx_ppdu_match(rtwdev, desc_info, rx_status))
2589 rtw89_chip_query_ppdu(rtwdev, phy_ppdu, rx_status);
2590 rtw89_correct_cck_chan(rtwdev, rx_status);
2591 rtw89_core_rx_to_mac80211(rtwdev, phy_ppdu, desc_info, skb_ppdu, rx_status);
2592 }
2593 }
2594
2595 static void rtw89_core_rx_process_ppdu_sts(struct rtw89_dev *rtwdev,
2596 struct rtw89_rx_desc_info *desc_info,
2597 struct sk_buff *skb)
2598 {
2599 struct rtw89_rx_phy_ppdu phy_ppdu = {.buf = skb->data, .valid = false,
2600 .len = skb->len,
2601 .to_self = desc_info->addr1_match,
2602 .rate = desc_info->data_rate,
2603 .mac_id = desc_info->mac_id,
2604 .phy_idx = desc_info->bb_sel};
2605 int ret;
2606
2607 if (desc_info->mac_info_valid) {
2608 ret = rtw89_core_rx_process_mac_ppdu(rtwdev, skb, &phy_ppdu);
2609 if (ret)
2610 goto out;
2611 }
2612
2613 ret = rtw89_core_rx_process_phy_ppdu(rtwdev, &phy_ppdu);
2614 if (ret)
2615 goto out;
2616
2617 rtw89_core_rx_process_phy_sts(rtwdev, &phy_ppdu);
2618
2619 out:
2620 rtw89_core_rx_pending_skb(rtwdev, &phy_ppdu, desc_info, skb);
2621 dev_kfree_skb_any(skb);
2622 }
2623
2624 static void rtw89_core_rx_process_report(struct rtw89_dev *rtwdev,
2625 struct rtw89_rx_desc_info *desc_info,
2626 struct sk_buff *skb)
2627 {
2628 switch (desc_info->pkt_type) {
2629 case RTW89_CORE_RX_TYPE_C2H:
2630 rtw89_fw_c2h_irqsafe(rtwdev, skb);
2631 break;
2632 case RTW89_CORE_RX_TYPE_PPDU_STAT:
2633 rtw89_core_rx_process_ppdu_sts(rtwdev, desc_info, skb);
2634 break;
2635 default:
2636 rtw89_debug(rtwdev, RTW89_DBG_TXRX, "unhandled pkt_type=%d\n",
2637 desc_info->pkt_type);
2638 dev_kfree_skb_any(skb);
2639 break;
2640 }
2641 }
2642
2643 void rtw89_core_query_rxdesc(struct rtw89_dev *rtwdev,
2644 struct rtw89_rx_desc_info *desc_info,
2645 u8 *data, u32 data_offset)
2646 {
2647 const struct rtw89_chip_info *chip = rtwdev->chip;
2648 struct rtw89_rxdesc_short *rxd_s;
2649 struct rtw89_rxdesc_long *rxd_l;
2650 u8 shift_len, drv_info_len;
2651
2652 rxd_s = (struct rtw89_rxdesc_short *)(data + data_offset);
2653 desc_info->pkt_size = le32_get_bits(rxd_s->dword0, AX_RXD_RPKT_LEN_MASK);
2654 desc_info->drv_info_size = le32_get_bits(rxd_s->dword0, AX_RXD_DRV_INFO_SIZE_MASK);
2655 desc_info->long_rxdesc = le32_get_bits(rxd_s->dword0, AX_RXD_LONG_RXD);
2656 desc_info->pkt_type = le32_get_bits(rxd_s->dword0, AX_RXD_RPKT_TYPE_MASK);
2657 desc_info->mac_info_valid = le32_get_bits(rxd_s->dword0, AX_RXD_MAC_INFO_VLD);
2658 if (chip->chip_id == RTL8852C)
2659 desc_info->bw = le32_get_bits(rxd_s->dword1, AX_RXD_BW_v1_MASK);
2660 else
2661 desc_info->bw = le32_get_bits(rxd_s->dword1, AX_RXD_BW_MASK);
2662 desc_info->data_rate = le32_get_bits(rxd_s->dword1, AX_RXD_RX_DATARATE_MASK);
2663 desc_info->gi_ltf = le32_get_bits(rxd_s->dword1, AX_RXD_RX_GI_LTF_MASK);
2664 desc_info->user_id = le32_get_bits(rxd_s->dword1, AX_RXD_USER_ID_MASK);
2665 desc_info->sr_en = le32_get_bits(rxd_s->dword1, AX_RXD_SR_EN);
2666 desc_info->ppdu_cnt = le32_get_bits(rxd_s->dword1, AX_RXD_PPDU_CNT_MASK);
2667 desc_info->ppdu_type = le32_get_bits(rxd_s->dword1, AX_RXD_PPDU_TYPE_MASK);
2668 desc_info->free_run_cnt = le32_get_bits(rxd_s->dword2, AX_RXD_FREERUN_CNT_MASK);
2669 desc_info->icv_err = le32_get_bits(rxd_s->dword3, AX_RXD_ICV_ERR);
2670 desc_info->crc32_err = le32_get_bits(rxd_s->dword3, AX_RXD_CRC32_ERR);
2671 desc_info->hw_dec = le32_get_bits(rxd_s->dword3, AX_RXD_HW_DEC);
2672 desc_info->sw_dec = le32_get_bits(rxd_s->dword3, AX_RXD_SW_DEC);
2673 desc_info->addr1_match = le32_get_bits(rxd_s->dword3, AX_RXD_A1_MATCH);
2674
2675 shift_len = desc_info->shift << 1; /* 2-byte unit */
2676 drv_info_len = desc_info->drv_info_size << 3; /* 8-byte unit */
2677 desc_info->offset = data_offset + shift_len + drv_info_len;
2678 if (desc_info->long_rxdesc)
2679 desc_info->rxd_len = sizeof(struct rtw89_rxdesc_long);
2680 else
2681 desc_info->rxd_len = sizeof(struct rtw89_rxdesc_short);
2682 desc_info->ready = true;
2683
2684 if (!desc_info->long_rxdesc)
2685 return;
2686
2687 rxd_l = (struct rtw89_rxdesc_long *)(data + data_offset);
2688 desc_info->frame_type = le32_get_bits(rxd_l->dword4, AX_RXD_TYPE_MASK);
2689 desc_info->addr_cam_valid = le32_get_bits(rxd_l->dword5, AX_RXD_ADDR_CAM_VLD);
2690 desc_info->addr_cam_id = le32_get_bits(rxd_l->dword5, AX_RXD_ADDR_CAM_MASK);
2691 desc_info->sec_cam_id = le32_get_bits(rxd_l->dword5, AX_RXD_SEC_CAM_IDX_MASK);
2692 desc_info->mac_id = le32_get_bits(rxd_l->dword5, AX_RXD_MAC_ID_MASK);
2693 desc_info->rx_pl_id = le32_get_bits(rxd_l->dword5, AX_RXD_RX_PL_ID_MASK);
2694 }
2695 EXPORT_SYMBOL(rtw89_core_query_rxdesc);
2696
2697 void rtw89_core_query_rxdesc_v2(struct rtw89_dev *rtwdev,
2698 struct rtw89_rx_desc_info *desc_info,
2699 u8 *data, u32 data_offset)
2700 {
2701 struct rtw89_rxdesc_phy_rpt_v2 *rxd_rpt;
2702 struct rtw89_rxdesc_short_v2 *rxd_s;
2703 struct rtw89_rxdesc_long_v2 *rxd_l;
2704 u16 shift_len, drv_info_len, phy_rtp_len, hdr_cnv_len;
2705
2706 rxd_s = (struct rtw89_rxdesc_short_v2 *)(data + data_offset);
2707
2708 desc_info->pkt_size = le32_get_bits(rxd_s->dword0, BE_RXD_RPKT_LEN_MASK);
2709 desc_info->drv_info_size = le32_get_bits(rxd_s->dword0, BE_RXD_DRV_INFO_SZ_MASK);
2710 desc_info->phy_rpt_size = le32_get_bits(rxd_s->dword0, BE_RXD_PHY_RPT_SZ_MASK);
2711 desc_info->hdr_cnv_size = le32_get_bits(rxd_s->dword0, BE_RXD_HDR_CNV_SZ_MASK);
2712 desc_info->shift = le32_get_bits(rxd_s->dword0, BE_RXD_SHIFT_MASK);
2713 desc_info->long_rxdesc = le32_get_bits(rxd_s->dword0, BE_RXD_LONG_RXD);
2714 desc_info->pkt_type = le32_get_bits(rxd_s->dword0, BE_RXD_RPKT_TYPE_MASK);
2715 desc_info->bb_sel = le32_get_bits(rxd_s->dword0, BE_RXD_BB_SEL);
2716 if (desc_info->pkt_type == RTW89_CORE_RX_TYPE_PPDU_STAT)
2717 desc_info->mac_info_valid = true;
2718
2719 desc_info->frame_type = le32_get_bits(rxd_s->dword2, BE_RXD_TYPE_MASK);
2720 desc_info->mac_id = le32_get_bits(rxd_s->dword2, BE_RXD_MAC_ID_MASK);
2721 desc_info->addr_cam_valid = le32_get_bits(rxd_s->dword2, BE_RXD_ADDR_CAM_VLD);
2722
2723 desc_info->icv_err = le32_get_bits(rxd_s->dword3, BE_RXD_ICV_ERR);
2724 desc_info->crc32_err = le32_get_bits(rxd_s->dword3, BE_RXD_CRC32_ERR);
2725 desc_info->hw_dec = le32_get_bits(rxd_s->dword3, BE_RXD_HW_DEC);
2726 desc_info->sw_dec = le32_get_bits(rxd_s->dword3, BE_RXD_SW_DEC);
2727 desc_info->addr1_match = le32_get_bits(rxd_s->dword3, BE_RXD_A1_MATCH);
2728
2729 desc_info->bw = le32_get_bits(rxd_s->dword4, BE_RXD_BW_MASK);
2730 desc_info->data_rate = le32_get_bits(rxd_s->dword4, BE_RXD_RX_DATARATE_MASK);
2731 desc_info->gi_ltf = le32_get_bits(rxd_s->dword4, BE_RXD_RX_GI_LTF_MASK);
2732 desc_info->ppdu_cnt = le32_get_bits(rxd_s->dword4, BE_RXD_PPDU_CNT_MASK);
2733 desc_info->ppdu_type = le32_get_bits(rxd_s->dword4, BE_RXD_PPDU_TYPE_MASK);
2734
2735 desc_info->free_run_cnt = le32_to_cpu(rxd_s->dword5);
2736
2737 shift_len = desc_info->shift << 1; /* 2-byte unit */
2738 drv_info_len = desc_info->drv_info_size << 3; /* 8-byte unit */
2739 phy_rtp_len = desc_info->phy_rpt_size << 3; /* 8-byte unit */
2740 hdr_cnv_len = desc_info->hdr_cnv_size << 4; /* 16-byte unit */
2741 desc_info->offset = data_offset + shift_len + drv_info_len +
2742 phy_rtp_len + hdr_cnv_len;
2743
2744 if (desc_info->long_rxdesc)
2745 desc_info->rxd_len = sizeof(struct rtw89_rxdesc_long_v2);
2746 else
2747 desc_info->rxd_len = sizeof(struct rtw89_rxdesc_short_v2);
2748 desc_info->ready = true;
2749
2750 if (phy_rtp_len == sizeof(*rxd_rpt)) {
2751 rxd_rpt = (struct rtw89_rxdesc_phy_rpt_v2 *)(data + data_offset +
2752 desc_info->rxd_len);
2753 desc_info->rssi = le32_get_bits(rxd_rpt->dword0, BE_RXD_PHY_RSSI);
2754 }
2755
2756 if (!desc_info->long_rxdesc)
2757 return;
2758
2759 rxd_l = (struct rtw89_rxdesc_long_v2 *)(data + data_offset);
2760
2761 desc_info->sr_en = le32_get_bits(rxd_l->dword6, BE_RXD_SR_EN);
2762 desc_info->user_id = le32_get_bits(rxd_l->dword6, BE_RXD_USER_ID_MASK);
2763 desc_info->addr_cam_id = le32_get_bits(rxd_l->dword6, BE_RXD_ADDR_CAM_MASK);
2764 desc_info->sec_cam_id = le32_get_bits(rxd_l->dword6, BE_RXD_SEC_CAM_IDX_MASK);
2765
2766 desc_info->rx_pl_id = le32_get_bits(rxd_l->dword7, BE_RXD_RX_PL_ID_MASK);
2767 }
2768 EXPORT_SYMBOL(rtw89_core_query_rxdesc_v2);
2769
2770 struct rtw89_core_iter_rx_status {
2771 struct rtw89_dev *rtwdev;
2772 struct ieee80211_rx_status *rx_status;
2773 struct rtw89_rx_desc_info *desc_info;
2774 u8 mac_id;
2775 };
2776
2777 static
2778 void rtw89_core_stats_sta_rx_status_iter(void *data, struct ieee80211_sta *sta)
2779 {
2780 struct rtw89_core_iter_rx_status *iter_data =
2781 (struct rtw89_core_iter_rx_status *)data;
2782 struct ieee80211_rx_status *rx_status = iter_data->rx_status;
2783 struct rtw89_rx_desc_info *desc_info = iter_data->desc_info;
2784 struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
2785 struct rtw89_sta_link *rtwsta_link;
2786 u8 mac_id = iter_data->mac_id;
2787
2788 rtwsta_link = rtw89_sta_get_link_inst(rtwsta, desc_info->bb_sel);
2789 if (unlikely(!rtwsta_link))
2790 return;
2791
2792 if (mac_id != rtwsta_link->mac_id)
2793 return;
2794
2795 rtwsta_link->rx_status = *rx_status;
2796 rtwsta_link->rx_hw_rate = desc_info->data_rate;
2797 }
2798
2799 static void rtw89_core_stats_sta_rx_status(struct rtw89_dev *rtwdev,
2800 struct rtw89_rx_desc_info *desc_info,
2801 struct ieee80211_rx_status *rx_status)
2802 {
2803 struct rtw89_core_iter_rx_status iter_data;
2804
2805 if (!desc_info->addr1_match || !desc_info->long_rxdesc)
2806 return;
2807
2808 if (desc_info->frame_type != RTW89_RX_TYPE_DATA)
2809 return;
2810
2811 iter_data.rtwdev = rtwdev;
2812 iter_data.rx_status = rx_status;
2813 iter_data.desc_info = desc_info;
2814 iter_data.mac_id = desc_info->mac_id;
2815 ieee80211_iterate_stations_atomic(rtwdev->hw,
2816 rtw89_core_stats_sta_rx_status_iter,
2817 &iter_data);
2818 }
2819
2820 static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
2821 struct sk_buff *skb,
2822 struct rtw89_rx_desc_info *desc_info,
2823 struct ieee80211_rx_status *rx_status)
2824 {
2825 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2826 const struct cfg80211_chan_def *chandef =
2827 rtw89_chandef_get(rtwdev, RTW89_CHANCTX_0);
2828 u16 data_rate;
2829 u8 data_rate_mode;
2830 bool eht = false;
2831 u8 gi;
2832
2833 /* currently using single PHY */
2834 rx_status->freq = chandef->chan->center_freq;
2835 rx_status->band = chandef->chan->band;
2836
2837 if (ieee80211_is_beacon(hdr->frame_control) ||
2838 ieee80211_is_probe_resp(hdr->frame_control))
2839 rx_status->boottime_ns = ktime_get_boottime_ns();
2840
2841 if (rtwdev->scanning &&
2842 RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) {
2843 const struct rtw89_chan *cur = rtw89_scan_chan_get(rtwdev);
2844 u8 chan = cur->primary_channel;
2845 u8 band = cur->band_type;
2846 enum nl80211_band nl_band;
2847
2848 nl_band = rtw89_hw_to_nl80211_band(band);
2849 rx_status->freq = ieee80211_channel_to_frequency(chan, nl_band);
2850 rx_status->band = nl_band;
2851 }
2852
2853 if (desc_info->icv_err || desc_info->crc32_err)
2854 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
2855
2856 if (desc_info->hw_dec &&
2857 !(desc_info->sw_dec || desc_info->icv_err))
2858 rx_status->flag |= RX_FLAG_DECRYPTED;
2859
2860 rx_status->bw = rtw89_hw_to_rate_info_bw(desc_info->bw);
2861
2862 data_rate = desc_info->data_rate;
2863 data_rate_mode = rtw89_get_data_rate_mode(rtwdev, data_rate);
2864 if (data_rate_mode == DATA_RATE_MODE_NON_HT) {
2865 rx_status->encoding = RX_ENC_LEGACY;
2866 rx_status->rate_idx = rtw89_get_data_not_ht_idx(rtwdev, data_rate);
2867 /* convert rate_idx after we get the correct band */
2868 } else if (data_rate_mode == DATA_RATE_MODE_HT) {
2869 rx_status->encoding = RX_ENC_HT;
2870 rx_status->rate_idx = rtw89_get_data_ht_mcs(rtwdev, data_rate);
2871 if (desc_info->gi_ltf)
2872 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2873 } else if (data_rate_mode == DATA_RATE_MODE_VHT) {
2874 rx_status->encoding = RX_ENC_VHT;
2875 rx_status->rate_idx = rtw89_get_data_mcs(rtwdev, data_rate);
2876 rx_status->nss = rtw89_get_data_nss(rtwdev, data_rate) + 1;
2877 if (desc_info->gi_ltf)
2878 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2879 } else if (data_rate_mode == DATA_RATE_MODE_HE) {
2880 rx_status->encoding = RX_ENC_HE;
2881 rx_status->rate_idx = rtw89_get_data_mcs(rtwdev, data_rate);
2882 rx_status->nss = rtw89_get_data_nss(rtwdev, data_rate) + 1;
2883 } else if (data_rate_mode == DATA_RATE_MODE_EHT) {
2884 rx_status->encoding = RX_ENC_EHT;
2885 rx_status->rate_idx = rtw89_get_data_mcs(rtwdev, data_rate);
2886 rx_status->nss = rtw89_get_data_nss(rtwdev, data_rate) + 1;
2887 eht = true;
2888 } else {
2889 rtw89_warn(rtwdev, "invalid RX rate mode %d\n", data_rate_mode);
2890 }
2891
2892 /* he_gi is used to match ppdu, so we always fill it. */
2893 gi = rtw89_rxdesc_to_nl_he_eht_gi(rtwdev, desc_info->gi_ltf, true, eht);
2894 if (eht)
2895 rx_status->eht.gi = gi;
2896 else
2897 rx_status->he_gi = gi;
2898 rx_status->flag |= RX_FLAG_MACTIME_START;
2899 rx_status->mactime = desc_info->free_run_cnt;
2900
2901 rtw89_chip_phy_rpt_to_rssi(rtwdev, desc_info, rx_status);
2902 rtw89_core_stats_sta_rx_status(rtwdev, desc_info, rx_status);
2903 }
2904
2905 static enum rtw89_ps_mode rtw89_update_ps_mode(struct rtw89_dev *rtwdev)
2906 {
2907 const struct rtw89_chip_info *chip = rtwdev->chip;
2908
2909 if (rtwdev->hci.type != RTW89_HCI_TYPE_PCIE)
2910 return RTW89_PS_MODE_NONE;
2911
2912 if (rtw89_disable_ps_mode || !chip->ps_mode_supported ||
2913 RTW89_CHK_FW_FEATURE(NO_DEEP_PS, &rtwdev->fw))
2914 return RTW89_PS_MODE_NONE;
2915
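/* Prefer the deepest supported power save mode: power gating, then clock
 * gating, then RF off.
 */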
2916 if ((chip->ps_mode_supported & BIT(RTW89_PS_MODE_PWR_GATED)) &&
2917 !RTW89_CHK_FW_FEATURE(NO_LPS_PG, &rtwdev->fw))
2918 return RTW89_PS_MODE_PWR_GATED;
2919
2920 if (chip->ps_mode_supported & BIT(RTW89_PS_MODE_CLK_GATED))
2921 return RTW89_PS_MODE_CLK_GATED;
2922
2923 if (chip->ps_mode_supported & BIT(RTW89_PS_MODE_RFOFF))
2924 return RTW89_PS_MODE_RFOFF;
2925
2926 return RTW89_PS_MODE_NONE;
2927 }
2928
2929 static void rtw89_core_flush_ppdu_rx_queue(struct rtw89_dev *rtwdev,
2930 struct rtw89_rx_desc_info *desc_info)
2931 {
2932 struct rtw89_ppdu_sts_info *ppdu_sts = &rtwdev->ppdu_sts;
2933 u8 band = desc_info->bb_sel ? RTW89_PHY_1 : RTW89_PHY_0;
2934 struct ieee80211_rx_status *rx_status;
2935 struct sk_buff *skb_ppdu, *tmp;
2936
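/* Deliver frames still queued for the previous PPDU without PHY status,
 * since no matching PPDU status report was processed for them.
 */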
2937 skb_queue_walk_safe(&ppdu_sts->rx_queue[band], skb_ppdu, tmp) {
2938 skb_unlink(skb_ppdu, &ppdu_sts->rx_queue[band]);
2939 rx_status = IEEE80211_SKB_RXCB(skb_ppdu);
2940 rtw89_core_rx_to_mac80211(rtwdev, NULL, desc_info, skb_ppdu, rx_status);
2941 }
2942 }
2943
2944 static
2945 void rtw89_core_rx_pkt_hdl(struct rtw89_dev *rtwdev, const struct sk_buff *skb,
2946 const struct rtw89_rx_desc_info *desc)
2947 {
2948 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2949 struct rtw89_sta_link *rtwsta_link;
2950 struct ieee80211_sta *sta;
2951 struct rtw89_sta *rtwsta;
2952 u8 macid = desc->mac_id;
2953
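/* Skip unless AP/GO info is in use: this handles PS-Poll and U-APSD trigger
 * frames from remote stations in power save.
 */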
2954 if (!refcount_read(&rtwdev->refcount_ap_info))
2955 return;
2956
2957 rcu_read_lock();
2958
2959 rtwsta_link = rtw89_assoc_link_rcu_dereference(rtwdev, macid);
2960 if (!rtwsta_link)
2961 goto out;
2962
2963 rtwsta = rtwsta_link->rtwsta;
2964 if (!test_bit(RTW89_REMOTE_STA_IN_PS, rtwsta->flags))
2965 goto out;
2966
2967 sta = rtwsta_to_sta(rtwsta);
2968 if (ieee80211_is_pspoll(hdr->frame_control))
2969 ieee80211_sta_pspoll(sta);
2970 else if (ieee80211_has_pm(hdr->frame_control) &&
2971 (ieee80211_is_data_qos(hdr->frame_control) ||
2972 ieee80211_is_qos_nullfunc(hdr->frame_control)))
2973 ieee80211_sta_uapsd_trigger(sta, ieee80211_get_tid(hdr));
2974
2975 out:
2976 rcu_read_unlock();
2977 }
2978
2979 void rtw89_core_rx(struct rtw89_dev *rtwdev,
2980 struct rtw89_rx_desc_info *desc_info,
2981 struct sk_buff *skb)
2982 {
2983 struct ieee80211_rx_status *rx_status;
2984 struct rtw89_ppdu_sts_info *ppdu_sts = &rtwdev->ppdu_sts;
2985 u8 ppdu_cnt = desc_info->ppdu_cnt;
2986 u8 band = desc_info->bb_sel ? RTW89_PHY_1 : RTW89_PHY_0;
2987
2988 if (desc_info->pkt_type != RTW89_CORE_RX_TYPE_WIFI) {
2989 rtw89_core_rx_process_report(rtwdev, desc_info, skb);
2990 return;
2991 }
2992
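/* The PPDU counter advanced, so frames still queued for the previous PPDU
 * no longer have a status report to wait for; flush them before tracking
 * the new PPDU.
 */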
2993 if (ppdu_sts->curr_rx_ppdu_cnt[band] != ppdu_cnt) {
2994 rtw89_core_flush_ppdu_rx_queue(rtwdev, desc_info);
2995 ppdu_sts->curr_rx_ppdu_cnt[band] = ppdu_cnt;
2996 }
2997
2998 rx_status = IEEE80211_SKB_RXCB(skb);
2999 memset(rx_status, 0, sizeof(*rx_status));
3000 rtw89_core_update_rx_status(rtwdev, skb, desc_info, rx_status);
3001 rtw89_core_rx_pkt_hdl(rtwdev, skb, desc_info);
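/* Frames whose type is in PPDU_FILTER_BITMAP wait in the per-band queue for
 * their PPDU status report; everything else goes to mac80211 right away.
 */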
3002 if (desc_info->long_rxdesc &&
3003 BIT(desc_info->frame_type) & PPDU_FILTER_BITMAP)
3004 skb_queue_tail(&ppdu_sts->rx_queue[band], skb);
3005 else
3006 rtw89_core_rx_to_mac80211(rtwdev, NULL, desc_info, skb, rx_status);
3007 }
3008 EXPORT_SYMBOL(rtw89_core_rx);
3009
3010 void rtw89_core_napi_start(struct rtw89_dev *rtwdev)
3011 {
3012 if (test_and_set_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
3013 return;
3014
3015 napi_enable(&rtwdev->napi);
3016 }
3017 EXPORT_SYMBOL(rtw89_core_napi_start);
3018
3019 void rtw89_core_napi_stop(struct rtw89_dev *rtwdev)
3020 {
3021 if (!test_and_clear_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
3022 return;
3023
3024 napi_synchronize(&rtwdev->napi);
3025 napi_disable(&rtwdev->napi);
3026 }
3027 EXPORT_SYMBOL(rtw89_core_napi_stop);
3028
3029 int rtw89_core_napi_init(struct rtw89_dev *rtwdev)
3030 {
3031 rtwdev->netdev = alloc_netdev_dummy(0);
3032 if (!rtwdev->netdev)
3033 return -ENOMEM;
3034
3035 netif_napi_add(rtwdev->netdev, &rtwdev->napi,
3036 rtwdev->hci.ops->napi_poll);
3037 return 0;
3038 }
3039 EXPORT_SYMBOL(rtw89_core_napi_init);
3040
3041 void rtw89_core_napi_deinit(struct rtw89_dev *rtwdev)
3042 {
3043 rtw89_core_napi_stop(rtwdev);
3044 netif_napi_del(&rtwdev->napi);
3045 free_netdev(rtwdev->netdev);
3046 }
3047 EXPORT_SYMBOL(rtw89_core_napi_deinit);
3048
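/* Note: TX BA sessions are started from this work item rather than from the
 * TX path. Each queued rtw89_txq is taken off ba_list under ba_lock; entries
 * whose STA is gone or disassociating are skipped, and an -EINVAL from
 * ieee80211_start_tx_ba_session() marks the queue RTW89_TXQ_F_BLOCK_BA so it
 * is not retried.
 */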
3049 static void rtw89_core_ba_work(struct work_struct *work)
3050 {
3051 struct rtw89_dev *rtwdev =
3052 container_of(work, struct rtw89_dev, ba_work);
3053 struct rtw89_txq *rtwtxq, *tmp;
3054 int ret;
3055
3056 spin_lock_bh(&rtwdev->ba_lock);
3057 list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->ba_list, list) {
3058 struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq);
3059 struct ieee80211_sta *sta = txq->sta;
3060 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
3061 u8 tid = txq->tid;
3062
3063 if (!sta) {
3064 rtw89_warn(rtwdev, "cannot start BA without sta\n");
3065 goto skip_ba_work;
3066 }
3067
3068 if (rtwsta->disassoc) {
3069 rtw89_debug(rtwdev, RTW89_DBG_TXRX,
3070 "cannot start BA with disassoc sta\n");
3071 goto skip_ba_work;
3072 }
3073
3074 ret = ieee80211_start_tx_ba_session(sta, tid, 0);
3075 if (ret) {
3076 rtw89_debug(rtwdev, RTW89_DBG_TXRX,
3077 "failed to setup BA session for %pM:%2d: %d\n",
3078 sta->addr, tid, ret);
3079 if (ret == -EINVAL)
3080 set_bit(RTW89_TXQ_F_BLOCK_BA, &rtwtxq->flags);
3081 }
3082 skip_ba_work:
3083 list_del_init(&rtwtxq->list);
3084 }
3085 spin_unlock_bh(&rtwdev->ba_lock);
3086 }
3087
3088 void rtw89_core_free_sta_pending_ba(struct rtw89_dev *rtwdev,
3089 struct ieee80211_sta *sta)
3090 {
3091 struct rtw89_txq *rtwtxq, *tmp;
3092
3093 spin_lock_bh(&rtwdev->ba_lock);
3094 list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->ba_list, list) {
3095 struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq);
3096
3097 if (sta == txq->sta)
3098 list_del_init(&rtwtxq->list);
3099 }
3100 spin_unlock_bh(&rtwdev->ba_lock);
3101 }
3102
3103 void rtw89_core_free_sta_pending_forbid_ba(struct rtw89_dev *rtwdev,
3104 struct ieee80211_sta *sta)
3105 {
3106 struct rtw89_txq *rtwtxq, *tmp;
3107
3108 spin_lock_bh(&rtwdev->ba_lock);
3109 list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->forbid_ba_list, list) {
3110 struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq);
3111
3112 if (sta == txq->sta) {
3113 clear_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags);
3114 list_del_init(&rtwtxq->list);
3115 }
3116 }
3117 spin_unlock_bh(&rtwdev->ba_lock);
3118 }
3119
3120 void rtw89_core_free_sta_pending_roc_tx(struct rtw89_dev *rtwdev,
3121 struct ieee80211_sta *sta)
3122 {
3123 struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
3124 struct sk_buff *skb, *tmp;
3125
3126 skb_queue_walk_safe(&rtwsta->roc_queue, skb, tmp) {
3127 skb_unlink(skb, &rtwsta->roc_queue);
3128 dev_kfree_skb_any(skb);
3129 }
3130 }
3131
3132 static void rtw89_core_stop_tx_ba_session(struct rtw89_dev *rtwdev,
3133 struct rtw89_txq *rtwtxq)
3134 {
3135 struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq);
3136 struct ieee80211_sta *sta = txq->sta;
3137 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
3138
3139 if (unlikely(!rtwsta) || unlikely(rtwsta->disassoc))
3140 return;
3141
3142 if (!test_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags) ||
3143 test_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags))
3144 return;
3145
3146 spin_lock_bh(&rtwdev->ba_lock);
3147 if (!test_and_set_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags))
3148 list_add_tail(&rtwtxq->list, &rtwdev->forbid_ba_list);
3149 spin_unlock_bh(&rtwdev->ba_lock);
3150
3151 ieee80211_stop_tx_ba_session(sta, txq->tid);
3152 cancel_delayed_work(&rtwdev->forbid_ba_work);
3153 ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->forbid_ba_work,
3154 RTW89_FORBID_BA_TIMER);
3155 }
3156
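/* Note: per-frame aggregation gate for the TX queue. EAPOL frames
 * (ETH_P_PAE) temporarily forbid and tear down the BA session; if A-MPDU is
 * already enabled the skb is simply tagged IEEE80211_TX_CTL_AMPDU; otherwise
 * the queue is appended to ba_list and ba_work is scheduled to negotiate a
 * BA session.
 */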
3157 static void rtw89_core_txq_check_agg(struct rtw89_dev *rtwdev,
3158 struct rtw89_txq *rtwtxq,
3159 struct sk_buff *skb)
3160 {
3161 struct ieee80211_hw *hw = rtwdev->hw;
3162 struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq);
3163 struct ieee80211_sta *sta = txq->sta;
3164 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
3165
3166 if (test_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags))
3167 return;
3168
3169 if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
3170 rtw89_core_stop_tx_ba_session(rtwdev, rtwtxq);
3171 return;
3172 }
3173
3174 if (unlikely(!sta))
3175 return;
3176
3177 if (unlikely(test_bit(RTW89_TXQ_F_BLOCK_BA, &rtwtxq->flags)))
3178 return;
3179
3180 if (test_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags)) {
3181 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_AMPDU;
3182 return;
3183 }
3184
3185 spin_lock_bh(&rtwdev->ba_lock);
3186 if (!rtwsta->disassoc && list_empty(&rtwtxq->list)) {
3187 list_add_tail(&rtwtxq->list, &rtwdev->ba_list);
3188 ieee80211_queue_work(hw, &rtwdev->ba_work);
3189 }
3190 spin_unlock_bh(&rtwdev->ba_lock);
3191 }
3192
3193 static void rtw89_core_txq_push(struct rtw89_dev *rtwdev,
3194 struct rtw89_txq *rtwtxq,
3195 unsigned long frame_cnt,
3196 unsigned long byte_cnt)
3197 {
3198 struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq);
3199 struct ieee80211_vif *vif = txq->vif;
3200 struct ieee80211_sta *sta = txq->sta;
3201 struct sk_buff *skb;
3202 unsigned long i;
3203 int ret;
3204
3205 rcu_read_lock();
3206 for (i = 0; i < frame_cnt; i++) {
3207 skb = ieee80211_tx_dequeue_ni(rtwdev->hw, txq);
3208 if (!skb) {
3209 rtw89_debug(rtwdev, RTW89_DBG_TXRX, "dequeue a NULL skb\n");
3210 goto out;
3211 }
3212 rtw89_core_txq_check_agg(rtwdev, rtwtxq, skb);
3213 ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, NULL);
3214 if (ret) {
3215 rtw89_err(rtwdev, "failed to push txq: %d\n", ret);
3216 ieee80211_free_txskb(rtwdev->hw, skb);
3217 break;
3218 }
3219 }
3220 out:
3221 rcu_read_unlock();
3222 }
3223
3224 static u32 rtw89_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, u8 tid)
3225 {
3226 u8 qsel, ch_dma;
3227
3228 qsel = rtw89_core_get_qsel(rtwdev, tid);
3229 ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
3230
3231 return rtw89_hci_check_and_reclaim_tx_resource(rtwdev, ch_dma);
3232 }
3233
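/* Note: aggregation-wait heuristic, active only when traffic is above
 * RTW89_TFC_MID and the designated link advertises max_agg_wait > 0. With
 * more than one frame queued, everything but the last frame is sent and the
 * queue is rescheduled; with exactly one frame left, it is held for up to
 * max_agg_wait scheduling rounds, presumably so further frames can
 * accumulate and go out in a single A-MPDU.
 */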
3234 static bool rtw89_core_txq_agg_wait(struct rtw89_dev *rtwdev,
3235 struct ieee80211_txq *txq,
3236 unsigned long *frame_cnt,
3237 bool *sched_txq, bool *reinvoke)
3238 {
3239 struct rtw89_txq *rtwtxq = (struct rtw89_txq *)txq->drv_priv;
3240 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(txq->sta);
3241 struct rtw89_sta_link *rtwsta_link;
3242
3243 if (!rtwsta)
3244 return false;
3245
3246 rtwsta_link = rtw89_get_designated_link(rtwsta);
3247 if (unlikely(!rtwsta_link)) {
3248 rtw89_err(rtwdev, "agg wait: find no designated link\n");
3249 return false;
3250 }
3251
3252 if (rtwsta_link->max_agg_wait <= 0)
3253 return false;
3254
3255 if (rtwdev->stats.tx_tfc_lv <= RTW89_TFC_MID)
3256 return false;
3257
3258 if (*frame_cnt > 1) {
3259 *frame_cnt -= 1;
3260 *sched_txq = true;
3261 *reinvoke = true;
3262 rtwtxq->wait_cnt = 1;
3263 return false;
3264 }
3265
3266 if (*frame_cnt == 1 && rtwtxq->wait_cnt < rtwsta_link->max_agg_wait) {
3267 *reinvoke = true;
3268 rtwtxq->wait_cnt++;
3269 return true;
3270 }
3271
3272 rtwtxq->wait_cnt = 0;
3273 return false;
3274 }
3275
3276 static void rtw89_core_txq_schedule(struct rtw89_dev *rtwdev, u8 ac, bool *reinvoke)
3277 {
3278 struct ieee80211_hw *hw = rtwdev->hw;
3279 struct ieee80211_txq *txq;
3280 struct rtw89_vif *rtwvif;
3281 struct rtw89_txq *rtwtxq;
3282 unsigned long frame_cnt;
3283 unsigned long byte_cnt;
3284 u32 tx_resource;
3285 bool sched_txq;
3286
3287 ieee80211_txq_schedule_start(hw, ac);
3288 while ((txq = ieee80211_next_txq(hw, ac))) {
3289 rtwtxq = (struct rtw89_txq *)txq->drv_priv;
3290 rtwvif = vif_to_rtwvif(txq->vif);
3291
3292 if (rtwvif->offchan) {
3293 ieee80211_return_txq(hw, txq, true);
3294 continue;
3295 }
3296 tx_resource = rtw89_check_and_reclaim_tx_resource(rtwdev, txq->tid);
3297 sched_txq = false;
3298
3299 ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
3300 if (rtw89_core_txq_agg_wait(rtwdev, txq, &frame_cnt, &sched_txq, reinvoke)) {
3301 ieee80211_return_txq(hw, txq, true);
3302 continue;
3303 }
3304 frame_cnt = min_t(unsigned long, frame_cnt, tx_resource);
3305 rtw89_core_txq_push(rtwdev, rtwtxq, frame_cnt, byte_cnt);
3306 ieee80211_return_txq(hw, txq, sched_txq);
3307 if (frame_cnt != 0)
3308 rtw89_core_tx_kick_off(rtwdev, rtw89_core_get_qsel(rtwdev, txq->tid));
3309
3310 /* frame_cnt was capped by tx_resource; burst traffic could leave the queue stuck, so reinvoke */
3311 if (frame_cnt == tx_resource)
3312 *reinvoke = true;
3313 }
3314 ieee80211_txq_schedule_end(hw, ac);
3315 }
3316
3317 static void rtw89_ips_work(struct wiphy *wiphy, struct wiphy_work *work)
3318 {
3319 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
3320 ips_work);
3321
3322 lockdep_assert_wiphy(wiphy);
3323
3324 rtw89_enter_ips_by_hwflags(rtwdev);
3325 }
3326
3327 static void rtw89_core_txq_work(struct work_struct *w)
3328 {
3329 struct rtw89_dev *rtwdev = container_of(w, struct rtw89_dev, txq_work);
3330 bool reinvoke = false;
3331 u8 ac;
3332
3333 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
3334 rtw89_core_txq_schedule(rtwdev, ac, &reinvoke);
3335
3336 if (reinvoke) {
3337 /* reinvoke to process the last frame */
3338 mod_delayed_work(rtwdev->txq_wq, &rtwdev->txq_reinvoke_work, 1);
3339 }
3340 }
3341
3342 static void rtw89_core_txq_reinvoke_work(struct work_struct *w)
3343 {
3344 struct rtw89_dev *rtwdev = container_of(w, struct rtw89_dev,
3345 txq_reinvoke_work.work);
3346
3347 queue_work(rtwdev->txq_wq, &rtwdev->txq_work);
3348 }
3349
3350 static void rtw89_forbid_ba_work(struct work_struct *w)
3351 {
3352 struct rtw89_dev *rtwdev = container_of(w, struct rtw89_dev,
3353 forbid_ba_work.work);
3354 struct rtw89_txq *rtwtxq, *tmp;
3355
3356 spin_lock_bh(&rtwdev->ba_lock);
3357 list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->forbid_ba_list, list) {
3358 clear_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags);
3359 list_del_init(&rtwtxq->list);
3360 }
3361 spin_unlock_bh(&rtwdev->ba_lock);
3362 }
3363
3364 static void rtw89_core_sta_pending_tx_iter(void *data,
3365 struct ieee80211_sta *sta)
3366 {
3367 struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
3368 struct rtw89_dev *rtwdev = rtwsta->rtwdev;
3369 struct rtw89_vif *rtwvif = rtwsta->rtwvif;
3370 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
3371 struct rtw89_vif_link *target = data;
3372 struct rtw89_vif_link *rtwvif_link;
3373 struct sk_buff *skb, *tmp;
3374 unsigned int link_id;
3375 int qsel, ret;
3376
3377 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
3378 if (rtwvif_link->chanctx_idx == target->chanctx_idx)
3379 goto bottom;
3380
3381 return;
3382
3383 bottom:
3384 if (skb_queue_len(&rtwsta->roc_queue) == 0)
3385 return;
3386
3387 skb_queue_walk_safe(&rtwsta->roc_queue, skb, tmp) {
3388 skb_unlink(skb, &rtwsta->roc_queue);
3389
3390 ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel);
3391 if (ret) {
3392 rtw89_warn(rtwdev, "pending tx failed with %d\n", ret);
3393 dev_kfree_skb_any(skb);
3394 } else {
3395 rtw89_core_tx_kick_off(rtwdev, qsel);
3396 }
3397 }
3398 }
3399
3400 static void rtw89_core_handle_sta_pending_tx(struct rtw89_dev *rtwdev,
3401 struct rtw89_vif_link *rtwvif_link)
3402 {
3403 ieee80211_iterate_stations_atomic(rtwdev->hw,
3404 rtw89_core_sta_pending_tx_iter,
3405 rtwvif_link);
3406 }
3407
3408 int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
3409 bool qos, bool ps, int timeout)
3410 {
3411 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
3412 int link_id = ieee80211_vif_is_mld(vif) ? rtwvif_link->link_id : -1;
3413 struct rtw89_sta_link *rtwsta_link;
3414 struct ieee80211_sta *sta;
3415 struct ieee80211_hdr *hdr;
3416 struct rtw89_sta *rtwsta;
3417 struct sk_buff *skb;
3418 int ret, qsel;
3419
3420 if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc)
3421 return 0;
3422
3423 rcu_read_lock();
3424 sta = ieee80211_find_sta(vif, vif->cfg.ap_addr);
3425 if (!sta) {
3426 ret = -EINVAL;
3427 goto out;
3428 }
3429 rtwsta = sta_to_rtwsta(sta);
3430
3431 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, qos);
3432 if (!skb) {
3433 ret = -ENOMEM;
3434 goto out;
3435 }
3436
3437 hdr = (struct ieee80211_hdr *)skb->data;
3438 if (ps)
3439 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
3440
3441 rtwsta_link = rtwsta->links[rtwvif_link->link_id];
3442 if (unlikely(!rtwsta_link)) {
3443 ret = -ENOLINK;
3444 goto out;
3445 }
3446
3447 ret = rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, &qsel, true);
3448 if (ret) {
3449 rtw89_warn(rtwdev, "nullfunc transmit failed: %d\n", ret);
3450 dev_kfree_skb_any(skb);
3451 goto out;
3452 }
3453
3454 rcu_read_unlock();
3455
3456 return rtw89_core_tx_kick_off_and_wait(rtwdev, skb, qsel,
3457 timeout);
3458 out:
3459 rcu_read_unlock();
3460
3461 return ret;
3462 }
3463
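/* Note: remain-on-channel entry. The channel context is paused, the
 * designated link sends a PS nullfunc (PM=1) so the AP buffers frames while
 * we are off-channel, every vif sharing that chanctx is marked offchan, and
 * the hardware is tuned to the requested ROC channel. The unicast/broadcast
 * CAM-match bits are cleared from the RX filter, presumably so frames not
 * addressed to us can still be received during the ROC, before mac80211 is
 * notified via ieee80211_ready_on_channel() and roc_work is armed for the
 * requested duration.
 */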
3464 void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
3465 {
3466 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
3467 struct rtw89_chanctx_pause_parm pause_parm = {
3468 .rsn = RTW89_CHANCTX_PAUSE_REASON_ROC,
3469 };
3470 struct ieee80211_hw *hw = rtwdev->hw;
3471 struct rtw89_roc *roc = &rtwvif->roc;
3472 struct rtw89_vif_link *rtwvif_link;
3473 struct cfg80211_chan_def roc_chan;
3474 struct rtw89_vif *tmp_vif;
3475 u32 reg;
3476 int ret;
3477
3478 lockdep_assert_wiphy(hw->wiphy);
3479
3480 rtw89_leave_ips_by_hwflags(rtwdev);
3481 rtw89_leave_lps(rtwdev);
3482
3483 rtwvif_link = rtw89_get_designated_link(rtwvif);
3484 if (unlikely(!rtwvif_link)) {
3485 rtw89_err(rtwdev, "roc start: find no designated link\n");
3486 return;
3487 }
3488
3489 roc->link_id = rtwvif_link->link_id;
3490
3491 pause_parm.trigger = rtwvif_link;
3492 rtw89_chanctx_pause(rtwdev, &pause_parm);
3493
3494 ret = rtw89_core_send_nullfunc(rtwdev, rtwvif_link, true, true,
3495 RTW89_ROC_TX_TIMEOUT);
3496 if (ret)
3497 rtw89_debug(rtwdev, RTW89_DBG_TXRX,
3498 "roc send null-1 failed: %d\n", ret);
3499
3500 rtw89_for_each_rtwvif(rtwdev, tmp_vif) {
3501 struct rtw89_vif_link *tmp_link;
3502 unsigned int link_id;
3503
3504 rtw89_vif_for_each_link(tmp_vif, tmp_link, link_id) {
3505 if (tmp_link->chanctx_idx == rtwvif_link->chanctx_idx) {
3506 tmp_vif->offchan = true;
3507 break;
3508 }
3509 }
3510 }
3511
3512 cfg80211_chandef_create(&roc_chan, &roc->chan, NL80211_CHAN_NO_HT);
3513 rtw89_config_roc_chandef(rtwdev, rtwvif_link, &roc_chan);
3514 rtw89_set_channel(rtwdev);
3515
3516 reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx);
3517 rtw89_write32_clr(rtwdev, reg, B_AX_A_UC_CAM_MATCH | B_AX_A_BC_CAM_MATCH);
3518
3519 ieee80211_ready_on_channel(hw);
3520 wiphy_delayed_work_cancel(hw->wiphy, &rtwvif->roc.roc_work);
3521 wiphy_delayed_work_queue(hw->wiphy, &rtwvif->roc.roc_work,
3522 msecs_to_jiffies(rtwvif->roc.duration));
3523 }
3524
3525 void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
3526 {
3527 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
3528 struct ieee80211_hw *hw = rtwdev->hw;
3529 struct rtw89_roc *roc = &rtwvif->roc;
3530 struct rtw89_vif_link *rtwvif_link;
3531 struct rtw89_vif *tmp_vif;
3532 u32 reg;
3533 int ret;
3534
3535 lockdep_assert_wiphy(hw->wiphy);
3536
3537 ieee80211_remain_on_channel_expired(hw);
3538
3539 rtw89_leave_ips_by_hwflags(rtwdev);
3540 rtw89_leave_lps(rtwdev);
3541
3542 rtwvif_link = rtwvif->links[roc->link_id];
3543 if (unlikely(!rtwvif_link)) {
3544 rtw89_err(rtwdev, "roc end: find no link (link id %u)\n",
3545 roc->link_id);
3546 return;
3547 }
3548
3549 reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx);
3550 rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rtwdev->hal.rx_fltr);
3551
3552 roc->state = RTW89_ROC_IDLE;
3553 rtw89_config_roc_chandef(rtwdev, rtwvif_link, NULL);
3554 rtw89_chanctx_proceed(rtwdev, NULL);
3555 ret = rtw89_core_send_nullfunc(rtwdev, rtwvif_link, true, false,
3556 RTW89_ROC_TX_TIMEOUT);
3557 if (ret)
3558 rtw89_debug(rtwdev, RTW89_DBG_TXRX,
3559 "roc send null-0 failed: %d\n", ret);
3560
3561 rtw89_for_each_rtwvif(rtwdev, tmp_vif)
3562 tmp_vif->offchan = false;
3563
3564 rtw89_core_handle_sta_pending_tx(rtwdev, rtwvif_link);
3565 queue_work(rtwdev->txq_wq, &rtwdev->txq_work);
3566
3567 if (hw->conf.flags & IEEE80211_CONF_IDLE)
3568 wiphy_delayed_work_queue(hw->wiphy, &roc->roc_work,
3569 msecs_to_jiffies(RTW89_ROC_IDLE_TIMEOUT));
3570 }
3571
3572 void rtw89_roc_work(struct wiphy *wiphy, struct wiphy_work *work)
3573 {
3574 struct rtw89_vif *rtwvif = container_of(work, struct rtw89_vif,
3575 roc.roc_work.work);
3576 struct rtw89_dev *rtwdev = rtwvif->rtwdev;
3577 struct rtw89_roc *roc = &rtwvif->roc;
3578
3579 lockdep_assert_wiphy(wiphy);
3580
3581 switch (roc->state) {
3582 case RTW89_ROC_IDLE:
3583 rtw89_enter_ips_by_hwflags(rtwdev);
3584 break;
3585 case RTW89_ROC_MGMT:
3586 case RTW89_ROC_NORMAL:
3587 rtw89_roc_end(rtwdev, rtwvif);
3588 break;
3589 default:
3590 break;
3591 }
3592 }
3593
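/* Note: traffic level mapping. A link is IDLE when fewer than cnt_level
 * packets were seen in the interval (5 per 100 ms, 100 per 2 s); otherwise
 * the averaged throughput in Mbps selects the level:
 *
 *	> 50 Mbps  -> RTW89_TFC_HIGH
 *	> 10 Mbps  -> RTW89_TFC_MID
 *	>  2 Mbps  -> RTW89_TFC_LOW
 *	else       -> RTW89_TFC_ULTRA_LOW
 */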
3594 static enum rtw89_tfc_lv rtw89_get_traffic_level(struct rtw89_dev *rtwdev,
3595 u32 throughput, u64 cnt,
3596 enum rtw89_tfc_interval interval)
3597 {
3598 u64 cnt_level;
3599
3600 switch (interval) {
3601 default:
3602 case RTW89_TFC_INTERVAL_100MS:
3603 cnt_level = 5;
3604 break;
3605 case RTW89_TFC_INTERVAL_2SEC:
3606 cnt_level = 100;
3607 break;
3608 }
3609
3610 if (cnt < cnt_level)
3611 return RTW89_TFC_IDLE;
3612 if (throughput > 50)
3613 return RTW89_TFC_HIGH;
3614 if (throughput > 10)
3615 return RTW89_TFC_MID;
3616 if (throughput > 2)
3617 return RTW89_TFC_LOW;
3618 return RTW89_TFC_ULTRA_LOW;
3619 }
3620
3621 static bool rtw89_traffic_stats_calc(struct rtw89_dev *rtwdev,
3622 struct rtw89_traffic_stats *stats,
3623 enum rtw89_tfc_interval interval)
3624 {
3625 enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv;
3626 enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv;
3627
3628 stats->tx_throughput_raw = rtw89_bytes_to_mbps(stats->tx_unicast, interval);
3629 stats->rx_throughput_raw = rtw89_bytes_to_mbps(stats->rx_unicast, interval);
3630
3631 ewma_tp_add(&stats->tx_ewma_tp, stats->tx_throughput_raw);
3632 ewma_tp_add(&stats->rx_ewma_tp, stats->rx_throughput_raw);
3633
3634 stats->tx_throughput = ewma_tp_read(&stats->tx_ewma_tp);
3635 stats->rx_throughput = ewma_tp_read(&stats->rx_ewma_tp);
3636 stats->tx_tfc_lv = rtw89_get_traffic_level(rtwdev, stats->tx_throughput,
3637 stats->tx_cnt, interval);
3638 stats->rx_tfc_lv = rtw89_get_traffic_level(rtwdev, stats->rx_throughput,
3639 stats->rx_cnt, interval);
3640 stats->tx_avg_len = stats->tx_cnt ?
3641 DIV_ROUND_DOWN_ULL(stats->tx_unicast, stats->tx_cnt) : 0;
3642 stats->rx_avg_len = stats->rx_cnt ?
3643 DIV_ROUND_DOWN_ULL(stats->rx_unicast, stats->rx_cnt) : 0;
3644
3645 stats->tx_unicast = 0;
3646 stats->rx_unicast = 0;
3647 stats->tx_cnt = 0;
3648 stats->rx_cnt = 0;
3649 stats->rx_tf_periodic = stats->rx_tf_acc;
3650 stats->rx_tf_acc = 0;
3651
3652 if (tx_tfc_lv != stats->tx_tfc_lv || rx_tfc_lv != stats->rx_tfc_lv)
3653 return true;
3654
3655 return false;
3656 }
3657
3658 static bool rtw89_traffic_stats_track(struct rtw89_dev *rtwdev)
3659 {
3660 struct rtw89_vif_link *rtwvif_link;
3661 struct rtw89_vif *rtwvif;
3662 unsigned int link_id;
3663 bool tfc_changed;
3664
3665 tfc_changed = rtw89_traffic_stats_calc(rtwdev, &rtwdev->stats,
3666 RTW89_TFC_INTERVAL_2SEC);
3667
3668 rtw89_for_each_rtwvif(rtwdev, rtwvif) {
3669 rtw89_traffic_stats_calc(rtwdev, &rtwvif->stats,
3670 RTW89_TFC_INTERVAL_2SEC);
3671
3672 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
3673 rtw89_fw_h2c_tp_offload(rtwdev, rtwvif_link);
3674 }
3675
3676 return tfc_changed;
3677 }
3678
3679 static void rtw89_enter_lps_track(struct rtw89_dev *rtwdev)
3680 {
3681 struct ieee80211_vif *vif;
3682 struct rtw89_vif *rtwvif;
3683
3684 rtw89_for_each_rtwvif(rtwdev, rtwvif) {
3685 if (rtwvif->tdls_peer)
3686 continue;
3687 if (rtwvif->offchan)
3688 continue;
3689
3690 if (rtwvif->stats_ps.tx_tfc_lv >= RTW89_TFC_MID ||
3691 rtwvif->stats_ps.rx_tfc_lv >= RTW89_TFC_MID)
3692 continue;
3693
3694 vif = rtwvif_to_vif(rtwvif);
3695
3696 if (!(vif->type == NL80211_IFTYPE_STATION ||
3697 vif->type == NL80211_IFTYPE_P2P_CLIENT))
3698 continue;
3699
3700 rtw89_enter_lps(rtwdev, rtwvif, true);
3701 }
3702 }
3703
3704 static void rtw89_core_rfk_track(struct rtw89_dev *rtwdev)
3705 {
3706 enum rtw89_entity_mode mode;
3707
3708 mode = rtw89_get_entity_mode(rtwdev);
3709 if (mode == RTW89_ENTITY_MODE_MCC)
3710 return;
3711
3712 rtw89_chip_rfk_track(rtwdev);
3713 }
3714
3715 void rtw89_core_update_p2p_ps(struct rtw89_dev *rtwdev,
3716 struct rtw89_vif_link *rtwvif_link,
3717 struct ieee80211_bss_conf *bss_conf)
3718 {
3719 enum rtw89_entity_mode mode = rtw89_get_entity_mode(rtwdev);
3720
3721 if (mode == RTW89_ENTITY_MODE_MCC)
3722 rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_P2P_PS_CHANGE);
3723 else
3724 rtw89_process_p2p_ps(rtwdev, rtwvif_link, bss_conf);
3725 }
3726
3727 void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev,
3728 struct rtw89_traffic_stats *stats)
3729 {
3730 stats->tx_unicast = 0;
3731 stats->rx_unicast = 0;
3732 stats->tx_cnt = 0;
3733 stats->rx_cnt = 0;
3734 ewma_tp_init(&stats->tx_ewma_tp);
3735 ewma_tp_init(&stats->rx_ewma_tp);
3736 }
3737
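/* Note: MLSR link decision thresholds, in dBm of averaged beacon RSSI. At or
 * above the exit threshold the 5/6 GHz bands are preferred, at or below the
 * goto threshold 2.4 GHz is preferred, and the gap in between acts as
 * hysteresis so the designated link is left untouched. If the current
 * designated link already sits on a decided band nothing changes; otherwise
 * the first usable link on a decided band is selected and switched to via
 * rtw89_core_mlsr_switch().
 */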
3738 #define RTW89_MLSR_GOTO_2GHZ_THRESHOLD -53
3739 #define RTW89_MLSR_EXIT_2GHZ_THRESHOLD -38
3740 static void rtw89_core_mlsr_link_decision(struct rtw89_dev *rtwdev,
3741 struct rtw89_vif *rtwvif)
3742 {
3743 unsigned int sel_link_id = IEEE80211_MLD_MAX_NUM_LINKS;
3744 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
3745 struct rtw89_vif_link *rtwvif_link;
3746 const struct rtw89_chan *chan;
3747 unsigned long usable_links;
3748 unsigned int link_id;
3749 u8 decided_bands;
3750 u8 rssi;
3751
3752 rssi = ewma_rssi_read(&rtwdev->phystat.bcn_rssi);
3753 if (unlikely(!rssi))
3754 return;
3755
3756 if (RTW89_RSSI_RAW_TO_DBM(rssi) >= RTW89_MLSR_EXIT_2GHZ_THRESHOLD)
3757 decided_bands = BIT(RTW89_BAND_5G) | BIT(RTW89_BAND_6G);
3758 else if (RTW89_RSSI_RAW_TO_DBM(rssi) <= RTW89_MLSR_GOTO_2GHZ_THRESHOLD)
3759 decided_bands = BIT(RTW89_BAND_2G);
3760 else
3761 return;
3762
3763 usable_links = ieee80211_vif_usable_links(vif);
3764
3765 rtwvif_link = rtw89_get_designated_link(rtwvif);
3766 if (unlikely(!rtwvif_link))
3767 goto select;
3768
3769 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
3770 if (decided_bands & BIT(chan->band_type))
3771 return;
3772
3773 usable_links &= ~BIT(rtwvif_link->link_id);
3774
3775 select:
3776 rcu_read_lock();
3777
3778 for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
3779 struct ieee80211_bss_conf *link_conf;
3780 struct ieee80211_channel *channel;
3781 enum rtw89_band band;
3782
3783 link_conf = rcu_dereference(vif->link_conf[link_id]);
3784 if (unlikely(!link_conf))
3785 continue;
3786
3787 channel = link_conf->chanreq.oper.chan;
3788 if (unlikely(!channel))
3789 continue;
3790
3791 band = rtw89_nl80211_to_hw_band(channel->band);
3792 if (decided_bands & BIT(band)) {
3793 sel_link_id = link_id;
3794 break;
3795 }
3796 }
3797
3798 rcu_read_unlock();
3799
3800 if (sel_link_id == IEEE80211_MLD_MAX_NUM_LINKS)
3801 return;
3802
3803 rtw89_core_mlsr_switch(rtwdev, rtwvif, sel_link_id);
3804 }
3805
3806 static void rtw89_core_mlo_track(struct rtw89_dev *rtwdev)
3807 {
3808 struct rtw89_hal *hal = &rtwdev->hal;
3809 struct ieee80211_vif *vif;
3810 struct rtw89_vif *rtwvif;
3811
3812 if (hal->disabled_dm_bitmap & BIT(RTW89_DM_MLO))
3813 return;
3814
3815 rtw89_for_each_rtwvif(rtwdev, rtwvif) {
3816 vif = rtwvif_to_vif(rtwvif);
3817 if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
3818 continue;
3819
3820 switch (rtwvif->mlo_mode) {
3821 case RTW89_MLO_MODE_MLSR:
3822 rtw89_core_mlsr_link_decision(rtwdev, rtwvif);
3823 break;
3824 default:
3825 break;
3826 }
3827 }
3828 }
3829
3830 static void rtw89_track_ps_work(struct wiphy *wiphy, struct wiphy_work *work)
3831 {
3832 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
3833 track_ps_work.work);
3834 struct rtw89_vif *rtwvif;
3835
3836 lockdep_assert_wiphy(wiphy);
3837
3838 if (test_bit(RTW89_FLAG_FORBIDDEN_TRACK_WORK, rtwdev->flags))
3839 return;
3840
3841 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
3842 return;
3843
3844 wiphy_delayed_work_queue(wiphy, &rtwdev->track_ps_work,
3845 RTW89_TRACK_PS_WORK_PERIOD);
3846
3847 rtw89_for_each_rtwvif(rtwdev, rtwvif)
3848 rtw89_traffic_stats_calc(rtwdev, &rtwvif->stats_ps,
3849 RTW89_TFC_INTERVAL_100MS);
3850
3851 if (rtwdev->scanning)
3852 return;
3853
3854 if (rtwdev->lps_enabled && !rtwdev->btc.lps)
3855 rtw89_enter_lps_track(rtwdev);
3856 }
3857
3858 static void rtw89_track_work(struct wiphy *wiphy, struct wiphy_work *work)
3859 {
3860 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
3861 track_work.work);
3862 bool tfc_changed;
3863
3864 lockdep_assert_wiphy(wiphy);
3865
3866 if (test_bit(RTW89_FLAG_FORBIDDEN_TRACK_WORK, rtwdev->flags))
3867 return;
3868
3869 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
3870 return;
3871
3872 wiphy_delayed_work_queue(wiphy, &rtwdev->track_work,
3873 RTW89_TRACK_WORK_PERIOD);
3874
3875 tfc_changed = rtw89_traffic_stats_track(rtwdev);
3876 if (rtwdev->scanning)
3877 return;
3878
3879 rtw89_leave_lps(rtwdev);
3880
3881 if (tfc_changed) {
3882 rtw89_hci_recalc_int_mit(rtwdev);
3883 rtw89_btc_ntfy_wl_sta(rtwdev);
3884 }
3885 rtw89_mac_bf_monitor_track(rtwdev);
3886 rtw89_phy_stat_track(rtwdev);
3887 rtw89_phy_env_monitor_track(rtwdev);
3888 rtw89_phy_dig(rtwdev);
3889 rtw89_core_rfk_track(rtwdev);
3890 rtw89_phy_ra_update(rtwdev);
3891 rtw89_phy_cfo_track(rtwdev);
3892 rtw89_phy_tx_path_div_track(rtwdev);
3893 rtw89_phy_antdiv_track(rtwdev);
3894 rtw89_phy_ul_tb_ctrl_track(rtwdev);
3895 rtw89_phy_edcca_track(rtwdev);
3896 rtw89_sar_track(rtwdev);
3897 rtw89_chanctx_track(rtwdev);
3898 rtw89_core_rfkill_poll(rtwdev, false);
3899 rtw89_core_mlo_track(rtwdev);
3900
3901 if (rtwdev->lps_enabled && !rtwdev->btc.lps)
3902 rtw89_enter_lps_track(rtwdev);
3903 }
3904
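/* Note: find_first_zero_bit() returns @size when the map is full, so a
 * return value equal to @size means no bit was acquired and the caller must
 * handle the failure. A minimal usage sketch (not a call site in this file,
 * names are illustrative):
 *
 *	idx = rtw89_core_acquire_bit_map(map, num);
 *	if (idx == num)
 *		return -ENOSPC;
 */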
3905 u8 rtw89_core_acquire_bit_map(unsigned long *addr, unsigned long size)
3906 {
3907 unsigned long bit;
3908
3909 bit = find_first_zero_bit(addr, size);
3910 if (bit < size)
3911 set_bit(bit, addr);
3912
3913 return bit;
3914 }
3915
3916 void rtw89_core_release_bit_map(unsigned long *addr, u8 bit)
3917 {
3918 clear_bit(bit, addr);
3919 }
3920
3921 void rtw89_core_release_all_bits_map(unsigned long *addr, unsigned int nbits)
3922 {
3923 bitmap_zero(addr, nbits);
3924 }
3925
3926 int rtw89_core_acquire_sta_ba_entry(struct rtw89_dev *rtwdev,
3927 struct rtw89_sta_link *rtwsta_link, u8 tid,
3928 u8 *cam_idx)
3929 {
3930 const struct rtw89_chip_info *chip = rtwdev->chip;
3931 struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
3932 struct rtw89_ba_cam_entry *entry = NULL, *tmp;
3933 u8 idx;
3934 int i;
3935
3936 lockdep_assert_wiphy(rtwdev->hw->wiphy);
3937
3938 idx = rtw89_core_acquire_bit_map(cam_info->ba_cam_map, chip->bacam_num);
3939 if (idx == chip->bacam_num) {
3940 /* TID 0 and 5 are given a static BA CAM entry, so if the BA CAM is
3941 * full, replace an existing entry that belongs to another TID.
3942 * Hardware keeps processing the replaced TID automatically.
3943 */
3944 if (tid != 0 && tid != 5)
3945 return -ENOSPC;
3946
3947 for_each_set_bit(i, cam_info->ba_cam_map, chip->bacam_num) {
3948 tmp = &cam_info->ba_cam_entry[i];
3949 if (tmp->tid == 0 || tmp->tid == 5)
3950 continue;
3951
3952 idx = i;
3953 entry = tmp;
3954 list_del(&entry->list);
3955 break;
3956 }
3957
3958 if (!entry)
3959 return -ENOSPC;
3960 } else {
3961 entry = &cam_info->ba_cam_entry[idx];
3962 }
3963
3964 entry->tid = tid;
3965 list_add_tail(&entry->list, &rtwsta_link->ba_cam_list);
3966
3967 *cam_idx = idx;
3968
3969 return 0;
3970 }
3971
3972 int rtw89_core_release_sta_ba_entry(struct rtw89_dev *rtwdev,
3973 struct rtw89_sta_link *rtwsta_link, u8 tid,
3974 u8 *cam_idx)
3975 {
3976 struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
3977 struct rtw89_ba_cam_entry *entry = NULL, *tmp;
3978 u8 idx;
3979
3980 lockdep_assert_wiphy(rtwdev->hw->wiphy);
3981
3982 list_for_each_entry_safe(entry, tmp, &rtwsta_link->ba_cam_list, list) {
3983 if (entry->tid != tid)
3984 continue;
3985
3986 idx = entry - cam_info->ba_cam_entry;
3987 list_del(&entry->list);
3988
3989 rtw89_core_release_bit_map(cam_info->ba_cam_map, idx);
3990 *cam_idx = idx;
3991 return 0;
3992 }
3993
3994 return -ENOENT;
3995 }
3996
3997 #define RTW89_TYPE_MAPPING(_type) \
3998 case NL80211_IFTYPE_ ## _type: \
3999 rtwvif_link->wifi_role = RTW89_WIFI_ROLE_ ## _type; \
4000 break
4001 void rtw89_vif_type_mapping(struct rtw89_vif_link *rtwvif_link, bool assoc)
4002 {
4003 const struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
4004 const struct ieee80211_bss_conf *bss_conf;
4005
4006 switch (vif->type) {
4007 case NL80211_IFTYPE_STATION:
4008 if (vif->p2p)
4009 rtwvif_link->wifi_role = RTW89_WIFI_ROLE_P2P_CLIENT;
4010 else
4011 rtwvif_link->wifi_role = RTW89_WIFI_ROLE_STATION;
4012 break;
4013 case NL80211_IFTYPE_AP:
4014 if (vif->p2p)
4015 rtwvif_link->wifi_role = RTW89_WIFI_ROLE_P2P_GO;
4016 else
4017 rtwvif_link->wifi_role = RTW89_WIFI_ROLE_AP;
4018 break;
4019 RTW89_TYPE_MAPPING(ADHOC);
4020 RTW89_TYPE_MAPPING(MONITOR);
4021 RTW89_TYPE_MAPPING(MESH_POINT);
4022 default:
4023 WARN_ON(1);
4024 break;
4025 }
4026
4027 switch (vif->type) {
4028 case NL80211_IFTYPE_AP:
4029 case NL80211_IFTYPE_MESH_POINT:
4030 rtwvif_link->net_type = RTW89_NET_TYPE_AP_MODE;
4031 rtwvif_link->self_role = RTW89_SELF_ROLE_AP;
4032 break;
4033 case NL80211_IFTYPE_ADHOC:
4034 rtwvif_link->net_type = RTW89_NET_TYPE_AD_HOC;
4035 rtwvif_link->self_role = RTW89_SELF_ROLE_CLIENT;
4036 break;
4037 case NL80211_IFTYPE_STATION:
4038 if (assoc) {
4039 rtwvif_link->net_type = RTW89_NET_TYPE_INFRA;
4040
4041 rcu_read_lock();
4042 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false);
4043 rtwvif_link->trigger = bss_conf->he_support;
4044 rcu_read_unlock();
4045 } else {
4046 rtwvif_link->net_type = RTW89_NET_TYPE_NO_LINK;
4047 rtwvif_link->trigger = false;
4048 }
4049 rtwvif_link->self_role = RTW89_SELF_ROLE_CLIENT;
4050 rtwvif_link->addr_cam.sec_ent_mode = RTW89_ADDR_CAM_SEC_NORMAL;
4051 break;
4052 case NL80211_IFTYPE_MONITOR:
4053 break;
4054 default:
4055 WARN_ON(1);
4056 break;
4057 }
4058 }
4059
4060 int rtw89_core_sta_link_add(struct rtw89_dev *rtwdev,
4061 struct rtw89_vif_link *rtwvif_link,
4062 struct rtw89_sta_link *rtwsta_link)
4063 {
4064 const struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
4065 const struct ieee80211_sta *sta = rtwsta_link_to_sta(rtwsta_link);
4066 struct rtw89_hal *hal = &rtwdev->hal;
4067 u8 ant_num = hal->ant_diversity ? 2 : rtwdev->chip->rf_path_num;
4068 int i;
4069 int ret;
4070
4071 rtwsta_link->prev_rssi = 0;
4072 INIT_LIST_HEAD(&rtwsta_link->ba_cam_list);
4073 ewma_rssi_init(&rtwsta_link->avg_rssi);
4074 ewma_snr_init(&rtwsta_link->avg_snr);
4075 ewma_evm_init(&rtwsta_link->evm_1ss);
4076 for (i = 0; i < ant_num; i++) {
4077 ewma_rssi_init(&rtwsta_link->rssi[i]);
4078 ewma_evm_init(&rtwsta_link->evm_min[i]);
4079 ewma_evm_init(&rtwsta_link->evm_max[i]);
4080 }
4081
4082 if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
4083 /* must do rtw89_reg_6ghz_recalc() before rfk channel */
4084 ret = rtw89_reg_6ghz_recalc(rtwdev, rtwvif_link, true);
4085 if (ret)
4086 return ret;
4087
4088 rtw89_btc_ntfy_role_info(rtwdev, rtwvif_link, rtwsta_link,
4089 BTC_ROLE_MSTS_STA_CONN_START);
4090 rtw89_chip_rfk_channel(rtwdev, rtwvif_link);
4091
4092 if (vif->p2p) {
4093 rtw89_mac_get_tx_retry_limit(rtwdev, rtwsta_link,
4094 &rtwsta_link->tx_retry);
4095 rtw89_mac_set_tx_retry_limit(rtwdev, rtwsta_link, false, 60);
4096 }
4097 rtw89_phy_dig_suspend(rtwdev);
4098 } else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
4099 ret = rtw89_mac_set_macid_pause(rtwdev, rtwsta_link->mac_id, false);
4100 if (ret) {
4101 rtw89_warn(rtwdev, "failed to send h2c macid pause\n");
4102 return ret;
4103 }
4104
4105 ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif_link, rtwsta_link,
4106 RTW89_ROLE_CREATE);
4107 if (ret) {
4108 rtw89_warn(rtwdev, "failed to send h2c role info\n");
4109 return ret;
4110 }
4111
4112 ret = rtw89_chip_h2c_default_cmac_tbl(rtwdev, rtwvif_link, rtwsta_link);
4113 if (ret)
4114 return ret;
4115
4116 ret = rtw89_chip_h2c_default_dmac_tbl(rtwdev, rtwvif_link, rtwsta_link);
4117 if (ret)
4118 return ret;
4119 }
4120
4121 return 0;
4122 }
4123
4124 int rtw89_core_sta_link_disassoc(struct rtw89_dev *rtwdev,
4125 struct rtw89_vif_link *rtwvif_link,
4126 struct rtw89_sta_link *rtwsta_link)
4127 {
4128 const struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
4129
4130 rtw89_assoc_link_clr(rtwsta_link);
4131
4132 if (vif->type == NL80211_IFTYPE_STATION)
4133 rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, false);
4134
4135 if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
4136 rtw89_p2p_noa_once_deinit(rtwvif_link);
4137
4138 return 0;
4139 }
4140
4141 int rtw89_core_sta_link_disconnect(struct rtw89_dev *rtwdev,
4142 struct rtw89_vif_link *rtwvif_link,
4143 struct rtw89_sta_link *rtwsta_link)
4144 {
4145 const struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
4146 const struct ieee80211_sta *sta = rtwsta_link_to_sta(rtwsta_link);
4147 int ret;
4148
4149 rtw89_mac_bf_monitor_calc(rtwdev, rtwsta_link, true);
4150 rtw89_mac_bf_disassoc(rtwdev, rtwvif_link, rtwsta_link);
4151
4152 if (vif->type == NL80211_IFTYPE_AP || sta->tdls)
4153 rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta_link->addr_cam);
4154 if (sta->tdls)
4155 rtw89_cam_deinit_bssid_cam(rtwdev, &rtwsta_link->bssid_cam);
4156
4157 if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
4158 rtw89_vif_type_mapping(rtwvif_link, false);
4159 rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif_link, true);
4160 }
4161
4162 ret = rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, rtwvif_link, rtwsta_link);
4163 if (ret) {
4164 rtw89_warn(rtwdev, "failed to send h2c cmac table\n");
4165 return ret;
4166 }
4167
4168 ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif_link, rtwsta_link, true);
4169 if (ret) {
4170 rtw89_warn(rtwdev, "failed to send h2c join info\n");
4171 return ret;
4172 }
4173
4174 /* update cam aid mac_id net_type */
4175 ret = rtw89_fw_h2c_cam(rtwdev, rtwvif_link, rtwsta_link, NULL);
4176 if (ret) {
4177 rtw89_warn(rtwdev, "failed to send h2c cam\n");
4178 return ret;
4179 }
4180
4181 return ret;
4182 }
4183
4184 static bool rtw89_sta_link_can_er(struct rtw89_dev *rtwdev,
4185 struct ieee80211_bss_conf *bss_conf,
4186 struct ieee80211_link_sta *link_sta)
4187 {
4188 if (!bss_conf->he_support ||
4189 bss_conf->he_oper.params & IEEE80211_HE_OPERATION_ER_SU_DISABLE)
4190 return false;
4191
4192 if (rtwdev->chip->chip_id == RTL8852C &&
4193 rtw89_sta_link_has_su_mu_4xhe08(link_sta) &&
4194 !rtw89_sta_link_has_er_su_4xhe08(link_sta))
4195 return false;
4196
4197 return true;
4198 }
4199
4200 int rtw89_core_sta_link_assoc(struct rtw89_dev *rtwdev,
4201 struct rtw89_vif_link *rtwvif_link,
4202 struct rtw89_sta_link *rtwsta_link)
4203 {
4204 const struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
4205 const struct ieee80211_sta *sta = rtwsta_link_to_sta(rtwsta_link);
4206 struct rtw89_bssid_cam_entry *bssid_cam = rtw89_get_bssid_cam_of(rtwvif_link,
4207 rtwsta_link);
4208 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
4209 rtwvif_link->chanctx_idx);
4210 struct ieee80211_link_sta *link_sta;
4211 int ret;
4212
4213 if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
4214 if (sta->tdls) {
4215 rcu_read_lock();
4216
4217 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
4218 ret = rtw89_cam_init_bssid_cam(rtwdev, rtwvif_link, bssid_cam,
4219 link_sta->addr);
4220 if (ret) {
4221 rtw89_warn(rtwdev, "failed to send h2c init bssid cam for TDLS\n");
4222 rcu_read_unlock();
4223 return ret;
4224 }
4225
4226 rcu_read_unlock();
4227 }
4228
4229 ret = rtw89_cam_init_addr_cam(rtwdev, &rtwsta_link->addr_cam, bssid_cam);
4230 if (ret) {
4231 rtw89_warn(rtwdev, "failed to send h2c init addr cam\n");
4232 return ret;
4233 }
4234 }
4235
4236 ret = rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, rtwvif_link, rtwsta_link);
4237 if (ret) {
4238 rtw89_warn(rtwdev, "failed to send h2c cmac table\n");
4239 return ret;
4240 }
4241
4242 ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif_link, rtwsta_link, false);
4243 if (ret) {
4244 rtw89_warn(rtwdev, "failed to send h2c join info\n");
4245 return ret;
4246 }
4247
4248 /* update cam aid mac_id net_type */
4249 ret = rtw89_fw_h2c_cam(rtwdev, rtwvif_link, rtwsta_link, NULL);
4250 if (ret) {
4251 rtw89_warn(rtwdev, "failed to send h2c cam\n");
4252 return ret;
4253 }
4254
4255 rtw89_phy_ra_assoc(rtwdev, rtwsta_link);
4256 rtw89_mac_bf_assoc(rtwdev, rtwvif_link, rtwsta_link);
4257 rtw89_mac_bf_monitor_calc(rtwdev, rtwsta_link, false);
4258
4259 if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
4260 struct ieee80211_bss_conf *bss_conf;
4261
4262 rcu_read_lock();
4263
4264 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
4265 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
4266 rtwsta_link->er_cap = rtw89_sta_link_can_er(rtwdev, bss_conf, link_sta);
4267
4268 rcu_read_unlock();
4269
4270 rtw89_btc_ntfy_role_info(rtwdev, rtwvif_link, rtwsta_link,
4271 BTC_ROLE_MSTS_STA_CONN_END);
4272 rtw89_core_get_no_ul_ofdma_htc(rtwdev, &rtwsta_link->htc_template, chan);
4273 rtw89_phy_ul_tb_assoc(rtwdev, rtwvif_link);
4274
4275 ret = rtw89_fw_h2c_general_pkt(rtwdev, rtwvif_link, rtwsta_link->mac_id);
4276 if (ret) {
4277 rtw89_warn(rtwdev, "failed to send h2c general packet\n");
4278 return ret;
4279 }
4280
4281 rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, true);
4282
4283 if (vif->p2p)
4284 rtw89_mac_set_tx_retry_limit(rtwdev, rtwsta_link, false,
4285 rtwsta_link->tx_retry);
4286 rtw89_phy_dig_resume(rtwdev, false);
4287 }
4288
4289 rtw89_assoc_link_set(rtwsta_link);
4290 return ret;
4291 }
4292
4293 int rtw89_core_sta_link_remove(struct rtw89_dev *rtwdev,
4294 struct rtw89_vif_link *rtwvif_link,
4295 struct rtw89_sta_link *rtwsta_link)
4296 {
4297 const struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
4298 const struct ieee80211_sta *sta = rtwsta_link_to_sta(rtwsta_link);
4299 int ret;
4300
4301 if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
4302 rtw89_reg_6ghz_recalc(rtwdev, rtwvif_link, false);
4303 rtw89_btc_ntfy_role_info(rtwdev, rtwvif_link, rtwsta_link,
4304 BTC_ROLE_MSTS_STA_DIS_CONN);
4305
4306 if (vif->p2p)
4307 rtw89_mac_set_tx_retry_limit(rtwdev, rtwsta_link, false,
4308 rtwsta_link->tx_retry);
4309 } else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
4310 ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif_link, rtwsta_link,
4311 RTW89_ROLE_REMOVE);
4312 if (ret) {
4313 rtw89_warn(rtwdev, "failed to send h2c role info\n");
4314 return ret;
4315 }
4316 }
4317
4318 return 0;
4319 }
4320
4321 static void _rtw89_core_set_tid_config(struct rtw89_dev *rtwdev,
4322 struct ieee80211_sta *sta,
4323 struct cfg80211_tid_cfg *tid_conf)
4324 {
4325 struct ieee80211_txq *txq;
4326 struct rtw89_txq *rtwtxq;
4327 u32 mask = tid_conf->mask;
4328 u8 tids = tid_conf->tids;
4329 int tids_nbit = BITS_PER_BYTE;
4330 int i;
4331
4332 for (i = 0; i < tids_nbit; i++, tids >>= 1) {
4333 if (!tids)
4334 break;
4335
4336 if (!(tids & BIT(0)))
4337 continue;
4338
4339 txq = sta->txq[i];
4340 rtwtxq = (struct rtw89_txq *)txq->drv_priv;
4341
4342 if (mask & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
4343 if (tid_conf->ampdu == NL80211_TID_CONFIG_ENABLE) {
4344 clear_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags);
4345 } else {
4346 if (test_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags))
4347 ieee80211_stop_tx_ba_session(sta, txq->tid);
4348 spin_lock_bh(&rtwdev->ba_lock);
4349 list_del_init(&rtwtxq->list);
4350 set_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags);
4351 spin_unlock_bh(&rtwdev->ba_lock);
4352 }
4353 }
4354
4355 if (mask & BIT(NL80211_TID_CONFIG_ATTR_AMSDU_CTRL) && tids == 0xff) {
4356 if (tid_conf->amsdu == NL80211_TID_CONFIG_ENABLE)
4357 sta->max_amsdu_subframes = 0;
4358 else
4359 sta->max_amsdu_subframes = 1;
4360 }
4361 }
4362 }
4363
4364 void rtw89_core_set_tid_config(struct rtw89_dev *rtwdev,
4365 struct ieee80211_sta *sta,
4366 struct cfg80211_tid_config *tid_config)
4367 {
4368 int i;
4369
4370 for (i = 0; i < tid_config->n_tid_conf; i++)
4371 _rtw89_core_set_tid_config(rtwdev, sta,
4372 &tid_config->tid_conf[i]);
4373 }
4374
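/* Note: highest[] below is the HT rx_highest rate in Mbps per spatial stream
 * count (1..4); the values appear to correspond to MCS 7/15/23/31 at 40 MHz
 * with short GI.
 */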
4375 static void rtw89_init_ht_cap(struct rtw89_dev *rtwdev,
4376 struct ieee80211_sta_ht_cap *ht_cap)
4377 {
4378 static const __le16 highest[RF_PATH_MAX] = {
4379 cpu_to_le16(150), cpu_to_le16(300), cpu_to_le16(450), cpu_to_le16(600),
4380 };
4381 struct rtw89_hal *hal = &rtwdev->hal;
4382 u8 nss = hal->rx_nss;
4383 int i;
4384
4385 ht_cap->ht_supported = true;
4386 ht_cap->cap = 0;
4387 ht_cap->cap |= IEEE80211_HT_CAP_SGI_20 |
4388 IEEE80211_HT_CAP_MAX_AMSDU |
4389 IEEE80211_HT_CAP_TX_STBC |
4390 (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
4391 ht_cap->cap |= IEEE80211_HT_CAP_LDPC_CODING;
4392 ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
4393 IEEE80211_HT_CAP_DSSSCCK40 |
4394 IEEE80211_HT_CAP_SGI_40;
4395 ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
4396 ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
4397 ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
4398 for (i = 0; i < nss; i++)
4399 ht_cap->mcs.rx_mask[i] = 0xFF;
4400 ht_cap->mcs.rx_mask[4] = 0x01;
4401 ht_cap->mcs.rx_highest = highest[nss - 1];
4402 }
4403
4404 static void rtw89_init_vht_cap(struct rtw89_dev *rtwdev,
4405 struct ieee80211_sta_vht_cap *vht_cap)
4406 {
4407 static const __le16 highest_bw80[RF_PATH_MAX] = {
4408 cpu_to_le16(433), cpu_to_le16(867), cpu_to_le16(1300), cpu_to_le16(1733),
4409 };
4410 static const __le16 highest_bw160[RF_PATH_MAX] = {
4411 cpu_to_le16(867), cpu_to_le16(1733), cpu_to_le16(2600), cpu_to_le16(3467),
4412 };
4413 const struct rtw89_chip_info *chip = rtwdev->chip;
4414 const __le16 *highest = chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160) ?
4415 highest_bw160 : highest_bw80;
4416 struct rtw89_hal *hal = &rtwdev->hal;
4417 u16 tx_mcs_map = 0, rx_mcs_map = 0;
4418 u8 sts_cap = 3;
4419 int i;
4420
4421 for (i = 0; i < 8; i++) {
4422 if (i < hal->tx_nss)
4423 tx_mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
4424 else
4425 tx_mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
4426 if (i < hal->rx_nss)
4427 rx_mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
4428 else
4429 rx_mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
4430 }
4431
4432 vht_cap->vht_supported = true;
4433 vht_cap->cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
4434 IEEE80211_VHT_CAP_SHORT_GI_80 |
4435 IEEE80211_VHT_CAP_RXSTBC_1 |
4436 IEEE80211_VHT_CAP_HTC_VHT |
4437 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
4438 0;
4439 vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
4440 vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
4441 vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
4442 IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
4443 vht_cap->cap |= sts_cap << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
4444 if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160))
4445 vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
4446 IEEE80211_VHT_CAP_SHORT_GI_160;
4447 vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(rx_mcs_map);
4448 vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(tx_mcs_map);
4449 vht_cap->vht_mcs.rx_highest = highest[hal->rx_nss - 1];
4450 vht_cap->vht_mcs.tx_highest = highest[hal->tx_nss - 1];
4451
4452 if (ieee80211_hw_check(rtwdev->hw, SUPPORTS_VHT_EXT_NSS_BW))
4453 vht_cap->vht_mcs.tx_highest |=
4454 cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
4455 }
4456
4457 static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
4458 enum nl80211_band band,
4459 enum nl80211_iftype iftype,
4460 struct ieee80211_sband_iftype_data *iftype_data)
4461 {
4462 const struct rtw89_chip_info *chip = rtwdev->chip;
4463 struct rtw89_hal *hal = &rtwdev->hal;
4464 bool no_ng16 = (chip->chip_id == RTL8852A && hal->cv == CHIP_CBV) ||
4465 (chip->chip_id == RTL8852B && hal->cv == CHIP_CAV);
4466 struct ieee80211_sta_he_cap *he_cap;
4467 int nss = hal->rx_nss;
4468 u8 *mac_cap_info;
4469 u8 *phy_cap_info;
4470 u16 mcs_map = 0;
4471 int i;
4472
4473 for (i = 0; i < 8; i++) {
4474 if (i < nss)
4475 mcs_map |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2);
4476 else
4477 mcs_map |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2);
4478 }
4479
4480 he_cap = &iftype_data->he_cap;
4481 mac_cap_info = he_cap->he_cap_elem.mac_cap_info;
4482 phy_cap_info = he_cap->he_cap_elem.phy_cap_info;
4483
4484 he_cap->has_he = true;
4485 mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE;
4486 if (iftype == NL80211_IFTYPE_STATION)
4487 mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
4488 mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_ALL_ACK |
4489 IEEE80211_HE_MAC_CAP2_BSR;
4490 mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2;
4491 if (iftype == NL80211_IFTYPE_AP)
4492 mac_cap_info[3] |= IEEE80211_HE_MAC_CAP3_OMI_CONTROL;
4493 mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_OPS |
4494 IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU;
4495 if (iftype == NL80211_IFTYPE_STATION)
4496 mac_cap_info[5] = IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX;
4497 if (band == NL80211_BAND_2GHZ) {
4498 phy_cap_info[0] =
4499 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
4500 } else {
4501 phy_cap_info[0] =
4502 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
4503 if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160))
4504 phy_cap_info[0] |= IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
4505 }
4506 phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
4507 IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
4508 IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
4509 phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
4510 IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
4511 IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
4512 IEEE80211_HE_PHY_CAP2_DOPPLER_TX;
4513 phy_cap_info[3] = IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM;
4514 if (iftype == NL80211_IFTYPE_STATION)
4515 phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM |
4516 IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2;
4517 if (iftype == NL80211_IFTYPE_AP)
4518 phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU;
4519 phy_cap_info[4] = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
4520 IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4;
4521 if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160))
4522 phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
4523 phy_cap_info[5] = no_ng16 ? 0 :
4524 IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
4525 IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
4526 phy_cap_info[6] = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
4527 IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
4528 IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
4529 IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE;
4530 phy_cap_info[7] = IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
4531 IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
4532 IEEE80211_HE_PHY_CAP7_MAX_NC_1;
4533 phy_cap_info[8] = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
4534 IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI |
4535 IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996;
4536 if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160))
4537 phy_cap_info[8] |= IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
4538 IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
4539 phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
4540 IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
4541 IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
4542 IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
4543 u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
4544 IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
4545 if (iftype == NL80211_IFTYPE_STATION)
4546 phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
4547 he_cap->he_mcs_nss_supp.rx_mcs_80 = cpu_to_le16(mcs_map);
4548 he_cap->he_mcs_nss_supp.tx_mcs_80 = cpu_to_le16(mcs_map);
4549 if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160)) {
4550 he_cap->he_mcs_nss_supp.rx_mcs_160 = cpu_to_le16(mcs_map);
4551 he_cap->he_mcs_nss_supp.tx_mcs_160 = cpu_to_le16(mcs_map);
4552 }
4553
4554 if (band == NL80211_BAND_6GHZ) {
4555 __le16 capa;
4556
4557 capa = le16_encode_bits(IEEE80211_HT_MPDU_DENSITY_NONE,
4558 IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) |
4559 le16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K,
4560 IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) |
4561 le16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
4562 IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
4563 iftype_data->he_6ghz_capa.capa = capa;
4564 }
4565 }
4566
4567 static void rtw89_init_eht_cap(struct rtw89_dev *rtwdev,
4568 enum nl80211_band band,
4569 enum nl80211_iftype iftype,
4570 struct ieee80211_sband_iftype_data *iftype_data)
4571 {
4572 const struct rtw89_chip_info *chip = rtwdev->chip;
4573 struct ieee80211_eht_cap_elem_fixed *eht_cap_elem;
4574 struct ieee80211_eht_mcs_nss_supp *eht_nss;
4575 struct ieee80211_sta_eht_cap *eht_cap;
4576 struct rtw89_hal *hal = &rtwdev->hal;
4577 bool support_mcs_12_13 = true;
4578 bool support_320mhz = false;
4579 u8 val, val_mcs13;
4580 int sts = 8;
4581
4582 if (chip->chip_gen == RTW89_CHIP_AX)
4583 return;
4584
4585 if (hal->no_mcs_12_13)
4586 support_mcs_12_13 = false;
4587
4588 if (band == NL80211_BAND_6GHZ &&
4589 chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_320))
4590 support_320mhz = true;
4591
4592 eht_cap = &iftype_data->eht_cap;
4593 eht_cap_elem = &eht_cap->eht_cap_elem;
4594 eht_nss = &eht_cap->eht_mcs_nss_supp;
4595
4596 eht_cap->has_eht = true;
4597
4598 eht_cap_elem->mac_cap_info[0] =
4599 u8_encode_bits(IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_7991,
4600 IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK);
4601 eht_cap_elem->mac_cap_info[1] = 0;
4602
4603 eht_cap_elem->phy_cap_info[0] =
4604 IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
4605 IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE;
4606 if (support_320mhz)
4607 eht_cap_elem->phy_cap_info[0] |=
4608 IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
4609
4610 eht_cap_elem->phy_cap_info[0] |=
4611 u8_encode_bits(u8_get_bits(sts - 1, BIT(0)),
4612 IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK);
4613 eht_cap_elem->phy_cap_info[1] =
4614 u8_encode_bits(u8_get_bits(sts - 1, GENMASK(2, 1)),
4615 IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK) |
4616 u8_encode_bits(sts - 1,
4617 IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK);
4618 if (support_320mhz)
4619 eht_cap_elem->phy_cap_info[1] |=
4620 u8_encode_bits(sts - 1,
4621 IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK);
4622
4623 eht_cap_elem->phy_cap_info[2] = 0;
4624
4625 eht_cap_elem->phy_cap_info[3] =
4626 IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
4627 IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
4628 IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
4629 IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK;
4630
4631 eht_cap_elem->phy_cap_info[4] =
4632 IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
4633 u8_encode_bits(1, IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK);
4634
4635 eht_cap_elem->phy_cap_info[5] =
4636 u8_encode_bits(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US,
4637 IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK);
4638
4639 eht_cap_elem->phy_cap_info[6] = 0;
4640 eht_cap_elem->phy_cap_info[7] = 0;
4641 eht_cap_elem->phy_cap_info[8] = 0;
4642
4643 val = u8_encode_bits(hal->rx_nss, IEEE80211_EHT_MCS_NSS_RX) |
4644 u8_encode_bits(hal->tx_nss, IEEE80211_EHT_MCS_NSS_TX);
4645 val_mcs13 = support_mcs_12_13 ? val : 0;
4646
4647 eht_nss->bw._80.rx_tx_mcs9_max_nss = val;
4648 eht_nss->bw._80.rx_tx_mcs11_max_nss = val;
4649 eht_nss->bw._80.rx_tx_mcs13_max_nss = val_mcs13;
4650 eht_nss->bw._160.rx_tx_mcs9_max_nss = val;
4651 eht_nss->bw._160.rx_tx_mcs11_max_nss = val;
4652 eht_nss->bw._160.rx_tx_mcs13_max_nss = val_mcs13;
4653 if (support_320mhz) {
4654 eht_nss->bw._320.rx_tx_mcs9_max_nss = val;
4655 eht_nss->bw._320.rx_tx_mcs11_max_nss = val;
4656 eht_nss->bw._320.rx_tx_mcs13_max_nss = val_mcs13;
4657 }
4658 }
4659
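/* Only NL80211_IFTYPE_STATION and NL80211_IFTYPE_AP get per-iftype HE/EHT
 * capability data, hence two entries per sband.
 */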
4660 #define RTW89_SBAND_IFTYPES_NR 2
4661
4662 static int rtw89_init_he_eht_cap(struct rtw89_dev *rtwdev,
4663 enum nl80211_band band,
4664 struct ieee80211_supported_band *sband)
4665 {
4666 struct ieee80211_sband_iftype_data *iftype_data;
4667 enum nl80211_iftype iftype;
4668 int idx = 0;
4669
4670 iftype_data = devm_kcalloc(rtwdev->dev, RTW89_SBAND_IFTYPES_NR,
4671 sizeof(*iftype_data), GFP_KERNEL);
4672 if (!iftype_data)
4673 return -ENOMEM;
4674
4675 for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
4676 switch (iftype) {
4677 case NL80211_IFTYPE_STATION:
4678 case NL80211_IFTYPE_AP:
4679 break;
4680 default:
4681 continue;
4682 }
4683
4684 if (idx >= RTW89_SBAND_IFTYPES_NR) {
4685 rtw89_warn(rtwdev, "run out of iftype_data\n");
4686 break;
4687 }
4688
4689 iftype_data[idx].types_mask = BIT(iftype);
4690
4691 rtw89_init_he_cap(rtwdev, band, iftype, &iftype_data[idx]);
4692 rtw89_init_eht_cap(rtwdev, band, iftype, &iftype_data[idx]);
4693
4694 idx++;
4695 }
4696
4697 _ieee80211_set_sband_iftype_data(sband, iftype_data, idx);
4698 return 0;
4699 }
4700
4701 static struct ieee80211_supported_band *
4702 rtw89_core_sband_dup(struct rtw89_dev *rtwdev,
4703 const struct ieee80211_supported_band *sband)
4704 {
4705 struct ieee80211_supported_band *dup;
4706
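/* Everything here is devm-allocated, so partial failures can simply
 * return NULL without unwinding the earlier duplications.
 */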
4707 dup = devm_kmemdup(rtwdev->dev, sband, sizeof(*sband), GFP_KERNEL);
4708 if (!dup)
4709 return NULL;
4710
4711 dup->channels = devm_kmemdup(rtwdev->dev, sband->channels,
4712 sizeof(*sband->channels) * sband->n_channels,
4713 GFP_KERNEL);
4714 if (!dup->channels)
4715 return NULL;
4716
4717 dup->bitrates = devm_kmemdup(rtwdev->dev, sband->bitrates,
4718 sizeof(*sband->bitrates) * sband->n_bitrates,
4719 GFP_KERNEL);
4720 if (!dup->bitrates)
4721 return NULL;
4722
4723 return dup;
4724 }
4725
4726 static int rtw89_core_set_supported_band(struct rtw89_dev *rtwdev)
4727 {
4728 struct ieee80211_hw *hw = rtwdev->hw;
4729 struct ieee80211_supported_band *sband;
4730 u8 support_bands = rtwdev->chip->support_bands;
4731 int ret;
4732
4733 if (support_bands & BIT(NL80211_BAND_2GHZ)) {
4734 sband = rtw89_core_sband_dup(rtwdev, &rtw89_sband_2ghz);
4735 if (!sband)
4736 return -ENOMEM;
4737 rtw89_init_ht_cap(rtwdev, &sband->ht_cap);
4738 ret = rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_2GHZ, sband);
4739 if (ret)
4740 return ret;
4741 hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
4742 }
4743
4744 if (support_bands & BIT(NL80211_BAND_5GHZ)) {
4745 sband = rtw89_core_sband_dup(rtwdev, &rtw89_sband_5ghz);
4746 if (!sband)
4747 return -ENOMEM;
4748 rtw89_init_ht_cap(rtwdev, &sband->ht_cap);
4749 rtw89_init_vht_cap(rtwdev, &sband->vht_cap);
4750 ret = rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_5GHZ, sband);
4751 if (ret)
4752 return ret;
4753 hw->wiphy->bands[NL80211_BAND_5GHZ] = sband;
4754 }
4755
4756 if (support_bands & BIT(NL80211_BAND_6GHZ)) {
4757 sband = rtw89_core_sband_dup(rtwdev, &rtw89_sband_6ghz);
4758 if (!sband)
4759 return -ENOMEM;
4760 ret = rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_6GHZ, sband);
4761 if (ret)
4762 return ret;
4763 hw->wiphy->bands[NL80211_BAND_6GHZ] = sband;
4764 }
4765
4766 return 0;
4767 }
4768
4769 static void rtw89_core_ppdu_sts_init(struct rtw89_dev *rtwdev)
4770 {
4771 int i;
4772
4773 for (i = 0; i < RTW89_PHY_NUM; i++)
4774 skb_queue_head_init(&rtwdev->ppdu_sts.rx_queue[i]);
4775 for (i = 0; i < RTW89_PHY_NUM; i++)
4776 rtwdev->ppdu_sts.curr_rx_ppdu_cnt[i] = U8_MAX;
4777 }
4778
4779 void rtw89_core_update_beacon_work(struct wiphy *wiphy, struct wiphy_work *work)
4780 {
4781 struct rtw89_dev *rtwdev;
4782 struct rtw89_vif_link *rtwvif_link = container_of(work, struct rtw89_vif_link,
4783 update_beacon_work);
4784
4785 lockdep_assert_wiphy(wiphy);
4786
4787 if (rtwvif_link->net_type != RTW89_NET_TYPE_AP_MODE)
4788 return;
4789
4790 rtwdev = rtwvif_link->rtwvif->rtwdev;
4791
4792 rtw89_chip_h2c_update_beacon(rtwdev, rtwvif_link);
4793 }
4794
4795 void rtw89_core_csa_beacon_work(struct wiphy *wiphy, struct wiphy_work *work)
4796 {
4797 struct rtw89_vif_link *rtwvif_link =
4798 container_of(work, struct rtw89_vif_link, csa_beacon_work.work);
4799 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
4800 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
4801 struct rtw89_dev *rtwdev = rtwvif->rtwdev;
4802 struct ieee80211_bss_conf *bss_conf;
4803 unsigned int delay;
4804
4805 lockdep_assert_wiphy(wiphy);
4806
4807 if (rtwvif_link->net_type != RTW89_NET_TYPE_AP_MODE)
4808 return;
4809
4810 rcu_read_lock();
4811
4812 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
4813 if (!bss_conf->csa_active) {
4814 rcu_read_unlock();
4815 return;
4816 }
4817
4818 delay = ieee80211_tu_to_usec(bss_conf->beacon_int);
4819
4820 rcu_read_unlock();
4821
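/* Refresh the beacon and re-arm once per beacon interval until the CSA
 * countdown completes, then finalize the channel switch.
 */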
4822 if (!ieee80211_beacon_cntdwn_is_complete(vif, rtwvif_link->link_id)) {
4823 rtw89_chip_h2c_update_beacon(rtwdev, rtwvif_link);
4824
4825 wiphy_delayed_work_queue(wiphy, &rtwvif_link->csa_beacon_work,
4826 usecs_to_jiffies(delay));
4827 } else {
4828 ieee80211_csa_finish(vif, rtwvif_link->link_id);
4829 }
4830 }
4831
4832 int rtw89_wait_for_cond(struct rtw89_wait_info *wait, unsigned int cond)
4833 {
4834 struct completion *cmpl = &wait->completion;
4835 unsigned long time_left;
4836 unsigned int cur;
4837
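/* Only one condition may be outstanding at a time; a non-idle value means
 * another wait is already in flight.
 */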
4838 cur = atomic_cmpxchg(&wait->cond, RTW89_WAIT_COND_IDLE, cond);
4839 if (cur != RTW89_WAIT_COND_IDLE)
4840 return -EBUSY;
4841
4842 time_left = wait_for_completion_timeout(cmpl, RTW89_WAIT_FOR_COND_TIMEOUT);
4843 if (time_left == 0) {
4844 atomic_set(&wait->cond, RTW89_WAIT_COND_IDLE);
4845 return -ETIMEDOUT;
4846 }
4847
4848 if (wait->data.err)
4849 return -EFAULT;
4850
4851 return 0;
4852 }
4853
4854 void rtw89_complete_cond(struct rtw89_wait_info *wait, unsigned int cond,
4855 const struct rtw89_completion_data *data)
4856 {
4857 unsigned int cur;
4858
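/* Release the waiter only if it is waiting for exactly this condition;
 * stale or unexpected completions are ignored.
 */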
4859 cur = atomic_cmpxchg(&wait->cond, cond, RTW89_WAIT_COND_IDLE);
4860 if (cur != cond)
4861 return;
4862
4863 wait->data = *data;
4864 complete(&wait->completion);
4865 }
4866
4867 void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg event)
4868 {
4869 u16 bt_req_len;
4870
4871 switch (event) {
4872 case RTW89_BTC_HMSG_SET_BT_REQ_SLOT:
4873 bt_req_len = rtw89_coex_query_bt_req_len(rtwdev, RTW89_PHY_0);
4874 rtw89_debug(rtwdev, RTW89_DBG_BTC,
4875 "coex updates BT req len to %d TU\n", bt_req_len);
4876 rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_BT_SLOT_CHANGE);
4877 break;
4878 default:
4879 if (event < NUM_OF_RTW89_BTC_HMSG)
4880 rtw89_debug(rtwdev, RTW89_DBG_BTC,
4881 "unhandled BTC HMSG event: %d\n", event);
4882 else
4883 rtw89_warn(rtwdev,
4884 "unrecognized BTC HMSG event: %d\n", event);
4885 break;
4886 }
4887 }
4888
4889 void rtw89_check_quirks(struct rtw89_dev *rtwdev, const struct dmi_system_id *quirks)
4890 {
4891 const struct dmi_system_id *match;
4892 enum rtw89_quirks quirk;
4893
4894 if (!quirks)
4895 return;
4896
4897 for (match = dmi_first_match(quirks); match; match = dmi_first_match(match + 1)) {
4898 quirk = (uintptr_t)match->driver_data;
4899 if (quirk >= NUM_OF_RTW89_QUIRKS)
4900 continue;
4901
4902 set_bit(quirk, rtwdev->quirks);
4903 }
4904 }
4905 EXPORT_SYMBOL(rtw89_check_quirks);
4906
4907 int rtw89_core_start(struct rtw89_dev *rtwdev)
4908 {
4909 int ret;
4910
4911 ret = rtw89_mac_init(rtwdev);
4912 if (ret) {
4913 rtw89_err(rtwdev, "mac init fail, ret:%d\n", ret);
4914 return ret;
4915 }
4916
4917 rtw89_btc_ntfy_poweron(rtwdev);
4918
4919 /* efuse process */
4920
4921 /* pre-config BB/RF, BB reset/RFC reset */
4922 ret = rtw89_chip_reset_bb_rf(rtwdev);
4923 if (ret)
4924 return ret;
4925
4926 rtw89_phy_init_bb_reg(rtwdev);
4927 rtw89_chip_bb_postinit(rtwdev);
4928 rtw89_phy_init_rf_reg(rtwdev, false);
4929
4930 rtw89_btc_ntfy_init(rtwdev, BTC_MODE_NORMAL);
4931
4932 rtw89_phy_dm_init(rtwdev);
4933
4934 rtw89_mac_cfg_ppdu_status_bands(rtwdev, true);
4935 rtw89_mac_cfg_phy_rpt_bands(rtwdev, true);
4936 rtw89_mac_update_rts_threshold(rtwdev);
4937
4938 ret = rtw89_hci_start(rtwdev);
4939 if (ret) {
4940 rtw89_err(rtwdev, "failed to start hci\n");
4941 return ret;
4942 }
4943
4944 wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->track_work,
4945 RTW89_TRACK_WORK_PERIOD);
4946 wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->track_ps_work,
4947 RTW89_TRACK_PS_WORK_PERIOD);
4948
4949 set_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
4950
4951 rtw89_chip_rfk_init_late(rtwdev);
4952 rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON);
4953 rtw89_fw_h2c_fw_log(rtwdev, rtwdev->fw.log.enable);
4954 rtw89_fw_h2c_init_ba_cam(rtwdev);
4955
4956 return 0;
4957 }
4958
4959 void rtw89_core_stop(struct rtw89_dev *rtwdev)
4960 {
4961 struct wiphy *wiphy = rtwdev->hw->wiphy;
4962 struct rtw89_btc *btc = &rtwdev->btc;
4963
4964 lockdep_assert_wiphy(wiphy);
4965
4966 /* Prevent stopping twice: enter_ips and ops_stop */
4967 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
4968 return;
4969
4970 rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_OFF);
4971
4972 clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
4973
4974 wiphy_work_cancel(wiphy, &rtwdev->c2h_work);
4975 wiphy_work_cancel(wiphy, &rtwdev->cancel_6ghz_probe_work);
4976 wiphy_work_cancel(wiphy, &btc->eapol_notify_work);
4977 wiphy_work_cancel(wiphy, &btc->arp_notify_work);
4978 wiphy_work_cancel(wiphy, &btc->dhcp_notify_work);
4979 wiphy_work_cancel(wiphy, &btc->icmp_notify_work);
4980 cancel_delayed_work_sync(&rtwdev->txq_reinvoke_work);
4981 wiphy_delayed_work_cancel(wiphy, &rtwdev->track_work);
4982 wiphy_delayed_work_cancel(wiphy, &rtwdev->track_ps_work);
4983 wiphy_delayed_work_cancel(wiphy, &rtwdev->chanctx_work);
4984 wiphy_delayed_work_cancel(wiphy, &rtwdev->coex_act1_work);
4985 wiphy_delayed_work_cancel(wiphy, &rtwdev->coex_bt_devinfo_work);
4986 wiphy_delayed_work_cancel(wiphy, &rtwdev->coex_rfk_chk_work);
4987 wiphy_delayed_work_cancel(wiphy, &rtwdev->cfo_track_work);
4988 wiphy_delayed_work_cancel(wiphy, &rtwdev->mcc_prepare_done_work);
4989 cancel_delayed_work_sync(&rtwdev->forbid_ba_work);
4990 wiphy_delayed_work_cancel(wiphy, &rtwdev->antdiv_work);
4991
4992 rtw89_btc_ntfy_poweroff(rtwdev);
4993 rtw89_hci_flush_queues(rtwdev, BIT(rtwdev->hw->queues) - 1, true);
4994 rtw89_mac_flush_txq(rtwdev, BIT(rtwdev->hw->queues) - 1, true);
4995 rtw89_hci_stop(rtwdev);
4996 rtw89_hci_deinit(rtwdev);
4997 rtw89_mac_pwr_off(rtwdev);
4998 rtw89_hci_reset(rtwdev);
4999 }
5000
5001 u8 rtw89_acquire_mac_id(struct rtw89_dev *rtwdev)
5002 {
5003 const struct rtw89_chip_info *chip = rtwdev->chip;
5004 u8 mac_id_num;
5005 u8 mac_id;
5006
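/* Under MLO only the first (macid_num / link_num) ids are handed out here;
 * the per-link macids of the remaining instances are derived from these in
 * rtw89_init_vif()/rtw89_init_sta().
 */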
5007 if (rtwdev->support_mlo)
5008 mac_id_num = chip->support_macid_num / chip->support_link_num;
5009 else
5010 mac_id_num = chip->support_macid_num;
5011
5012 mac_id = find_first_zero_bit(rtwdev->mac_id_map, mac_id_num);
5013 if (mac_id == mac_id_num)
5014 return RTW89_MAX_MAC_ID_NUM;
5015
5016 set_bit(mac_id, rtwdev->mac_id_map);
5017 return mac_id;
5018 }
5019
5020 void rtw89_release_mac_id(struct rtw89_dev *rtwdev, u8 mac_id)
5021 {
5022 clear_bit(mac_id, rtwdev->mac_id_map);
5023 }
5024
5025 void rtw89_init_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
5026 u8 mac_id, u8 port)
5027 {
5028 const struct rtw89_chip_info *chip = rtwdev->chip;
5029 u8 support_link_num = chip->support_link_num;
5030 u8 support_mld_num = 0;
5031 unsigned int link_id;
5032 u8 index;
5033
5034 bitmap_zero(rtwvif->links_inst_map, __RTW89_MLD_MAX_LINK_NUM);
5035 for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++)
5036 rtwvif->links[link_id] = NULL;
5037
5038 rtwvif->rtwdev = rtwdev;
5039
5040 if (rtwdev->support_mlo) {
5041 rtwvif->links_inst_valid_num = support_link_num;
5042 support_mld_num = chip->support_macid_num / support_link_num;
5043 } else {
5044 rtwvif->links_inst_valid_num = 1;
5045 }
5046
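/* Each link instance gets its own MAC/PHY index, and macids are spaced by
 * the supported MLD count so per-link ids stay unique across MLDs.
 */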
5047 for (index = 0; index < rtwvif->links_inst_valid_num; index++) {
5048 struct rtw89_vif_link *inst = &rtwvif->links_inst[index];
5049
5050 inst->rtwvif = rtwvif;
5051 inst->mac_id = mac_id + index * support_mld_num;
5052 inst->mac_idx = RTW89_MAC_0 + index;
5053 inst->phy_idx = RTW89_PHY_0 + index;
5054
5055 /* all links of a multi-link vif use the same port id on different HW bands */
5056 inst->port = port;
5057 }
5058 }
5059
5060 void rtw89_init_sta(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
5061 struct rtw89_sta *rtwsta, u8 mac_id)
5062 {
5063 const struct rtw89_chip_info *chip = rtwdev->chip;
5064 u8 support_link_num = chip->support_link_num;
5065 u8 support_mld_num = 0;
5066 unsigned int link_id;
5067 u8 index;
5068
5069 bitmap_zero(rtwsta->links_inst_map, __RTW89_MLD_MAX_LINK_NUM);
5070 for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++)
5071 rtwsta->links[link_id] = NULL;
5072
5073 rtwsta->rtwdev = rtwdev;
5074 rtwsta->rtwvif = rtwvif;
5075
5076 if (rtwdev->support_mlo) {
5077 rtwsta->links_inst_valid_num = support_link_num;
5078 support_mld_num = chip->support_macid_num / support_link_num;
5079 } else {
5080 rtwsta->links_inst_valid_num = 1;
5081 }
5082
5083 for (index = 0; index < rtwsta->links_inst_valid_num; index++) {
5084 struct rtw89_sta_link *inst = &rtwsta->links_inst[index];
5085
5086 inst->rtwvif_link = &rtwvif->links_inst[index];
5087
5088 inst->rtwsta = rtwsta;
5089 inst->mac_id = mac_id + index * support_mld_num;
5090 }
5091 }
5092
5093 struct rtw89_vif_link *rtw89_vif_set_link(struct rtw89_vif *rtwvif,
5094 unsigned int link_id)
5095 {
5096 struct rtw89_vif_link *rtwvif_link = rtwvif->links[link_id];
5097 u8 index;
5098 int ret;
5099
5100 if (rtwvif_link)
5101 return rtwvif_link;
5102
5103 index = find_first_zero_bit(rtwvif->links_inst_map,
5104 rtwvif->links_inst_valid_num);
5105 if (index == rtwvif->links_inst_valid_num) {
5106 ret = -EBUSY;
5107 goto err;
5108 }
5109
5110 rtwvif_link = &rtwvif->links_inst[index];
5111 rtwvif_link->link_id = link_id;
5112
5113 set_bit(index, rtwvif->links_inst_map);
5114 rtwvif->links[link_id] = rtwvif_link;
5115 list_add_tail(&rtwvif_link->dlink_schd, &rtwvif->dlink_pool);
5116 return rtwvif_link;
5117
5118 err:
5119 rtw89_err(rtwvif->rtwdev, "vif (link_id %u) failed to set link: %d\n",
5120 link_id, ret);
5121 return NULL;
5122 }
5123
5124 void rtw89_vif_unset_link(struct rtw89_vif *rtwvif, unsigned int link_id)
5125 {
5126 struct rtw89_vif_link **container = &rtwvif->links[link_id];
5127 struct rtw89_vif_link *link = *container;
5128 u8 index;
5129
5130 if (!link)
5131 return;
5132
5133 index = rtw89_vif_link_inst_get_index(link);
5134 clear_bit(index, rtwvif->links_inst_map);
5135 *container = NULL;
5136 list_del(&link->dlink_schd);
5137 }
5138
5139 struct rtw89_sta_link *rtw89_sta_set_link(struct rtw89_sta *rtwsta,
5140 unsigned int link_id)
5141 {
5142 struct rtw89_vif *rtwvif = rtwsta->rtwvif;
5143 struct rtw89_vif_link *rtwvif_link = rtwvif->links[link_id];
5144 struct rtw89_sta_link *rtwsta_link = rtwsta->links[link_id];
5145 u8 index;
5146 int ret;
5147
5148 if (rtwsta_link)
5149 return rtwsta_link;
5150
5151 if (!rtwvif_link) {
5152 ret = -ENOLINK;
5153 goto err;
5154 }
5155
5156 index = rtw89_vif_link_inst_get_index(rtwvif_link);
5157 if (test_bit(index, rtwsta->links_inst_map)) {
5158 ret = -EBUSY;
5159 goto err;
5160 }
5161
5162 rtwsta_link = &rtwsta->links_inst[index];
5163 rtwsta_link->link_id = link_id;
5164
5165 set_bit(index, rtwsta->links_inst_map);
5166 rtwsta->links[link_id] = rtwsta_link;
5167 list_add_tail(&rtwsta_link->dlink_schd, &rtwsta->dlink_pool);
5168 return rtwsta_link;
5169
5170 err:
5171 rtw89_err(rtwsta->rtwdev, "sta (link_id %u) failed to set link: %d\n",
5172 link_id, ret);
5173 return NULL;
5174 }
5175
5176 void rtw89_sta_unset_link(struct rtw89_sta *rtwsta, unsigned int link_id)
5177 {
5178 struct rtw89_sta_link **container = &rtwsta->links[link_id];
5179 struct rtw89_sta_link *link = *container;
5180 u8 index;
5181
5182 if (!link)
5183 return;
5184
5185 index = rtw89_sta_link_inst_get_index(link);
5186 clear_bit(index, rtwsta->links_inst_map);
5187 *container = NULL;
5188 list_del(&link->dlink_schd);
5189 }
5190
5191 int rtw89_core_init(struct rtw89_dev *rtwdev)
5192 {
5193 struct rtw89_btc *btc = &rtwdev->btc;
5194 u8 band;
5195
5196 INIT_LIST_HEAD(&rtwdev->ba_list);
5197 INIT_LIST_HEAD(&rtwdev->forbid_ba_list);
5198 INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
5199 INIT_LIST_HEAD(&rtwdev->early_h2c_list);
5200 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
5201 if (!(rtwdev->chip->support_bands & BIT(band)))
5202 continue;
5203 INIT_LIST_HEAD(&rtwdev->scan_info.pkt_list[band]);
5204 }
5205 INIT_LIST_HEAD(&rtwdev->scan_info.chan_list);
5206 INIT_WORK(&rtwdev->ba_work, rtw89_core_ba_work);
5207 INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work);
5208 INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work);
5209 wiphy_delayed_work_init(&rtwdev->track_work, rtw89_track_work);
5210 wiphy_delayed_work_init(&rtwdev->track_ps_work, rtw89_track_ps_work);
5211 wiphy_delayed_work_init(&rtwdev->chanctx_work, rtw89_chanctx_work);
5212 wiphy_delayed_work_init(&rtwdev->coex_act1_work, rtw89_coex_act1_work);
5213 wiphy_delayed_work_init(&rtwdev->coex_bt_devinfo_work, rtw89_coex_bt_devinfo_work);
5214 wiphy_delayed_work_init(&rtwdev->coex_rfk_chk_work, rtw89_coex_rfk_chk_work);
5215 wiphy_delayed_work_init(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work);
5216 wiphy_delayed_work_init(&rtwdev->mcc_prepare_done_work, rtw89_mcc_prepare_done_work);
5217 INIT_DELAYED_WORK(&rtwdev->forbid_ba_work, rtw89_forbid_ba_work);
5218 wiphy_delayed_work_init(&rtwdev->antdiv_work, rtw89_phy_antdiv_work);
5219 rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
5220 if (!rtwdev->txq_wq)
5221 return -ENOMEM;
5222 spin_lock_init(&rtwdev->ba_lock);
5223 spin_lock_init(&rtwdev->rpwm_lock);
5224 mutex_init(&rtwdev->rf_mutex);
5225 rtwdev->total_sta_assoc = 0;
5226
5227 rtw89_init_wait(&rtwdev->mcc.wait);
5228 rtw89_init_wait(&rtwdev->mlo.wait);
5229 rtw89_init_wait(&rtwdev->mac.fw_ofld_wait);
5230 rtw89_init_wait(&rtwdev->wow.wait);
5231 rtw89_init_wait(&rtwdev->mac.ps_wait);
5232
5233 wiphy_work_init(&rtwdev->c2h_work, rtw89_fw_c2h_work);
5234 wiphy_work_init(&rtwdev->ips_work, rtw89_ips_work);
5235 wiphy_work_init(&rtwdev->cancel_6ghz_probe_work, rtw89_cancel_6ghz_probe_work);
5236 INIT_WORK(&rtwdev->load_firmware_work, rtw89_load_firmware_work);
5237
5238 skb_queue_head_init(&rtwdev->c2h_queue);
5239 rtw89_core_ppdu_sts_init(rtwdev);
5240 rtw89_traffic_stats_init(rtwdev, &rtwdev->stats);
5241
5242 rtwdev->hal.rx_fltr = DEFAULT_AX_RX_FLTR;
5243 rtwdev->dbcc_en = false;
5244 rtwdev->mlo_dbcc_mode = MLO_DBCC_NOT_SUPPORT;
5245 rtwdev->mac.qta_mode = RTW89_QTA_SCC;
5246
5247 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
5248 rtwdev->dbcc_en = true;
5249 rtwdev->mac.qta_mode = RTW89_QTA_DBCC;
5250 rtwdev->mlo_dbcc_mode = MLO_1_PLUS_1_1RF;
5251 }
5252
5253 rtwdev->bbs[RTW89_PHY_0].phy_idx = RTW89_PHY_0;
5254 rtwdev->bbs[RTW89_PHY_1].phy_idx = RTW89_PHY_1;
5255
5256 wiphy_work_init(&btc->eapol_notify_work, rtw89_btc_ntfy_eapol_packet_work);
5257 wiphy_work_init(&btc->arp_notify_work, rtw89_btc_ntfy_arp_packet_work);
5258 wiphy_work_init(&btc->dhcp_notify_work, rtw89_btc_ntfy_dhcp_packet_work);
5259 wiphy_work_init(&btc->icmp_notify_work, rtw89_btc_ntfy_icmp_packet_work);
5260
5261 init_completion(&rtwdev->fw.req.completion);
5262 init_completion(&rtwdev->rfk_wait.completion);
5263
5264 schedule_work(&rtwdev->load_firmware_work);
5265
5266 rtw89_ser_init(rtwdev);
5267 rtw89_entity_init(rtwdev);
5268 rtw89_sar_init(rtwdev);
5269 rtw89_phy_ant_gain_init(rtwdev);
5270
5271 return 0;
5272 }
5273 EXPORT_SYMBOL(rtw89_core_init);
5274
5275 void rtw89_core_deinit(struct rtw89_dev *rtwdev)
5276 {
5277 rtw89_ser_deinit(rtwdev);
5278 rtw89_unload_firmware(rtwdev);
5279 __rtw89_fw_free_all_early_h2c(rtwdev);
5280
5281 destroy_workqueue(rtwdev->txq_wq);
5282 mutex_destroy(&rtwdev->rf_mutex);
5283 }
5284 EXPORT_SYMBOL(rtw89_core_deinit);
5285
5286 void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
5287 const u8 *mac_addr, bool hw_scan)
5288 {
5289 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
5290 rtwvif_link->chanctx_idx);
5291 struct rtw89_bb_ctx *bb = rtw89_get_bb_ctx(rtwdev, rtwvif_link->phy_idx);
5292
5293 rtwdev->scanning = true;
5294
5295 ether_addr_copy(rtwvif_link->mac_addr, mac_addr);
5296 rtw89_btc_ntfy_scan_start(rtwdev, rtwvif_link->phy_idx, chan->band_type);
5297 rtw89_chip_rfk_scan(rtwdev, rtwvif_link, true);
5298 rtw89_hci_recalc_int_mit(rtwdev);
5299 rtw89_phy_config_edcca(rtwdev, bb, true);
5300 rtw89_tas_scan(rtwdev, true);
5301
5302 rtw89_fw_h2c_cam(rtwdev, rtwvif_link, NULL, mac_addr);
5303 }
5304
5305 void rtw89_core_scan_complete(struct rtw89_dev *rtwdev,
5306 struct rtw89_vif_link *rtwvif_link, bool hw_scan)
5307 {
5308 struct ieee80211_bss_conf *bss_conf;
5309 struct rtw89_bb_ctx *bb;
5310 int ret;
5311
5312 if (!rtwvif_link)
5313 return;
5314
5315 rcu_read_lock();
5316
5317 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
5318 ether_addr_copy(rtwvif_link->mac_addr, bss_conf->addr);
5319
5320 rcu_read_unlock();
5321
5322 rtw89_fw_h2c_cam(rtwdev, rtwvif_link, NULL, NULL);
5323
5324 rtw89_chip_rfk_scan(rtwdev, rtwvif_link, false);
5325 rtw89_btc_ntfy_scan_finish(rtwdev, rtwvif_link->phy_idx);
5326 bb = rtw89_get_bb_ctx(rtwdev, rtwvif_link->phy_idx);
5327 rtw89_phy_config_edcca(rtwdev, bb, false);
5328 rtw89_tas_scan(rtwdev, false);
5329
5330 if (hw_scan) {
5331 ret = rtw89_core_send_nullfunc(rtwdev, rtwvif_link, false, false,
5332 RTW89_SCAN_NULL_TIMEOUT);
5333 if (ret)
5334 rtw89_debug(rtwdev, RTW89_DBG_TXRX,
5335 "scan send null-0 failed: %d\n", ret);
5336 }
5337
5338 rtwdev->scanning = false;
5339 rtw89_for_each_active_bb(rtwdev, bb)
5340 bb->dig.bypass_dig = true;
5341 if (hw_scan && (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE))
5342 wiphy_work_queue(rtwdev->hw->wiphy, &rtwdev->ips_work);
5343 }
5344
5345 static void rtw89_read_chip_ver(struct rtw89_dev *rtwdev)
5346 {
5347 const struct rtw89_chip_info *chip = rtwdev->chip;
5348 int ret;
5349 u8 val;
5350 u8 cv;
5351
5352 cv = rtw89_read32_mask(rtwdev, R_AX_SYS_CFG1, B_AX_CHIP_VER_MASK);
5353 if (chip->chip_id == RTL8852A && cv <= CHIP_CBV) {
5354 if (rtw89_read32(rtwdev, R_AX_GPIO0_7_FUNC_SEL) == RTW89_R32_DEAD)
5355 cv = CHIP_CAV;
5356 else
5357 cv = CHIP_CBV;
5358 }
5359
5360 rtwdev->hal.cv = cv;
5361
5362 if (rtw89_is_rtl885xb(rtwdev)) {
5363 ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_CV, &val);
5364 if (ret)
5365 return;
5366
5367 rtwdev->hal.acv = u8_get_bits(val, XTAL_SI_ACV_MASK);
5368 }
5369 }
5370
5371 static void rtw89_core_setup_phycap(struct rtw89_dev *rtwdev)
5372 {
5373 const struct rtw89_chip_info *chip = rtwdev->chip;
5374
5375 rtwdev->hal.support_cckpd =
5376 !(rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv <= CHIP_CBV) &&
5377 !(rtwdev->chip->chip_id == RTL8852B && rtwdev->hal.cv <= CHIP_CAV);
5378 rtwdev->hal.support_igi =
5379 rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv <= CHIP_CBV;
5380
5381 if (test_bit(RTW89_QUIRK_THERMAL_PROT_120C, rtwdev->quirks))
5382 rtwdev->hal.thermal_prot_th = chip->thermal_th[1];
5383 else if (test_bit(RTW89_QUIRK_THERMAL_PROT_110C, rtwdev->quirks))
5384 rtwdev->hal.thermal_prot_th = chip->thermal_th[0];
5385 else
5386 rtwdev->hal.thermal_prot_th = 0;
5387 }
5388
5389 static void rtw89_core_setup_rfe_parms(struct rtw89_dev *rtwdev)
5390 {
5391 const struct rtw89_chip_info *chip = rtwdev->chip;
5392 const struct rtw89_rfe_parms_conf *conf = chip->rfe_parms_conf;
5393 struct rtw89_efuse *efuse = &rtwdev->efuse;
5394 const struct rtw89_rfe_parms *sel;
5395 u8 rfe_type = efuse->rfe_type;
5396
5397 if (!conf) {
5398 sel = chip->dflt_parms;
5399 goto out;
5400 }
5401
5402 while (conf->rfe_parms) {
5403 if (rfe_type == conf->rfe_type) {
5404 sel = conf->rfe_parms;
5405 goto out;
5406 }
5407 conf++;
5408 }
5409
5410 sel = chip->dflt_parms;
5411
5412 out:
5413 rtwdev->rfe_parms = rtw89_load_rfe_data_from_fw(rtwdev, sel);
5414 rtw89_load_txpwr_table(rtwdev, rtwdev->rfe_parms->byr_tbl);
5415 }
5416
5417 int rtw89_core_mlsr_switch(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
5418 unsigned int link_id)
5419 {
5420 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
5421 u16 usable_links = ieee80211_vif_usable_links(vif);
5422 u16 active_links = vif->active_links;
5423 struct rtw89_vif_link *target, *cur;
5424 int ret;
5425
5426 lockdep_assert_wiphy(rtwdev->hw->wiphy);
5427
5428 if (unlikely(!ieee80211_vif_is_mld(vif)))
5429 return -EOPNOTSUPP;
5430
5431 if (unlikely(link_id >= IEEE80211_MLD_MAX_NUM_LINKS ||
5432 !(usable_links & BIT(link_id)))) {
5433 rtw89_warn(rtwdev, "%s: link id %u is not usable\n", __func__,
5434 link_id);
5435 return -ENOLINK;
5436 }
5437
5438 if (active_links == BIT(link_id))
5439 return 0;
5440
5441 rtw89_debug(rtwdev, RTW89_DBG_STATE, "%s: switch to link id %u MLSR\n",
5442 __func__, link_id);
5443
5444 rtw89_leave_lps(rtwdev);
5445
5446 ieee80211_stop_queues(rtwdev->hw);
5447 flush_work(&rtwdev->txq_work);
5448
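/* Activate the target link alongside the current one so firmware link
 * config can be updated for both, then shrink the active set down to the
 * target link only.
 */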
5449 cur = rtw89_get_designated_link(rtwvif);
5450
5451 ret = ieee80211_set_active_links(vif, active_links | BIT(link_id));
5452 if (ret) {
5453 rtw89_err(rtwdev, "%s: failed to activate link id %u\n",
5454 __func__, link_id);
5455 goto wake_queue;
5456 }
5457
5458 target = rtwvif->links[link_id];
5459 if (unlikely(!target)) {
5460 rtw89_err(rtwdev, "%s: failed to confirm link id %u\n",
5461 __func__, link_id);
5462
5463 ieee80211_set_active_links(vif, active_links);
5464 ret = -EFAULT;
5465 goto wake_queue;
5466 }
5467
5468 if (likely(cur))
5469 rtw89_fw_h2c_mlo_link_cfg(rtwdev, cur, false);
5470
5471 rtw89_fw_h2c_mlo_link_cfg(rtwdev, target, true);
5472
5473 ret = ieee80211_set_active_links(vif, BIT(link_id));
5474 if (ret)
5475 rtw89_err(rtwdev, "%s: failed to inactivate links 0x%x\n",
5476 __func__, active_links);
5477
5478 rtw89_chip_rfk_channel(rtwdev, target);
5479
5480 rtwvif->mlo_mode = RTW89_MLO_MODE_MLSR;
5481
5482 wake_queue:
5483 ieee80211_wake_queues(rtwdev->hw);
5484
5485 return ret;
5486 }
5487
5488 static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev)
5489 {
5490 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
5491 int ret;
5492
5493 ret = rtw89_mac_partial_init(rtwdev, false);
5494 if (ret)
5495 return ret;
5496
5497 ret = mac->parse_efuse_map(rtwdev);
5498 if (ret)
5499 return ret;
5500
5501 ret = mac->parse_phycap_map(rtwdev);
5502 if (ret)
5503 return ret;
5504
5505 ret = rtw89_mac_setup_phycap(rtwdev);
5506 if (ret)
5507 return ret;
5508
5509 rtw89_core_setup_phycap(rtwdev);
5510
5511 rtw89_hci_mac_pre_deinit(rtwdev);
5512
5513 return 0;
5514 }
5515
5516 static int rtw89_chip_board_info_setup(struct rtw89_dev *rtwdev)
5517 {
5518 rtw89_chip_fem_setup(rtwdev);
5519
5520 return 0;
5521 }
5522
5523 static bool rtw89_chip_has_rfkill(struct rtw89_dev *rtwdev)
5524 {
5525 return !!rtwdev->chip->rfkill_init;
5526 }
5527
5528 static void rtw89_core_rfkill_init(struct rtw89_dev *rtwdev)
5529 {
5530 const struct rtw89_rfkill_regs *regs = rtwdev->chip->rfkill_init;
5531
5532 rtw89_write16_mask(rtwdev, regs->pinmux.addr,
5533 regs->pinmux.mask, regs->pinmux.data);
5534 rtw89_write16_mask(rtwdev, regs->mode.addr,
5535 regs->mode.mask, regs->mode.data);
5536 }
5537
5538 static bool rtw89_core_rfkill_get(struct rtw89_dev *rtwdev)
5539 {
5540 const struct rtw89_reg_def *reg = &rtwdev->chip->rfkill_get;
5541
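/* A zero read from the rfkill GPIO means the radio is hard-blocked. */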
5542 return !rtw89_read8_mask(rtwdev, reg->addr, reg->mask);
5543 }
5544
5545 static void rtw89_rfkill_polling_init(struct rtw89_dev *rtwdev)
5546 {
5547 if (!rtw89_chip_has_rfkill(rtwdev))
5548 return;
5549
5550 rtw89_core_rfkill_init(rtwdev);
5551 rtw89_core_rfkill_poll(rtwdev, true);
5552 wiphy_rfkill_start_polling(rtwdev->hw->wiphy);
5553 }
5554
5555 static void rtw89_rfkill_polling_deinit(struct rtw89_dev *rtwdev)
5556 {
5557 if (!rtw89_chip_has_rfkill(rtwdev))
5558 return;
5559
5560 wiphy_rfkill_stop_polling(rtwdev->hw->wiphy);
5561 }
5562
5563 void rtw89_core_rfkill_poll(struct rtw89_dev *rtwdev, bool force)
5564 {
5565 bool prev, blocked;
5566
5567 if (!rtw89_chip_has_rfkill(rtwdev))
5568 return;
5569
5570 prev = test_bit(RTW89_FLAG_HW_RFKILL_STATE, rtwdev->flags);
5571 blocked = rtw89_core_rfkill_get(rtwdev);
5572
5573 if (!force && prev == blocked)
5574 return;
5575
5576 rtw89_info(rtwdev, "rfkill hardware state changed to %s\n",
5577 blocked ? "disable" : "enable");
5578
5579 if (blocked)
5580 set_bit(RTW89_FLAG_HW_RFKILL_STATE, rtwdev->flags);
5581 else
5582 clear_bit(RTW89_FLAG_HW_RFKILL_STATE, rtwdev->flags);
5583
5584 wiphy_rfkill_set_hw_state(rtwdev->hw->wiphy, blocked);
5585 }
5586
5587 int rtw89_chip_info_setup(struct rtw89_dev *rtwdev)
5588 {
5589 int ret;
5590
5591 rtw89_read_chip_ver(rtwdev);
5592
5593 ret = rtw89_mac_pwr_on(rtwdev);
5594 if (ret) {
5595 rtw89_err(rtwdev, "failed to power on\n");
5596 return ret;
5597 }
5598
5599 ret = rtw89_wait_firmware_completion(rtwdev);
5600 if (ret) {
5601 rtw89_err(rtwdev, "failed to wait firmware completion\n");
5602 goto out;
5603 }
5604
5605 ret = rtw89_fw_recognize(rtwdev);
5606 if (ret) {
5607 rtw89_err(rtwdev, "failed to recognize firmware\n");
5608 goto out;
5609 }
5610
5611 ret = rtw89_chip_efuse_info_setup(rtwdev);
5612 if (ret)
5613 goto out;
5614
5615 ret = rtw89_fw_recognize_elements(rtwdev);
5616 if (ret) {
5617 rtw89_err(rtwdev, "failed to recognize firmware elements\n");
5618 goto out;
5619 }
5620
5621 ret = rtw89_chip_board_info_setup(rtwdev);
5622 if (ret)
5623 goto out;
5624
5625 rtw89_core_setup_rfe_parms(rtwdev);
5626 rtwdev->ps_mode = rtw89_update_ps_mode(rtwdev);
5627
5628 out:
5629 rtw89_mac_pwr_off(rtwdev);
5630
5631 return ret;
5632 }
5633 EXPORT_SYMBOL(rtw89_chip_info_setup);
5634
5635 void rtw89_chip_cfg_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
5636 struct rtw89_vif_link *rtwvif_link)
5637 {
5638 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
5639 const struct rtw89_chip_info *chip = rtwdev->chip;
5640 struct ieee80211_bss_conf *bss_conf;
5641
5642 rcu_read_lock();
5643
5644 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false);
5645 if (!bss_conf->he_support || !vif->cfg.assoc) {
5646 rcu_read_unlock();
5647 return;
5648 }
5649
5650 rcu_read_unlock();
5651
5652 if (chip->ops->set_txpwr_ul_tb_offset)
5653 chip->ops->set_txpwr_ul_tb_offset(rtwdev, 0, rtwvif_link->mac_idx);
5654 }
5655
5656 static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
5657 {
5658 const struct rtw89_chip_info *chip = rtwdev->chip;
5659 u8 n = rtwdev->support_mlo ? chip->support_link_num : 1;
5660 struct ieee80211_hw *hw = rtwdev->hw;
5661 struct rtw89_efuse *efuse = &rtwdev->efuse;
5662 struct rtw89_hal *hal = &rtwdev->hal;
5663 int ret;
5664 int tx_headroom = IEEE80211_HT_CTL_LEN;
5665
5666 if (rtwdev->hci.type == RTW89_HCI_TYPE_USB)
5667 tx_headroom += chip->txwd_body_size + chip->txwd_info_size;
5668
5669 hw->vif_data_size = struct_size_t(struct rtw89_vif, links_inst, n);
5670 hw->sta_data_size = struct_size_t(struct rtw89_sta, links_inst, n);
5671 hw->txq_data_size = sizeof(struct rtw89_txq);
5672 hw->chanctx_data_size = sizeof(struct rtw89_chanctx_cfg);
5673
5674 SET_IEEE80211_PERM_ADDR(hw, efuse->addr);
5675
5676 hw->extra_tx_headroom = tx_headroom;
5677 hw->queues = IEEE80211_NUM_ACS;
5678 hw->max_rx_aggregation_subframes = RTW89_MAX_RX_AGG_NUM;
5679 hw->max_tx_aggregation_subframes = RTW89_MAX_TX_AGG_NUM;
5680 hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
5681
5682 hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
5683 IEEE80211_RADIOTAP_MCS_HAVE_STBC;
5684 hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC;
5685
5686 ieee80211_hw_set(hw, SIGNAL_DBM);
5687 ieee80211_hw_set(hw, HAS_RATE_CONTROL);
5688 ieee80211_hw_set(hw, MFP_CAPABLE);
5689 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
5690 ieee80211_hw_set(hw, AMPDU_AGGREGATION);
5691 ieee80211_hw_set(hw, RX_INCLUDES_FCS);
5692 ieee80211_hw_set(hw, TX_AMSDU);
5693 ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
5694 ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
5695 ieee80211_hw_set(hw, SUPPORTS_PS);
5696 ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
5697 ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
5698 ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
5699 ieee80211_hw_set(hw, WANT_MONITOR_VIF);
5700 ieee80211_hw_set(hw, CHANCTX_STA_CSA);
5701
5702 if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160))
5703 ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
5704
5705 if (RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
5706 ieee80211_hw_set(hw, CONNECTION_MONITOR);
5707
5708 if (RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &rtwdev->fw))
5709 ieee80211_hw_set(hw, AP_LINK_PS);
5710
5711 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
5712 BIT(NL80211_IFTYPE_AP) |
5713 BIT(NL80211_IFTYPE_P2P_CLIENT) |
5714 BIT(NL80211_IFTYPE_P2P_GO);
5715
5716 if (hal->ant_diversity) {
5717 hw->wiphy->available_antennas_tx = 0x3;
5718 hw->wiphy->available_antennas_rx = 0x3;
5719 } else {
5720 hw->wiphy->available_antennas_tx = BIT(rtwdev->chip->rf_path_num) - 1;
5721 hw->wiphy->available_antennas_rx = BIT(rtwdev->chip->rf_path_num) - 1;
5722 }
5723
5724 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
5725 WIPHY_FLAG_TDLS_EXTERNAL_SETUP |
5726 WIPHY_FLAG_AP_UAPSD |
5727 WIPHY_FLAG_HAS_CHANNEL_SWITCH |
5728 WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;
5729
5730 if (!chip->support_rnr)
5731 hw->wiphy->flags |= WIPHY_FLAG_SPLIT_SCAN_6GHZ;
5732
5733 if (chip->chip_gen == RTW89_CHIP_BE)
5734 hw->wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT;
5735
5736 if (rtwdev->support_mlo) {
5737 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
5738 hw->wiphy->iftype_ext_capab = rtw89_iftypes_ext_capa;
5739 hw->wiphy->num_iftype_ext_capab = ARRAY_SIZE(rtw89_iftypes_ext_capa);
5740 }
5741
5742 hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
5743
5744 hw->wiphy->max_scan_ssids = RTW89_SCANOFLD_MAX_SSID;
5745 hw->wiphy->max_scan_ie_len = RTW89_SCANOFLD_MAX_IE_LEN;
5746
5747 #ifdef CONFIG_PM
5748 hw->wiphy->wowlan = rtwdev->chip->wowlan_stub;
5749 hw->wiphy->max_sched_scan_ssids = RTW89_SCANOFLD_MAX_SSID;
5750 #endif
5751
5752 hw->wiphy->tid_config_support.vif |= BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL);
5753 hw->wiphy->tid_config_support.peer |= BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL);
5754 hw->wiphy->tid_config_support.vif |= BIT(NL80211_TID_CONFIG_ATTR_AMSDU_CTRL);
5755 hw->wiphy->tid_config_support.peer |= BIT(NL80211_TID_CONFIG_ATTR_AMSDU_CTRL);
5756 hw->wiphy->max_remain_on_channel_duration = 1000;
5757
5758 wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
5759 wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SCAN_RANDOM_SN);
5760 wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL);
5761
5762 ret = rtw89_core_set_supported_band(rtwdev);
5763 if (ret) {
5764 rtw89_err(rtwdev, "failed to set supported band\n");
5765 return ret;
5766 }
5767
5768 ret = rtw89_regd_setup(rtwdev);
5769 if (ret) {
5770 rtw89_err(rtwdev, "failed to set up regd\n");
5771 return ret;
5772 }
5773
5774 hw->wiphy->sar_capa = &rtw89_sar_capa;
5775
5776 ret = ieee80211_register_hw(hw);
5777 if (ret) {
5778 rtw89_err(rtwdev, "failed to register hw\n");
5779 return ret;
5780 }
5781
5782 ret = rtw89_regd_init_hint(rtwdev);
5783 if (ret) {
5784 rtw89_err(rtwdev, "failed to init regd\n");
5785 goto err_unregister_hw;
5786 }
5787
5788 rtw89_rfkill_polling_init(rtwdev);
5789
5790 return 0;
5791
5792 err_unregister_hw:
5793 ieee80211_unregister_hw(hw);
5794
5795 return ret;
5796 }
5797
5798 static void rtw89_core_unregister_hw(struct rtw89_dev *rtwdev)
5799 {
5800 struct ieee80211_hw *hw = rtwdev->hw;
5801
5802 rtw89_rfkill_polling_deinit(rtwdev);
5803 ieee80211_unregister_hw(hw);
5804 }
5805
5806 int rtw89_core_register(struct rtw89_dev *rtwdev)
5807 {
5808 int ret;
5809
5810 ret = rtw89_core_register_hw(rtwdev);
5811 if (ret) {
5812 rtw89_err(rtwdev, "failed to register core hw\n");
5813 return ret;
5814 }
5815
5816 rtw89_debugfs_init(rtwdev);
5817
5818 return 0;
5819 }
5820 EXPORT_SYMBOL(rtw89_core_register);
5821
5822 void rtw89_core_unregister(struct rtw89_dev *rtwdev)
5823 {
5824 rtw89_core_unregister_hw(rtwdev);
5825
5826 rtw89_debugfs_deinit(rtwdev);
5827 }
5828 EXPORT_SYMBOL(rtw89_core_unregister);
5829
5830 struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
5831 u32 bus_data_size,
5832 const struct rtw89_chip_info *chip,
5833 const struct rtw89_chip_variant *variant)
5834 {
5835 struct rtw89_fw_info early_fw = {};
5836 const struct firmware *firmware;
5837 struct ieee80211_hw *hw;
5838 struct rtw89_dev *rtwdev;
5839 struct ieee80211_ops *ops;
5840 u32 driver_data_size;
5841 int fw_format = -1;
5842 bool support_mlo;
5843 bool no_chanctx;
5844
5845 firmware = rtw89_early_fw_feature_recognize(device, chip, &early_fw, &fw_format);
5846
5847 ops = kmemdup(&rtw89_ops, sizeof(rtw89_ops), GFP_KERNEL);
5848 if (!ops)
5849 goto err;
5850
5851 no_chanctx = chip->support_chanctx_num == 0 ||
5852 !RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &early_fw) ||
5853 !RTW89_CHK_FW_FEATURE(BEACON_FILTER, &early_fw);
5854
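/* Without chanctx support (or the required FW features), fall back to
 * mac80211's emulated chanctx ops and drop remain-on-channel support.
 */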
5855 if (no_chanctx) {
5856 ops->add_chanctx = ieee80211_emulate_add_chanctx;
5857 ops->remove_chanctx = ieee80211_emulate_remove_chanctx;
5858 ops->change_chanctx = ieee80211_emulate_change_chanctx;
5859 ops->switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx;
5860 ops->assign_vif_chanctx = NULL;
5861 ops->unassign_vif_chanctx = NULL;
5862 ops->remain_on_channel = NULL;
5863 ops->cancel_remain_on_channel = NULL;
5864 }
5865
5866 driver_data_size = sizeof(struct rtw89_dev) + bus_data_size;
5867 hw = ieee80211_alloc_hw(driver_data_size, ops);
5868 if (!hw)
5869 goto err;
5870
5871 /* Currently, our AP_LINK_PS handling only works for non-MLD softap
5872 * or MLD-single-link softap. If RTW89_MLD_NON_STA_LINK_NUM enlarges,
5873 * please tweak the entire AP_LINK_PS handling before supporting MLO.
5874 */
5875 support_mlo = !no_chanctx && chip->support_link_num &&
5876 RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &early_fw) &&
5877 RTW89_MLD_NON_STA_LINK_NUM == 1;
5878
5879 hw->wiphy->iface_combinations = rtw89_iface_combs;
5880
5881 if (no_chanctx || chip->support_chanctx_num == 1)
5882 hw->wiphy->n_iface_combinations = 1;
5883 else
5884 hw->wiphy->n_iface_combinations = ARRAY_SIZE(rtw89_iface_combs);
5885
5886 rtwdev = hw->priv;
5887 rtwdev->hw = hw;
5888 rtwdev->dev = device;
5889 rtwdev->ops = ops;
5890 rtwdev->chip = chip;
5891 rtwdev->variant = variant;
5892 rtwdev->fw.req.firmware = firmware;
5893 rtwdev->fw.fw_format = fw_format;
5894 rtwdev->support_mlo = support_mlo;
5895
5896 rtw89_debug(rtwdev, RTW89_DBG_CHAN, "probe driver %s chanctx\n",
5897 no_chanctx ? "without" : "with");
5898 rtw89_debug(rtwdev, RTW89_DBG_CHAN, "probe driver %s MLO cap\n",
5899 support_mlo ? "with" : "without");
5900
5901 return rtwdev;
5902
5903 err:
5904 kfree(ops);
5905 release_firmware(firmware);
5906 return NULL;
5907 }
5908 EXPORT_SYMBOL(rtw89_alloc_ieee80211_hw);
5909
5910 void rtw89_free_ieee80211_hw(struct rtw89_dev *rtwdev)
5911 {
5912 kfree(rtwdev->ops);
5913 kfree(rtwdev->rfe_data);
5914 release_firmware(rtwdev->fw.req.firmware);
5915 ieee80211_free_hw(rtwdev->hw);
5916 }
5917 EXPORT_SYMBOL(rtw89_free_ieee80211_hw);
5918
5919 MODULE_AUTHOR("Realtek Corporation");
5920 MODULE_DESCRIPTION("Realtek 802.11ax wireless core module");
5921 MODULE_LICENSE("Dual BSD/GPL");
5922