1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2019-2020 Realtek Corporation 3 */ 4 #include <linux/ip.h> 5 #include <linux/sort.h> 6 #include <linux/udp.h> 7 8 #include "cam.h" 9 #include "chan.h" 10 #include "coex.h" 11 #include "core.h" 12 #include "efuse.h" 13 #include "fw.h" 14 #include "mac.h" 15 #include "phy.h" 16 #include "ps.h" 17 #include "reg.h" 18 #include "sar.h" 19 #include "ser.h" 20 #include "txrx.h" 21 #include "util.h" 22 #include "wow.h" 23 24 static bool rtw89_disable_ps_mode; 25 module_param_named(disable_ps_mode, rtw89_disable_ps_mode, bool, 0644); 26 MODULE_PARM_DESC(disable_ps_mode, "Set Y to disable low power mode"); 27 28 #define RTW89_DEF_CHAN(_freq, _hw_val, _flags, _band) \ 29 { .center_freq = _freq, .hw_value = _hw_val, .flags = _flags, .band = _band, } 30 #define RTW89_DEF_CHAN_2G(_freq, _hw_val) \ 31 RTW89_DEF_CHAN(_freq, _hw_val, 0, NL80211_BAND_2GHZ) 32 #define RTW89_DEF_CHAN_5G(_freq, _hw_val) \ 33 RTW89_DEF_CHAN(_freq, _hw_val, 0, NL80211_BAND_5GHZ) 34 #define RTW89_DEF_CHAN_5G_NO_HT40MINUS(_freq, _hw_val) \ 35 RTW89_DEF_CHAN(_freq, _hw_val, IEEE80211_CHAN_NO_HT40MINUS, NL80211_BAND_5GHZ) 36 #define RTW89_DEF_CHAN_6G(_freq, _hw_val) \ 37 RTW89_DEF_CHAN(_freq, _hw_val, 0, NL80211_BAND_6GHZ) 38 39 static struct ieee80211_channel rtw89_channels_2ghz[] = { 40 RTW89_DEF_CHAN_2G(2412, 1), 41 RTW89_DEF_CHAN_2G(2417, 2), 42 RTW89_DEF_CHAN_2G(2422, 3), 43 RTW89_DEF_CHAN_2G(2427, 4), 44 RTW89_DEF_CHAN_2G(2432, 5), 45 RTW89_DEF_CHAN_2G(2437, 6), 46 RTW89_DEF_CHAN_2G(2442, 7), 47 RTW89_DEF_CHAN_2G(2447, 8), 48 RTW89_DEF_CHAN_2G(2452, 9), 49 RTW89_DEF_CHAN_2G(2457, 10), 50 RTW89_DEF_CHAN_2G(2462, 11), 51 RTW89_DEF_CHAN_2G(2467, 12), 52 RTW89_DEF_CHAN_2G(2472, 13), 53 RTW89_DEF_CHAN_2G(2484, 14), 54 }; 55 56 static struct ieee80211_channel rtw89_channels_5ghz[] = { 57 RTW89_DEF_CHAN_5G(5180, 36), 58 RTW89_DEF_CHAN_5G(5200, 40), 59 RTW89_DEF_CHAN_5G(5220, 44), 60 RTW89_DEF_CHAN_5G(5240, 48), 61 RTW89_DEF_CHAN_5G(5260, 52), 62 RTW89_DEF_CHAN_5G(5280, 56), 63 RTW89_DEF_CHAN_5G(5300, 60), 64 RTW89_DEF_CHAN_5G(5320, 64), 65 RTW89_DEF_CHAN_5G(5500, 100), 66 RTW89_DEF_CHAN_5G(5520, 104), 67 RTW89_DEF_CHAN_5G(5540, 108), 68 RTW89_DEF_CHAN_5G(5560, 112), 69 RTW89_DEF_CHAN_5G(5580, 116), 70 RTW89_DEF_CHAN_5G(5600, 120), 71 RTW89_DEF_CHAN_5G(5620, 124), 72 RTW89_DEF_CHAN_5G(5640, 128), 73 RTW89_DEF_CHAN_5G(5660, 132), 74 RTW89_DEF_CHAN_5G(5680, 136), 75 RTW89_DEF_CHAN_5G(5700, 140), 76 RTW89_DEF_CHAN_5G(5720, 144), 77 RTW89_DEF_CHAN_5G(5745, 149), 78 RTW89_DEF_CHAN_5G(5765, 153), 79 RTW89_DEF_CHAN_5G(5785, 157), 80 RTW89_DEF_CHAN_5G(5805, 161), 81 RTW89_DEF_CHAN_5G_NO_HT40MINUS(5825, 165), 82 RTW89_DEF_CHAN_5G(5845, 169), 83 RTW89_DEF_CHAN_5G(5865, 173), 84 RTW89_DEF_CHAN_5G(5885, 177), 85 }; 86 87 static_assert(RTW89_5GHZ_UNII4_START_INDEX + RTW89_5GHZ_UNII4_CHANNEL_NUM == 88 ARRAY_SIZE(rtw89_channels_5ghz)); 89 90 static struct ieee80211_channel rtw89_channels_6ghz[] = { 91 RTW89_DEF_CHAN_6G(5955, 1), 92 RTW89_DEF_CHAN_6G(5975, 5), 93 RTW89_DEF_CHAN_6G(5995, 9), 94 RTW89_DEF_CHAN_6G(6015, 13), 95 RTW89_DEF_CHAN_6G(6035, 17), 96 RTW89_DEF_CHAN_6G(6055, 21), 97 RTW89_DEF_CHAN_6G(6075, 25), 98 RTW89_DEF_CHAN_6G(6095, 29), 99 RTW89_DEF_CHAN_6G(6115, 33), 100 RTW89_DEF_CHAN_6G(6135, 37), 101 RTW89_DEF_CHAN_6G(6155, 41), 102 RTW89_DEF_CHAN_6G(6175, 45), 103 RTW89_DEF_CHAN_6G(6195, 49), 104 RTW89_DEF_CHAN_6G(6215, 53), 105 RTW89_DEF_CHAN_6G(6235, 57), 106 RTW89_DEF_CHAN_6G(6255, 61), 107 RTW89_DEF_CHAN_6G(6275, 65), 108 RTW89_DEF_CHAN_6G(6295, 69), 109 
RTW89_DEF_CHAN_6G(6315, 73), 110 RTW89_DEF_CHAN_6G(6335, 77), 111 RTW89_DEF_CHAN_6G(6355, 81), 112 RTW89_DEF_CHAN_6G(6375, 85), 113 RTW89_DEF_CHAN_6G(6395, 89), 114 RTW89_DEF_CHAN_6G(6415, 93), 115 RTW89_DEF_CHAN_6G(6435, 97), 116 RTW89_DEF_CHAN_6G(6455, 101), 117 RTW89_DEF_CHAN_6G(6475, 105), 118 RTW89_DEF_CHAN_6G(6495, 109), 119 RTW89_DEF_CHAN_6G(6515, 113), 120 RTW89_DEF_CHAN_6G(6535, 117), 121 RTW89_DEF_CHAN_6G(6555, 121), 122 RTW89_DEF_CHAN_6G(6575, 125), 123 RTW89_DEF_CHAN_6G(6595, 129), 124 RTW89_DEF_CHAN_6G(6615, 133), 125 RTW89_DEF_CHAN_6G(6635, 137), 126 RTW89_DEF_CHAN_6G(6655, 141), 127 RTW89_DEF_CHAN_6G(6675, 145), 128 RTW89_DEF_CHAN_6G(6695, 149), 129 RTW89_DEF_CHAN_6G(6715, 153), 130 RTW89_DEF_CHAN_6G(6735, 157), 131 RTW89_DEF_CHAN_6G(6755, 161), 132 RTW89_DEF_CHAN_6G(6775, 165), 133 RTW89_DEF_CHAN_6G(6795, 169), 134 RTW89_DEF_CHAN_6G(6815, 173), 135 RTW89_DEF_CHAN_6G(6835, 177), 136 RTW89_DEF_CHAN_6G(6855, 181), 137 RTW89_DEF_CHAN_6G(6875, 185), 138 RTW89_DEF_CHAN_6G(6895, 189), 139 RTW89_DEF_CHAN_6G(6915, 193), 140 RTW89_DEF_CHAN_6G(6935, 197), 141 RTW89_DEF_CHAN_6G(6955, 201), 142 RTW89_DEF_CHAN_6G(6975, 205), 143 RTW89_DEF_CHAN_6G(6995, 209), 144 RTW89_DEF_CHAN_6G(7015, 213), 145 RTW89_DEF_CHAN_6G(7035, 217), 146 RTW89_DEF_CHAN_6G(7055, 221), 147 RTW89_DEF_CHAN_6G(7075, 225), 148 RTW89_DEF_CHAN_6G(7095, 229), 149 RTW89_DEF_CHAN_6G(7115, 233), 150 }; 151 152 static struct ieee80211_rate rtw89_bitrates[] = { 153 { .bitrate = 10, .hw_value = 0x00, }, 154 { .bitrate = 20, .hw_value = 0x01, }, 155 { .bitrate = 55, .hw_value = 0x02, }, 156 { .bitrate = 110, .hw_value = 0x03, }, 157 { .bitrate = 60, .hw_value = 0x04, }, 158 { .bitrate = 90, .hw_value = 0x05, }, 159 { .bitrate = 120, .hw_value = 0x06, }, 160 { .bitrate = 180, .hw_value = 0x07, }, 161 { .bitrate = 240, .hw_value = 0x08, }, 162 { .bitrate = 360, .hw_value = 0x09, }, 163 { .bitrate = 480, .hw_value = 0x0a, }, 164 { .bitrate = 540, .hw_value = 0x0b, }, 165 }; 166 167 static const struct ieee80211_iface_limit rtw89_iface_limits[] = { 168 { 169 .max = 1, 170 .types = BIT(NL80211_IFTYPE_STATION), 171 }, 172 { 173 .max = 1, 174 .types = BIT(NL80211_IFTYPE_P2P_CLIENT) | 175 BIT(NL80211_IFTYPE_P2P_GO) | 176 BIT(NL80211_IFTYPE_AP), 177 }, 178 }; 179 180 static const struct ieee80211_iface_limit rtw89_iface_limits_mcc[] = { 181 { 182 .max = 1, 183 .types = BIT(NL80211_IFTYPE_STATION), 184 }, 185 { 186 .max = 1, 187 .types = BIT(NL80211_IFTYPE_P2P_CLIENT) | 188 BIT(NL80211_IFTYPE_P2P_GO), 189 }, 190 }; 191 192 static const struct ieee80211_iface_combination rtw89_iface_combs[] = { 193 { 194 .limits = rtw89_iface_limits, 195 .n_limits = ARRAY_SIZE(rtw89_iface_limits), 196 .max_interfaces = RTW89_MAX_INTERFACE_NUM, 197 .num_different_channels = 1, 198 }, 199 { 200 .limits = rtw89_iface_limits_mcc, 201 .n_limits = ARRAY_SIZE(rtw89_iface_limits_mcc), 202 .max_interfaces = RTW89_MAX_INTERFACE_NUM, 203 .num_different_channels = 2, 204 }, 205 }; 206 207 static const u8 rtw89_ext_capa_sta[] = { 208 [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, 209 [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, 210 [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, 211 }; 212 213 static const struct wiphy_iftype_ext_capab rtw89_iftypes_ext_capa[] = { 214 { 215 .iftype = NL80211_IFTYPE_STATION, 216 .extended_capabilities = rtw89_ext_capa_sta, 217 .extended_capabilities_mask = rtw89_ext_capa_sta, 218 .extended_capabilities_len = sizeof(rtw89_ext_capa_sta), 219 /* relevant only if EHT is supported */ 220 .eml_capabilities = 0, 221 .mld_capa_and_ops = 0, 222 }, 223 }; 224 225 
#define RTW89_6GHZ_SPAN_HEAD 6145 226 #define RTW89_6GHZ_SPAN_IDX(center_freq) \ 227 ((((int)(center_freq) - RTW89_6GHZ_SPAN_HEAD) / 5) / 2) 228 229 #define RTW89_DECL_6GHZ_SPAN(center_freq, subband_l, subband_h) \ 230 [RTW89_6GHZ_SPAN_IDX(center_freq)] = { \ 231 .sar_subband_low = RTW89_SAR_6GHZ_ ## subband_l, \ 232 .sar_subband_high = RTW89_SAR_6GHZ_ ## subband_h, \ 233 .acpi_sar_subband_low = RTW89_ACPI_SAR_6GHZ_ ## subband_l, \ 234 .acpi_sar_subband_high = RTW89_ACPI_SAR_6GHZ_ ## subband_h, \ 235 .ant_gain_subband_low = RTW89_ANT_GAIN_6GHZ_ ## subband_l, \ 236 .ant_gain_subband_high = RTW89_ANT_GAIN_6GHZ_ ## subband_h, \ 237 } 238 239 /* Since 6GHz subbands are not edge aligned, some cases span two subbands. 240 * In the following, we describe each of them with rtw89_6ghz_span. 241 */ 242 static const struct rtw89_6ghz_span rtw89_overlapping_6ghz[] = { 243 RTW89_DECL_6GHZ_SPAN(6145, SUBBAND_5_L, SUBBAND_5_H), 244 RTW89_DECL_6GHZ_SPAN(6165, SUBBAND_5_L, SUBBAND_5_H), 245 RTW89_DECL_6GHZ_SPAN(6185, SUBBAND_5_L, SUBBAND_5_H), 246 RTW89_DECL_6GHZ_SPAN(6505, SUBBAND_6, SUBBAND_7_L), 247 RTW89_DECL_6GHZ_SPAN(6525, SUBBAND_6, SUBBAND_7_L), 248 RTW89_DECL_6GHZ_SPAN(6545, SUBBAND_6, SUBBAND_7_L), 249 RTW89_DECL_6GHZ_SPAN(6665, SUBBAND_7_L, SUBBAND_7_H), 250 RTW89_DECL_6GHZ_SPAN(6705, SUBBAND_7_L, SUBBAND_7_H), 251 RTW89_DECL_6GHZ_SPAN(6825, SUBBAND_7_H, SUBBAND_8), 252 RTW89_DECL_6GHZ_SPAN(6865, SUBBAND_7_H, SUBBAND_8), 253 RTW89_DECL_6GHZ_SPAN(6875, SUBBAND_7_H, SUBBAND_8), 254 RTW89_DECL_6GHZ_SPAN(6885, SUBBAND_7_H, SUBBAND_8), 255 }; 256 257 const struct rtw89_6ghz_span * 258 rtw89_get_6ghz_span(struct rtw89_dev *rtwdev, u32 center_freq) 259 { 260 int idx; 261 262 if (center_freq >= RTW89_6GHZ_SPAN_HEAD) { 263 idx = RTW89_6GHZ_SPAN_IDX(center_freq); 264 /* To decrease size of rtw89_overlapping_6ghz[], 265 * RTW89_6GHZ_SPAN_IDX() truncates the leading NULLs 266 * to make first span as index 0 of the table. So, if center 267 * frequency is less than the first one, it will get netative. 
268 */ 269 if (idx >= 0 && idx < ARRAY_SIZE(rtw89_overlapping_6ghz)) 270 return &rtw89_overlapping_6ghz[idx]; 271 } 272 273 return NULL; 274 } 275 276 bool rtw89_legacy_rate_to_bitrate(struct rtw89_dev *rtwdev, u8 legacy_rate, u16 *bitrate) 277 { 278 const struct ieee80211_rate *rate; 279 280 if (unlikely(legacy_rate >= ARRAY_SIZE(rtw89_bitrates))) { 281 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, 282 "invalid legacy rate %d\n", legacy_rate); 283 return false; 284 } 285 286 rate = &rtw89_bitrates[legacy_rate]; 287 *bitrate = rate->bitrate; 288 289 return true; 290 } 291 292 static const struct ieee80211_supported_band rtw89_sband_2ghz = { 293 .band = NL80211_BAND_2GHZ, 294 .channels = rtw89_channels_2ghz, 295 .n_channels = ARRAY_SIZE(rtw89_channels_2ghz), 296 .bitrates = rtw89_bitrates, 297 .n_bitrates = ARRAY_SIZE(rtw89_bitrates), 298 .ht_cap = {0}, 299 .vht_cap = {0}, 300 }; 301 302 static const struct ieee80211_supported_band rtw89_sband_5ghz = { 303 .band = NL80211_BAND_5GHZ, 304 .channels = rtw89_channels_5ghz, 305 .n_channels = ARRAY_SIZE(rtw89_channels_5ghz), 306 307 /* 5G has no CCK rates, 1M/2M/5.5M/11M */ 308 .bitrates = rtw89_bitrates + 4, 309 .n_bitrates = ARRAY_SIZE(rtw89_bitrates) - 4, 310 .ht_cap = {0}, 311 .vht_cap = {0}, 312 }; 313 314 static const struct ieee80211_supported_band rtw89_sband_6ghz = { 315 .band = NL80211_BAND_6GHZ, 316 .channels = rtw89_channels_6ghz, 317 .n_channels = ARRAY_SIZE(rtw89_channels_6ghz), 318 319 /* 6G has no CCK rates, 1M/2M/5.5M/11M */ 320 .bitrates = rtw89_bitrates + 4, 321 .n_bitrates = ARRAY_SIZE(rtw89_bitrates) - 4, 322 }; 323 324 static void __rtw89_traffic_stats_accu(struct rtw89_traffic_stats *stats, 325 struct sk_buff *skb, bool tx) 326 { 327 if (tx) { 328 stats->tx_cnt++; 329 stats->tx_unicast += skb->len; 330 } else { 331 stats->rx_cnt++; 332 stats->rx_unicast += skb->len; 333 } 334 } 335 336 static void rtw89_traffic_stats_accu(struct rtw89_dev *rtwdev, 337 struct rtw89_vif *rtwvif, 338 struct sk_buff *skb, 339 bool accu_dev, bool tx) 340 { 341 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 342 343 if (!ieee80211_is_data(hdr->frame_control)) 344 return; 345 346 if (is_broadcast_ether_addr(hdr->addr1) || 347 is_multicast_ether_addr(hdr->addr1)) 348 return; 349 350 if (accu_dev) 351 __rtw89_traffic_stats_accu(&rtwdev->stats, skb, tx); 352 353 if (rtwvif) { 354 __rtw89_traffic_stats_accu(&rtwvif->stats, skb, tx); 355 __rtw89_traffic_stats_accu(&rtwvif->stats_ps, skb, tx); 356 } 357 } 358 359 void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef) 360 { 361 cfg80211_chandef_create(chandef, &rtw89_channels_2ghz[0], 362 NL80211_CHAN_NO_HT); 363 } 364 365 void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef, 366 struct rtw89_chan *chan) 367 { 368 struct ieee80211_channel *channel = chandef->chan; 369 enum nl80211_chan_width width = chandef->width; 370 u32 primary_freq, center_freq; 371 u8 center_chan; 372 u8 bandwidth = RTW89_CHANNEL_WIDTH_20; 373 u32 offset; 374 u8 band; 375 376 center_chan = channel->hw_value; 377 primary_freq = channel->center_freq; 378 center_freq = chandef->center_freq1; 379 380 switch (width) { 381 case NL80211_CHAN_WIDTH_20_NOHT: 382 case NL80211_CHAN_WIDTH_20: 383 bandwidth = RTW89_CHANNEL_WIDTH_20; 384 break; 385 case NL80211_CHAN_WIDTH_40: 386 bandwidth = RTW89_CHANNEL_WIDTH_40; 387 if (primary_freq > center_freq) { 388 center_chan -= 2; 389 } else { 390 center_chan += 2; 391 } 392 break; 393 case NL80211_CHAN_WIDTH_80: 394 case NL80211_CHAN_WIDTH_160: 395 bandwidth = 
nl_to_rtw89_bandwidth(width); 396 if (primary_freq > center_freq) { 397 offset = (primary_freq - center_freq - 10) / 20; 398 center_chan -= 2 + offset * 4; 399 } else { 400 offset = (center_freq - primary_freq - 10) / 20; 401 center_chan += 2 + offset * 4; 402 } 403 break; 404 default: 405 center_chan = 0; 406 break; 407 } 408 409 switch (channel->band) { 410 default: 411 case NL80211_BAND_2GHZ: 412 band = RTW89_BAND_2G; 413 break; 414 case NL80211_BAND_5GHZ: 415 band = RTW89_BAND_5G; 416 break; 417 case NL80211_BAND_6GHZ: 418 band = RTW89_BAND_6G; 419 break; 420 } 421 422 rtw89_chan_create(chan, center_chan, channel->hw_value, band, bandwidth); 423 } 424 425 static void __rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev, 426 const struct rtw89_chan *chan, 427 enum rtw89_phy_idx phy_idx) 428 { 429 const struct rtw89_chip_info *chip = rtwdev->chip; 430 bool entity_active; 431 432 entity_active = rtw89_get_entity_state(rtwdev, phy_idx); 433 if (!entity_active) 434 return; 435 436 chip->ops->set_txpwr(rtwdev, chan, phy_idx); 437 } 438 439 void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev) 440 { 441 const struct rtw89_chan *chan; 442 443 chan = rtw89_mgnt_chan_get(rtwdev, 0); 444 __rtw89_core_set_chip_txpwr(rtwdev, chan, RTW89_PHY_0); 445 446 if (!rtwdev->support_mlo) 447 return; 448 449 chan = rtw89_mgnt_chan_get(rtwdev, 1); 450 __rtw89_core_set_chip_txpwr(rtwdev, chan, RTW89_PHY_1); 451 } 452 453 static void __rtw89_set_channel(struct rtw89_dev *rtwdev, 454 const struct rtw89_chan *chan, 455 enum rtw89_mac_idx mac_idx, 456 enum rtw89_phy_idx phy_idx) 457 { 458 const struct rtw89_chip_info *chip = rtwdev->chip; 459 const struct rtw89_chan_rcd *chan_rcd; 460 struct rtw89_channel_help_params bak; 461 bool entity_active; 462 463 entity_active = rtw89_get_entity_state(rtwdev, phy_idx); 464 465 chan_rcd = rtw89_chan_rcd_get_by_chan(chan); 466 467 rtw89_chip_set_channel_prepare(rtwdev, &bak, chan, mac_idx, phy_idx); 468 469 chip->ops->set_channel(rtwdev, chan, mac_idx, phy_idx); 470 471 chip->ops->set_txpwr(rtwdev, chan, phy_idx); 472 473 rtw89_chip_set_channel_done(rtwdev, &bak, chan, mac_idx, phy_idx); 474 475 if (!entity_active || chan_rcd->band_changed) { 476 rtw89_btc_ntfy_switch_band(rtwdev, phy_idx, chan->band_type); 477 rtw89_chip_rfk_band_changed(rtwdev, phy_idx, chan); 478 } 479 480 rtw89_set_entity_state(rtwdev, phy_idx, true); 481 } 482 483 int rtw89_set_channel(struct rtw89_dev *rtwdev) 484 { 485 const struct rtw89_chan *chan; 486 enum rtw89_entity_mode mode; 487 488 mode = rtw89_entity_recalc(rtwdev); 489 if (mode < 0 || mode >= NUM_OF_RTW89_ENTITY_MODE) { 490 WARN(1, "Invalid ent mode: %d\n", mode); 491 return -EINVAL; 492 } 493 494 chan = rtw89_mgnt_chan_get(rtwdev, 0); 495 __rtw89_set_channel(rtwdev, chan, RTW89_MAC_0, RTW89_PHY_0); 496 497 if (!rtwdev->support_mlo) 498 return 0; 499 500 chan = rtw89_mgnt_chan_get(rtwdev, 1); 501 __rtw89_set_channel(rtwdev, chan, RTW89_MAC_1, RTW89_PHY_1); 502 503 return 0; 504 } 505 506 static enum rtw89_core_tx_type 507 rtw89_core_get_tx_type(struct rtw89_dev *rtwdev, 508 struct sk_buff *skb) 509 { 510 struct ieee80211_hdr *hdr = (void *)skb->data; 511 __le16 fc = hdr->frame_control; 512 513 if (ieee80211_is_mgmt(fc) || ieee80211_is_nullfunc(fc)) 514 return RTW89_CORE_TX_TYPE_MGMT; 515 516 return RTW89_CORE_TX_TYPE_DATA; 517 } 518 519 static void 520 rtw89_core_tx_update_ampdu_info(struct rtw89_dev *rtwdev, 521 struct rtw89_core_tx_request *tx_req, 522 enum btc_pkt_type pkt_type) 523 { 524 struct rtw89_sta_link *rtwsta_link = 
tx_req->rtwsta_link; 525 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 526 struct ieee80211_link_sta *link_sta; 527 struct sk_buff *skb = tx_req->skb; 528 struct rtw89_sta *rtwsta; 529 u8 ampdu_num; 530 u8 tid; 531 532 if (pkt_type == PACKET_EAPOL) { 533 desc_info->bk = true; 534 return; 535 } 536 537 if (!(IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU)) 538 return; 539 540 if (!rtwsta_link) { 541 rtw89_warn(rtwdev, "cannot set ampdu info without sta\n"); 542 return; 543 } 544 545 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; 546 rtwsta = rtwsta_link->rtwsta; 547 548 rcu_read_lock(); 549 550 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, false); 551 ampdu_num = (u8)((rtwsta->ampdu_params[tid].agg_num ? 552 rtwsta->ampdu_params[tid].agg_num : 553 4 << link_sta->ht_cap.ampdu_factor) - 1); 554 555 desc_info->agg_en = true; 556 desc_info->ampdu_density = link_sta->ht_cap.ampdu_density; 557 desc_info->ampdu_num = ampdu_num; 558 559 rcu_read_unlock(); 560 } 561 562 static void 563 rtw89_core_tx_update_sec_key(struct rtw89_dev *rtwdev, 564 struct rtw89_core_tx_request *tx_req) 565 { 566 struct rtw89_cam_info *cam_info = &rtwdev->cam_info; 567 const struct rtw89_chip_info *chip = rtwdev->chip; 568 const struct rtw89_sec_cam_entry *sec_cam; 569 struct ieee80211_tx_info *info; 570 struct ieee80211_key_conf *key; 571 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 572 struct sk_buff *skb = tx_req->skb; 573 u8 sec_type = RTW89_SEC_KEY_TYPE_NONE; 574 u8 sec_cam_idx; 575 u64 pn64; 576 577 info = IEEE80211_SKB_CB(skb); 578 key = info->control.hw_key; 579 sec_cam_idx = key->hw_key_idx; 580 sec_cam = cam_info->sec_entries[sec_cam_idx]; 581 if (!sec_cam) { 582 rtw89_warn(rtwdev, "sec cam entry is empty\n"); 583 return; 584 } 585 586 switch (key->cipher) { 587 case WLAN_CIPHER_SUITE_WEP40: 588 sec_type = RTW89_SEC_KEY_TYPE_WEP40; 589 break; 590 case WLAN_CIPHER_SUITE_WEP104: 591 sec_type = RTW89_SEC_KEY_TYPE_WEP104; 592 break; 593 case WLAN_CIPHER_SUITE_TKIP: 594 sec_type = RTW89_SEC_KEY_TYPE_TKIP; 595 break; 596 case WLAN_CIPHER_SUITE_CCMP: 597 sec_type = RTW89_SEC_KEY_TYPE_CCMP128; 598 break; 599 case WLAN_CIPHER_SUITE_CCMP_256: 600 sec_type = RTW89_SEC_KEY_TYPE_CCMP256; 601 break; 602 case WLAN_CIPHER_SUITE_GCMP: 603 sec_type = RTW89_SEC_KEY_TYPE_GCMP128; 604 break; 605 case WLAN_CIPHER_SUITE_GCMP_256: 606 sec_type = RTW89_SEC_KEY_TYPE_GCMP256; 607 break; 608 default: 609 rtw89_warn(rtwdev, "key cipher not supported %d\n", key->cipher); 610 return; 611 } 612 613 desc_info->sec_en = true; 614 desc_info->sec_keyid = key->keyidx; 615 desc_info->sec_type = sec_type; 616 desc_info->sec_cam_idx = sec_cam->sec_cam_idx; 617 618 if (!chip->hw_sec_hdr) 619 return; 620 621 pn64 = atomic64_inc_return(&key->tx_pn); 622 desc_info->sec_seq[0] = pn64; 623 desc_info->sec_seq[1] = pn64 >> 8; 624 desc_info->sec_seq[2] = pn64 >> 16; 625 desc_info->sec_seq[3] = pn64 >> 24; 626 desc_info->sec_seq[4] = pn64 >> 32; 627 desc_info->sec_seq[5] = pn64 >> 40; 628 desc_info->wp_offset = 1; /* in unit of 8 bytes for security header */ 629 } 630 631 static u16 rtw89_core_get_mgmt_rate(struct rtw89_dev *rtwdev, 632 struct rtw89_core_tx_request *tx_req, 633 const struct rtw89_chan *chan) 634 { 635 struct sk_buff *skb = tx_req->skb; 636 struct rtw89_vif_link *rtwvif_link = tx_req->rtwvif_link; 637 struct rtw89_sta_link *rtwsta_link = tx_req->rtwsta_link; 638 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 639 struct ieee80211_vif *vif = tx_info->control.vif; 640 struct 
ieee80211_bss_conf *bss_conf; 641 u16 lowest_rate; 642 u16 rate; 643 644 if (tx_info->flags & IEEE80211_TX_CTL_NO_CCK_RATE || 645 (vif && vif->p2p)) 646 lowest_rate = RTW89_HW_RATE_OFDM6; 647 else if (chan->band_type == RTW89_BAND_2G) 648 lowest_rate = RTW89_HW_RATE_CCK1; 649 else 650 lowest_rate = RTW89_HW_RATE_OFDM6; 651 652 if (!rtwvif_link) 653 return lowest_rate; 654 655 rcu_read_lock(); 656 657 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false); 658 if (!bss_conf->basic_rates || !rtwsta_link) { 659 rate = lowest_rate; 660 goto out; 661 } 662 663 rate = __ffs(bss_conf->basic_rates) + lowest_rate; 664 665 out: 666 rcu_read_unlock(); 667 668 return rate; 669 } 670 671 static u8 rtw89_core_tx_get_mac_id(struct rtw89_dev *rtwdev, 672 struct rtw89_core_tx_request *tx_req) 673 { 674 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 675 struct rtw89_vif_link *rtwvif_link = tx_req->rtwvif_link; 676 struct rtw89_sta_link *rtwsta_link = tx_req->rtwsta_link; 677 678 if (desc_info->mlo && !desc_info->sw_mld) { 679 if (rtwsta_link) 680 return rtw89_sta_get_main_macid(rtwsta_link->rtwsta); 681 else 682 return rtw89_vif_get_main_macid(rtwvif_link->rtwvif); 683 } 684 685 if (!rtwsta_link) 686 return rtwvif_link->mac_id; 687 688 return rtwsta_link->mac_id; 689 } 690 691 static void rtw89_core_tx_update_llc_hdr(struct rtw89_dev *rtwdev, 692 struct rtw89_tx_desc_info *desc_info, 693 struct sk_buff *skb) 694 { 695 struct ieee80211_hdr *hdr = (void *)skb->data; 696 __le16 fc = hdr->frame_control; 697 698 desc_info->hdr_llc_len = ieee80211_hdrlen(fc); 699 desc_info->hdr_llc_len >>= 1; /* in unit of 2 bytes */ 700 } 701 702 u8 rtw89_core_get_ch_dma(struct rtw89_dev *rtwdev, u8 qsel) 703 { 704 switch (qsel) { 705 default: 706 rtw89_warn(rtwdev, "Cannot map qsel to dma: %d\n", qsel); 707 fallthrough; 708 case RTW89_TX_QSEL_BE_0: 709 case RTW89_TX_QSEL_BE_1: 710 case RTW89_TX_QSEL_BE_2: 711 case RTW89_TX_QSEL_BE_3: 712 return RTW89_TXCH_ACH0; 713 case RTW89_TX_QSEL_BK_0: 714 case RTW89_TX_QSEL_BK_1: 715 case RTW89_TX_QSEL_BK_2: 716 case RTW89_TX_QSEL_BK_3: 717 return RTW89_TXCH_ACH1; 718 case RTW89_TX_QSEL_VI_0: 719 case RTW89_TX_QSEL_VI_1: 720 case RTW89_TX_QSEL_VI_2: 721 case RTW89_TX_QSEL_VI_3: 722 return RTW89_TXCH_ACH2; 723 case RTW89_TX_QSEL_VO_0: 724 case RTW89_TX_QSEL_VO_1: 725 case RTW89_TX_QSEL_VO_2: 726 case RTW89_TX_QSEL_VO_3: 727 return RTW89_TXCH_ACH3; 728 case RTW89_TX_QSEL_B0_MGMT: 729 return RTW89_TXCH_CH8; 730 case RTW89_TX_QSEL_B0_HI: 731 return RTW89_TXCH_CH9; 732 case RTW89_TX_QSEL_B1_MGMT: 733 return RTW89_TXCH_CH10; 734 case RTW89_TX_QSEL_B1_HI: 735 return RTW89_TXCH_CH11; 736 } 737 } 738 EXPORT_SYMBOL(rtw89_core_get_ch_dma); 739 740 u8 rtw89_core_get_ch_dma_v1(struct rtw89_dev *rtwdev, u8 qsel) 741 { 742 switch (qsel) { 743 default: 744 rtw89_warn(rtwdev, "Cannot map qsel to dma v1: %d\n", qsel); 745 fallthrough; 746 case RTW89_TX_QSEL_BE_0: 747 case RTW89_TX_QSEL_BK_0: 748 return RTW89_TXCH_ACH0; 749 case RTW89_TX_QSEL_VI_0: 750 case RTW89_TX_QSEL_VO_0: 751 return RTW89_TXCH_ACH2; 752 case RTW89_TX_QSEL_B0_MGMT: 753 case RTW89_TX_QSEL_B0_HI: 754 return RTW89_TXCH_CH8; 755 case RTW89_TX_QSEL_B1_MGMT: 756 case RTW89_TX_QSEL_B1_HI: 757 return RTW89_TXCH_CH10; 758 } 759 } 760 EXPORT_SYMBOL(rtw89_core_get_ch_dma_v1); 761 762 static void 763 rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev, 764 struct rtw89_core_tx_request *tx_req) 765 { 766 const struct rtw89_chip_info *chip = rtwdev->chip; 767 struct rtw89_vif_link *rtwvif_link = tx_req->rtwvif_link; 768 struct 
rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 769 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 770 rtwvif_link->chanctx_idx); 771 struct sk_buff *skb = tx_req->skb; 772 u8 qsel, ch_dma; 773 774 qsel = rtw89_core_get_qsel_mgmt(rtwdev, tx_req); 775 ch_dma = rtw89_chip_get_ch_dma(rtwdev, qsel); 776 777 desc_info->qsel = qsel; 778 desc_info->ch_dma = ch_dma; 779 desc_info->port = desc_info->hiq ? rtwvif_link->port : 0; 780 desc_info->mac_id = rtw89_core_tx_get_mac_id(rtwdev, tx_req); 781 desc_info->hw_ssn_sel = RTW89_MGMT_HW_SSN_SEL; 782 desc_info->hw_seq_mode = RTW89_MGMT_HW_SEQ_MODE; 783 784 /* fixed data rate for mgmt frames */ 785 desc_info->en_wd_info = true; 786 desc_info->use_rate = true; 787 desc_info->dis_data_fb = true; 788 desc_info->data_rate = rtw89_core_get_mgmt_rate(rtwdev, tx_req, chan); 789 790 if (chip->hw_mgmt_tx_encrypt && IEEE80211_SKB_CB(skb)->control.hw_key) { 791 rtw89_core_tx_update_sec_key(rtwdev, tx_req); 792 rtw89_core_tx_update_llc_hdr(rtwdev, desc_info, skb); 793 } 794 795 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 796 "tx mgmt frame with rate 0x%x on channel %d (band %d, bw %d)\n", 797 desc_info->data_rate, chan->channel, chan->band_type, 798 chan->band_width); 799 } 800 801 static void 802 rtw89_core_tx_update_h2c_info(struct rtw89_dev *rtwdev, 803 struct rtw89_core_tx_request *tx_req) 804 { 805 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 806 807 desc_info->is_bmc = false; 808 desc_info->wd_page = false; 809 desc_info->ch_dma = RTW89_DMA_H2C; 810 } 811 812 static void rtw89_core_get_no_ul_ofdma_htc(struct rtw89_dev *rtwdev, __le32 *htc, 813 const struct rtw89_chan *chan) 814 { 815 static const u8 rtw89_bandwidth_to_om[] = { 816 [RTW89_CHANNEL_WIDTH_20] = HTC_OM_CHANNEL_WIDTH_20, 817 [RTW89_CHANNEL_WIDTH_40] = HTC_OM_CHANNEL_WIDTH_40, 818 [RTW89_CHANNEL_WIDTH_80] = HTC_OM_CHANNEL_WIDTH_80, 819 [RTW89_CHANNEL_WIDTH_160] = HTC_OM_CHANNEL_WIDTH_160_OR_80_80, 820 [RTW89_CHANNEL_WIDTH_80_80] = HTC_OM_CHANNEL_WIDTH_160_OR_80_80, 821 }; 822 const struct rtw89_chip_info *chip = rtwdev->chip; 823 struct rtw89_hal *hal = &rtwdev->hal; 824 u8 om_bandwidth; 825 826 if (!chip->dis_2g_40m_ul_ofdma || 827 chan->band_type != RTW89_BAND_2G || 828 chan->band_width != RTW89_CHANNEL_WIDTH_40) 829 return; 830 831 om_bandwidth = chan->band_width < ARRAY_SIZE(rtw89_bandwidth_to_om) ? 
832 rtw89_bandwidth_to_om[chan->band_width] : 0; 833 *htc = le32_encode_bits(RTW89_HTC_VARIANT_HE, RTW89_HTC_MASK_VARIANT) | 834 le32_encode_bits(RTW89_HTC_VARIANT_HE_CID_OM, RTW89_HTC_MASK_CTL_ID) | 835 le32_encode_bits(hal->rx_nss - 1, RTW89_HTC_MASK_HTC_OM_RX_NSS) | 836 le32_encode_bits(om_bandwidth, RTW89_HTC_MASK_HTC_OM_CH_WIDTH) | 837 le32_encode_bits(1, RTW89_HTC_MASK_HTC_OM_UL_MU_DIS) | 838 le32_encode_bits(hal->tx_nss - 1, RTW89_HTC_MASK_HTC_OM_TX_NSTS) | 839 le32_encode_bits(0, RTW89_HTC_MASK_HTC_OM_ER_SU_DIS) | 840 le32_encode_bits(0, RTW89_HTC_MASK_HTC_OM_DL_MU_MIMO_RR) | 841 le32_encode_bits(0, RTW89_HTC_MASK_HTC_OM_UL_MU_DATA_DIS); 842 } 843 844 static bool 845 __rtw89_core_tx_check_he_qos_htc(struct rtw89_dev *rtwdev, 846 struct rtw89_core_tx_request *tx_req, 847 enum btc_pkt_type pkt_type) 848 { 849 struct rtw89_sta_link *rtwsta_link = tx_req->rtwsta_link; 850 struct sk_buff *skb = tx_req->skb; 851 struct ieee80211_hdr *hdr = (void *)skb->data; 852 struct ieee80211_link_sta *link_sta; 853 __le16 fc = hdr->frame_control; 854 855 /* AP IOT issue with EAPoL, ARP and DHCP */ 856 if (pkt_type < PACKET_MAX) 857 return false; 858 859 if (!rtwsta_link) 860 return false; 861 862 rcu_read_lock(); 863 864 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, false); 865 if (!link_sta->he_cap.has_he) { 866 rcu_read_unlock(); 867 return false; 868 } 869 870 rcu_read_unlock(); 871 872 if (!ieee80211_is_data_qos(fc)) 873 return false; 874 875 if (skb_headroom(skb) < IEEE80211_HT_CTL_LEN) 876 return false; 877 878 if (rtwsta_link && rtwsta_link->ra_report.might_fallback_legacy) 879 return false; 880 881 return true; 882 } 883 884 static void 885 __rtw89_core_tx_adjust_he_qos_htc(struct rtw89_dev *rtwdev, 886 struct rtw89_core_tx_request *tx_req) 887 { 888 struct rtw89_sta_link *rtwsta_link = tx_req->rtwsta_link; 889 struct sk_buff *skb = tx_req->skb; 890 struct ieee80211_hdr *hdr = (void *)skb->data; 891 __le16 fc = hdr->frame_control; 892 void *data; 893 __le32 *htc; 894 u8 *qc; 895 int hdr_len; 896 897 hdr_len = ieee80211_has_a4(fc) ? 32 : 26; 898 data = skb_push(skb, IEEE80211_HT_CTL_LEN); 899 memmove(data, data + IEEE80211_HT_CTL_LEN, hdr_len); 900 901 hdr = data; 902 htc = data + hdr_len; 903 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_ORDER); 904 *htc = rtwsta_link->htc_template ? 
rtwsta_link->htc_template : 905 le32_encode_bits(RTW89_HTC_VARIANT_HE, RTW89_HTC_MASK_VARIANT) | 906 le32_encode_bits(RTW89_HTC_VARIANT_HE_CID_CAS, RTW89_HTC_MASK_CTL_ID); 907 908 qc = data + hdr_len - IEEE80211_QOS_CTL_LEN; 909 qc[0] |= IEEE80211_QOS_CTL_EOSP; 910 } 911 912 static void 913 rtw89_core_tx_update_he_qos_htc(struct rtw89_dev *rtwdev, 914 struct rtw89_core_tx_request *tx_req, 915 enum btc_pkt_type pkt_type) 916 { 917 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 918 struct rtw89_vif_link *rtwvif_link = tx_req->rtwvif_link; 919 920 if (!__rtw89_core_tx_check_he_qos_htc(rtwdev, tx_req, pkt_type)) 921 goto desc_bk; 922 923 __rtw89_core_tx_adjust_he_qos_htc(rtwdev, tx_req); 924 925 desc_info->pkt_size += IEEE80211_HT_CTL_LEN; 926 desc_info->a_ctrl_bsr = true; 927 928 desc_bk: 929 if (!rtwvif_link || rtwvif_link->last_a_ctrl == desc_info->a_ctrl_bsr) 930 return; 931 932 rtwvif_link->last_a_ctrl = desc_info->a_ctrl_bsr; 933 desc_info->bk = true; 934 } 935 936 static u16 rtw89_core_get_data_rate(struct rtw89_dev *rtwdev, 937 struct rtw89_core_tx_request *tx_req) 938 { 939 struct rtw89_vif_link *rtwvif_link = tx_req->rtwvif_link; 940 struct rtw89_sta_link *rtwsta_link = tx_req->rtwsta_link; 941 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 942 struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif_link->rate_pattern; 943 enum rtw89_chanctx_idx idx = rtwvif_link->chanctx_idx; 944 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, idx); 945 struct ieee80211_link_sta *link_sta; 946 u16 lowest_rate; 947 u16 rate; 948 949 if (rate_pattern->enable) 950 return rate_pattern->rate; 951 952 if (vif->p2p) 953 lowest_rate = RTW89_HW_RATE_OFDM6; 954 else if (chan->band_type == RTW89_BAND_2G) 955 lowest_rate = RTW89_HW_RATE_CCK1; 956 else 957 lowest_rate = RTW89_HW_RATE_OFDM6; 958 959 if (!rtwsta_link) 960 return lowest_rate; 961 962 rcu_read_lock(); 963 964 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, false); 965 if (!link_sta->supp_rates[chan->band_type]) { 966 rate = lowest_rate; 967 goto out; 968 } 969 970 rate = __ffs(link_sta->supp_rates[chan->band_type]) + lowest_rate; 971 972 out: 973 rcu_read_unlock(); 974 975 return rate; 976 } 977 978 static void 979 rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev, 980 struct rtw89_core_tx_request *tx_req) 981 { 982 struct rtw89_vif_link *rtwvif_link = tx_req->rtwvif_link; 983 struct rtw89_sta_link *rtwsta_link = tx_req->rtwsta_link; 984 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 985 struct sk_buff *skb = tx_req->skb; 986 u8 tid, tid_indicate; 987 u8 qsel, ch_dma; 988 989 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; 990 tid_indicate = rtw89_core_get_tid_indicate(rtwdev, tid); 991 qsel = desc_info->hiq ? RTW89_TX_QSEL_B0_HI : rtw89_core_get_qsel(rtwdev, tid); 992 ch_dma = rtw89_chip_get_ch_dma(rtwdev, qsel); 993 994 desc_info->ch_dma = ch_dma; 995 desc_info->tid_indicate = tid_indicate; 996 desc_info->qsel = qsel; 997 desc_info->mac_id = rtw89_core_tx_get_mac_id(rtwdev, tx_req); 998 desc_info->port = desc_info->hiq ? rtwvif_link->port : 0; 999 desc_info->er_cap = rtwsta_link ? rtwsta_link->er_cap : false; 1000 desc_info->stbc = rtwsta_link ? rtwsta_link->ra.stbc_cap : false; 1001 desc_info->ldpc = rtwsta_link ? 
rtwsta_link->ra.ldpc_cap : false; 1002 1003 /* enable wd_info for AMPDU */ 1004 desc_info->en_wd_info = true; 1005 1006 if (IEEE80211_SKB_CB(skb)->control.hw_key) 1007 rtw89_core_tx_update_sec_key(rtwdev, tx_req); 1008 1009 desc_info->data_retry_lowest_rate = rtw89_core_get_data_rate(rtwdev, tx_req); 1010 } 1011 1012 static enum btc_pkt_type 1013 rtw89_core_tx_btc_spec_pkt_notify(struct rtw89_dev *rtwdev, 1014 struct rtw89_core_tx_request *tx_req) 1015 { 1016 struct wiphy *wiphy = rtwdev->hw->wiphy; 1017 struct sk_buff *skb = tx_req->skb; 1018 struct udphdr *udphdr; 1019 1020 if (IEEE80211_SKB_CB(skb)->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) { 1021 wiphy_work_queue(wiphy, &rtwdev->btc.eapol_notify_work); 1022 return PACKET_EAPOL; 1023 } 1024 1025 if (skb->protocol == htons(ETH_P_ARP)) { 1026 wiphy_work_queue(wiphy, &rtwdev->btc.arp_notify_work); 1027 return PACKET_ARP; 1028 } 1029 1030 if (skb->protocol == htons(ETH_P_IP) && 1031 ip_hdr(skb)->protocol == IPPROTO_UDP) { 1032 udphdr = udp_hdr(skb); 1033 if (((udphdr->source == htons(67) && udphdr->dest == htons(68)) || 1034 (udphdr->source == htons(68) && udphdr->dest == htons(67))) && 1035 skb->len > 282) { 1036 wiphy_work_queue(wiphy, &rtwdev->btc.dhcp_notify_work); 1037 return PACKET_DHCP; 1038 } 1039 } 1040 1041 if (skb->protocol == htons(ETH_P_IP) && 1042 ip_hdr(skb)->protocol == IPPROTO_ICMP) { 1043 wiphy_work_queue(wiphy, &rtwdev->btc.icmp_notify_work); 1044 return PACKET_ICMP; 1045 } 1046 1047 return PACKET_MAX; 1048 } 1049 1050 static void 1051 rtw89_core_tx_wake(struct rtw89_dev *rtwdev, 1052 struct rtw89_core_tx_request *tx_req) 1053 { 1054 const struct rtw89_chip_info *chip = rtwdev->chip; 1055 1056 if (!RTW89_CHK_FW_FEATURE(TX_WAKE, &rtwdev->fw)) 1057 return; 1058 1059 switch (chip->chip_id) { 1060 case RTL8852BT: 1061 if (test_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags)) 1062 goto notify; 1063 break; 1064 case RTL8852C: 1065 if (test_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags)) 1066 goto notify; 1067 break; 1068 default: 1069 if (test_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags) && 1070 tx_req->tx_type == RTW89_CORE_TX_TYPE_MGMT) 1071 goto notify; 1072 break; 1073 } 1074 1075 return; 1076 1077 notify: 1078 rtw89_mac_notify_wake(rtwdev); 1079 } 1080 1081 static void 1082 rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev, 1083 struct rtw89_core_tx_request *tx_req) 1084 { 1085 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 1086 struct sk_buff *skb = tx_req->skb; 1087 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1088 struct ieee80211_hdr *hdr = (void *)skb->data; 1089 struct rtw89_addr_cam_entry *addr_cam; 1090 enum rtw89_core_tx_type tx_type; 1091 enum btc_pkt_type pkt_type; 1092 bool upd_wlan_hdr = false; 1093 bool is_bmc; 1094 u16 seq; 1095 1096 if (tx_req->sta) 1097 desc_info->mlo = tx_req->sta->mlo; 1098 else if (tx_req->vif) 1099 desc_info->mlo = ieee80211_vif_is_mld(tx_req->vif); 1100 1101 seq = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; 1102 if (tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD) { 1103 tx_type = rtw89_core_get_tx_type(rtwdev, skb); 1104 tx_req->tx_type = tx_type; 1105 1106 addr_cam = rtw89_get_addr_cam_of(tx_req->rtwvif_link, 1107 tx_req->rtwsta_link); 1108 if (addr_cam->valid && desc_info->mlo) 1109 upd_wlan_hdr = true; 1110 } 1111 is_bmc = (is_broadcast_ether_addr(hdr->addr1) || 1112 is_multicast_ether_addr(hdr->addr1)); 1113 1114 desc_info->seq = seq; 1115 desc_info->pkt_size = skb->len; 1116 desc_info->is_bmc = is_bmc; 1117 desc_info->wd_page = true; 1118 
desc_info->hiq = info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM; 1119 desc_info->upd_wlan_hdr = upd_wlan_hdr; 1120 1121 switch (tx_req->tx_type) { 1122 case RTW89_CORE_TX_TYPE_MGMT: 1123 rtw89_core_tx_update_mgmt_info(rtwdev, tx_req); 1124 break; 1125 case RTW89_CORE_TX_TYPE_DATA: 1126 rtw89_core_tx_update_data_info(rtwdev, tx_req); 1127 pkt_type = rtw89_core_tx_btc_spec_pkt_notify(rtwdev, tx_req); 1128 rtw89_core_tx_update_he_qos_htc(rtwdev, tx_req, pkt_type); 1129 rtw89_core_tx_update_ampdu_info(rtwdev, tx_req, pkt_type); 1130 rtw89_core_tx_update_llc_hdr(rtwdev, desc_info, skb); 1131 break; 1132 case RTW89_CORE_TX_TYPE_FWCMD: 1133 rtw89_core_tx_update_h2c_info(rtwdev, tx_req); 1134 break; 1135 } 1136 } 1137 1138 static void rtw89_tx_wait_work(struct wiphy *wiphy, struct wiphy_work *work) 1139 { 1140 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 1141 tx_wait_work.work); 1142 1143 rtw89_tx_wait_list_clear(rtwdev); 1144 } 1145 1146 void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel) 1147 { 1148 u8 ch_dma; 1149 1150 ch_dma = rtw89_chip_get_ch_dma(rtwdev, qsel); 1151 1152 rtw89_hci_tx_kick_off(rtwdev, ch_dma); 1153 } 1154 1155 int rtw89_core_tx_kick_off_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 1156 struct rtw89_tx_wait_info *wait, int qsel, 1157 unsigned int timeout) 1158 { 1159 unsigned long time_left; 1160 int ret = 0; 1161 1162 lockdep_assert_wiphy(rtwdev->hw->wiphy); 1163 1164 rtw89_core_tx_kick_off(rtwdev, qsel); 1165 time_left = wait_for_completion_timeout(&wait->completion, 1166 msecs_to_jiffies(timeout)); 1167 1168 if (time_left == 0) { 1169 ret = -ETIMEDOUT; 1170 list_add_tail(&wait->list, &rtwdev->tx_waits); 1171 wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->tx_wait_work, 1172 RTW89_TX_WAIT_WORK_TIMEOUT); 1173 } else { 1174 if (!wait->tx_done) 1175 ret = -EAGAIN; 1176 rtw89_tx_wait_release(wait); 1177 } 1178 1179 return ret; 1180 } 1181 1182 int rtw89_h2c_tx(struct rtw89_dev *rtwdev, 1183 struct sk_buff *skb, bool fwdl) 1184 { 1185 struct rtw89_core_tx_request tx_req = {0}; 1186 u32 cnt; 1187 int ret; 1188 1189 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { 1190 rtw89_debug(rtwdev, RTW89_DBG_FW, 1191 "ignore h2c due to power is off with firmware state=%d\n", 1192 test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags)); 1193 dev_kfree_skb(skb); 1194 return 0; 1195 } 1196 1197 tx_req.skb = skb; 1198 tx_req.tx_type = RTW89_CORE_TX_TYPE_FWCMD; 1199 if (fwdl) 1200 tx_req.desc_info.fw_dl = true; 1201 1202 rtw89_core_tx_update_desc_info(rtwdev, &tx_req); 1203 1204 if (!fwdl) 1205 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "H2C: ", skb->data, skb->len); 1206 1207 cnt = rtw89_hci_check_and_reclaim_tx_resource(rtwdev, RTW89_TXCH_CH12); 1208 if (cnt == 0) { 1209 rtw89_err(rtwdev, "no tx fwcmd resource\n"); 1210 return -ENOSPC; 1211 } 1212 1213 ret = rtw89_hci_tx_write(rtwdev, &tx_req); 1214 if (ret) { 1215 rtw89_err(rtwdev, "failed to transmit skb to HCI\n"); 1216 return ret; 1217 } 1218 rtw89_hci_tx_kick_off(rtwdev, RTW89_TXCH_CH12); 1219 1220 return 0; 1221 } 1222 1223 static int rtw89_core_tx_write_link(struct rtw89_dev *rtwdev, 1224 struct rtw89_vif_link *rtwvif_link, 1225 struct rtw89_sta_link *rtwsta_link, 1226 struct sk_buff *skb, int *qsel, bool sw_mld, 1227 struct rtw89_tx_wait_info *wait) 1228 { 1229 struct ieee80211_sta *sta = rtwsta_link_to_sta_safe(rtwsta_link); 1230 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 1231 struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb); 1232 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 
1233 struct rtw89_core_tx_request tx_req = {}; 1234 int ret; 1235 1236 tx_req.skb = skb; 1237 tx_req.vif = vif; 1238 tx_req.sta = sta; 1239 tx_req.rtwvif_link = rtwvif_link; 1240 tx_req.rtwsta_link = rtwsta_link; 1241 tx_req.desc_info.sw_mld = sw_mld; 1242 1243 rtw89_traffic_stats_accu(rtwdev, rtwvif, skb, true, true); 1244 rtw89_wow_parse_akm(rtwdev, skb); 1245 rtw89_core_tx_update_desc_info(rtwdev, &tx_req); 1246 rtw89_core_tx_wake(rtwdev, &tx_req); 1247 1248 rcu_assign_pointer(skb_data->wait, wait); 1249 1250 ret = rtw89_hci_tx_write(rtwdev, &tx_req); 1251 if (ret) { 1252 rtw89_err(rtwdev, "failed to transmit skb to HCI\n"); 1253 return ret; 1254 } 1255 1256 if (qsel) 1257 *qsel = tx_req.desc_info.qsel; 1258 1259 return 0; 1260 } 1261 1262 int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 1263 struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel) 1264 { 1265 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); 1266 struct rtw89_vif *rtwvif = vif_to_rtwvif(vif); 1267 struct rtw89_sta_link *rtwsta_link = NULL; 1268 struct rtw89_vif_link *rtwvif_link; 1269 1270 if (rtwsta) { 1271 rtwsta_link = rtw89_get_designated_link(rtwsta); 1272 if (unlikely(!rtwsta_link)) { 1273 rtw89_err(rtwdev, "tx: find no sta designated link\n"); 1274 return -ENOLINK; 1275 } 1276 1277 rtwvif_link = rtwsta_link->rtwvif_link; 1278 } else { 1279 rtwvif_link = rtw89_get_designated_link(rtwvif); 1280 if (unlikely(!rtwvif_link)) { 1281 rtw89_err(rtwdev, "tx: find no vif designated link\n"); 1282 return -ENOLINK; 1283 } 1284 } 1285 1286 return rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, qsel, false, 1287 NULL); 1288 } 1289 1290 static __le32 rtw89_build_txwd_body0(struct rtw89_tx_desc_info *desc_info) 1291 { 1292 u32 dword = FIELD_PREP(RTW89_TXWD_BODY0_WP_OFFSET, desc_info->wp_offset) | 1293 FIELD_PREP(RTW89_TXWD_BODY0_WD_INFO_EN, desc_info->en_wd_info) | 1294 FIELD_PREP(RTW89_TXWD_BODY0_CHANNEL_DMA, desc_info->ch_dma) | 1295 FIELD_PREP(RTW89_TXWD_BODY0_HDR_LLC_LEN, desc_info->hdr_llc_len) | 1296 FIELD_PREP(RTW89_TXWD_BODY0_WD_PAGE, desc_info->wd_page) | 1297 FIELD_PREP(RTW89_TXWD_BODY0_FW_DL, desc_info->fw_dl) | 1298 FIELD_PREP(RTW89_TXWD_BODY0_HW_SSN_SEL, desc_info->hw_ssn_sel) | 1299 FIELD_PREP(RTW89_TXWD_BODY0_HW_SSN_MODE, desc_info->hw_seq_mode); 1300 1301 return cpu_to_le32(dword); 1302 } 1303 1304 static __le32 rtw89_build_txwd_body0_v1(struct rtw89_tx_desc_info *desc_info) 1305 { 1306 u32 dword = FIELD_PREP(RTW89_TXWD_BODY0_WP_OFFSET_V1, desc_info->wp_offset) | 1307 FIELD_PREP(RTW89_TXWD_BODY0_WD_INFO_EN, desc_info->en_wd_info) | 1308 FIELD_PREP(RTW89_TXWD_BODY0_CHANNEL_DMA, desc_info->ch_dma) | 1309 FIELD_PREP(RTW89_TXWD_BODY0_HDR_LLC_LEN, desc_info->hdr_llc_len) | 1310 FIELD_PREP(RTW89_TXWD_BODY0_WD_PAGE, desc_info->wd_page) | 1311 FIELD_PREP(RTW89_TXWD_BODY0_FW_DL, desc_info->fw_dl); 1312 1313 return cpu_to_le32(dword); 1314 } 1315 1316 static __le32 rtw89_build_txwd_body1_v1(struct rtw89_tx_desc_info *desc_info) 1317 { 1318 u32 dword = FIELD_PREP(RTW89_TXWD_BODY1_ADDR_INFO_NUM, desc_info->addr_info_nr) | 1319 FIELD_PREP(RTW89_TXWD_BODY1_SEC_KEYID, desc_info->sec_keyid) | 1320 FIELD_PREP(RTW89_TXWD_BODY1_SEC_TYPE, desc_info->sec_type); 1321 1322 return cpu_to_le32(dword); 1323 } 1324 1325 static __le32 rtw89_build_txwd_body2(struct rtw89_tx_desc_info *desc_info) 1326 { 1327 u32 dword = FIELD_PREP(RTW89_TXWD_BODY2_TID_INDICATE, desc_info->tid_indicate) | 1328 FIELD_PREP(RTW89_TXWD_BODY2_QSEL, desc_info->qsel) | 1329 FIELD_PREP(RTW89_TXWD_BODY2_TXPKT_SIZE, 
desc_info->pkt_size) | 1330 FIELD_PREP(RTW89_TXWD_BODY2_MACID, desc_info->mac_id); 1331 1332 return cpu_to_le32(dword); 1333 } 1334 1335 static __le32 rtw89_build_txwd_body3(struct rtw89_tx_desc_info *desc_info) 1336 { 1337 u32 dword = FIELD_PREP(RTW89_TXWD_BODY3_SW_SEQ, desc_info->seq) | 1338 FIELD_PREP(RTW89_TXWD_BODY3_AGG_EN, desc_info->agg_en) | 1339 FIELD_PREP(RTW89_TXWD_BODY3_BK, desc_info->bk); 1340 1341 return cpu_to_le32(dword); 1342 } 1343 1344 static __le32 rtw89_build_txwd_body4(struct rtw89_tx_desc_info *desc_info) 1345 { 1346 u32 dword = FIELD_PREP(RTW89_TXWD_BODY4_SEC_IV_L0, desc_info->sec_seq[0]) | 1347 FIELD_PREP(RTW89_TXWD_BODY4_SEC_IV_L1, desc_info->sec_seq[1]); 1348 1349 return cpu_to_le32(dword); 1350 } 1351 1352 static __le32 rtw89_build_txwd_body5(struct rtw89_tx_desc_info *desc_info) 1353 { 1354 u32 dword = FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H2, desc_info->sec_seq[2]) | 1355 FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H3, desc_info->sec_seq[3]) | 1356 FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H4, desc_info->sec_seq[4]) | 1357 FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H5, desc_info->sec_seq[5]); 1358 1359 return cpu_to_le32(dword); 1360 } 1361 1362 static __le32 rtw89_build_txwd_body7_v1(struct rtw89_tx_desc_info *desc_info) 1363 { 1364 u32 dword = FIELD_PREP(RTW89_TXWD_BODY7_USE_RATE_V1, desc_info->use_rate) | 1365 FIELD_PREP(RTW89_TXWD_BODY7_DATA_RATE, desc_info->data_rate); 1366 1367 return cpu_to_le32(dword); 1368 } 1369 1370 static __le32 rtw89_build_txwd_info0(struct rtw89_tx_desc_info *desc_info) 1371 { 1372 u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_USE_RATE, desc_info->use_rate) | 1373 FIELD_PREP(RTW89_TXWD_INFO0_DATA_RATE, desc_info->data_rate) | 1374 FIELD_PREP(RTW89_TXWD_INFO0_DATA_STBC, desc_info->stbc) | 1375 FIELD_PREP(RTW89_TXWD_INFO0_DATA_LDPC, desc_info->ldpc) | 1376 FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb) | 1377 FIELD_PREP(RTW89_TXWD_INFO0_MULTIPORT_ID, desc_info->port); 1378 1379 return cpu_to_le32(dword); 1380 } 1381 1382 static __le32 rtw89_build_txwd_info0_v1(struct rtw89_tx_desc_info *desc_info) 1383 { 1384 u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_DATA_STBC, desc_info->stbc) | 1385 FIELD_PREP(RTW89_TXWD_INFO0_DATA_LDPC, desc_info->ldpc) | 1386 FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb) | 1387 FIELD_PREP(RTW89_TXWD_INFO0_MULTIPORT_ID, desc_info->port) | 1388 FIELD_PREP(RTW89_TXWD_INFO0_DATA_ER, desc_info->er_cap) | 1389 FIELD_PREP(RTW89_TXWD_INFO0_DATA_BW_ER, 0); 1390 1391 return cpu_to_le32(dword); 1392 } 1393 1394 static __le32 rtw89_build_txwd_info1(struct rtw89_tx_desc_info *desc_info) 1395 { 1396 u32 dword = FIELD_PREP(RTW89_TXWD_INFO1_MAX_AGGNUM, desc_info->ampdu_num) | 1397 FIELD_PREP(RTW89_TXWD_INFO1_A_CTRL_BSR, desc_info->a_ctrl_bsr) | 1398 FIELD_PREP(RTW89_TXWD_INFO1_DATA_RTY_LOWEST_RATE, 1399 desc_info->data_retry_lowest_rate); 1400 1401 return cpu_to_le32(dword); 1402 } 1403 1404 static __le32 rtw89_build_txwd_info2(struct rtw89_tx_desc_info *desc_info) 1405 { 1406 u32 dword = FIELD_PREP(RTW89_TXWD_INFO2_AMPDU_DENSITY, desc_info->ampdu_density) | 1407 FIELD_PREP(RTW89_TXWD_INFO2_SEC_TYPE, desc_info->sec_type) | 1408 FIELD_PREP(RTW89_TXWD_INFO2_SEC_HW_ENC, desc_info->sec_en) | 1409 FIELD_PREP(RTW89_TXWD_INFO2_SEC_CAM_IDX, desc_info->sec_cam_idx); 1410 1411 return cpu_to_le32(dword); 1412 } 1413 1414 static __le32 rtw89_build_txwd_info2_v1(struct rtw89_tx_desc_info *desc_info) 1415 { 1416 u32 dword = FIELD_PREP(RTW89_TXWD_INFO2_AMPDU_DENSITY, desc_info->ampdu_density) | 1417 FIELD_PREP(RTW89_TXWD_INFO2_FORCE_KEY_EN, 
desc_info->sec_en) | 1418 FIELD_PREP(RTW89_TXWD_INFO2_SEC_CAM_IDX, desc_info->sec_cam_idx); 1419 1420 return cpu_to_le32(dword); 1421 } 1422 1423 static __le32 rtw89_build_txwd_info4(struct rtw89_tx_desc_info *desc_info) 1424 { 1425 bool rts_en = !desc_info->is_bmc; 1426 u32 dword = FIELD_PREP(RTW89_TXWD_INFO4_RTS_EN, rts_en) | 1427 FIELD_PREP(RTW89_TXWD_INFO4_HW_RTS_EN, 1); 1428 1429 return cpu_to_le32(dword); 1430 } 1431 1432 void rtw89_core_fill_txdesc(struct rtw89_dev *rtwdev, 1433 struct rtw89_tx_desc_info *desc_info, 1434 void *txdesc) 1435 { 1436 struct rtw89_txwd_body *txwd_body = (struct rtw89_txwd_body *)txdesc; 1437 struct rtw89_txwd_info *txwd_info; 1438 1439 txwd_body->dword0 = rtw89_build_txwd_body0(desc_info); 1440 txwd_body->dword2 = rtw89_build_txwd_body2(desc_info); 1441 txwd_body->dword3 = rtw89_build_txwd_body3(desc_info); 1442 1443 if (!desc_info->en_wd_info) 1444 return; 1445 1446 txwd_info = (struct rtw89_txwd_info *)(txwd_body + 1); 1447 txwd_info->dword0 = rtw89_build_txwd_info0(desc_info); 1448 txwd_info->dword1 = rtw89_build_txwd_info1(desc_info); 1449 txwd_info->dword2 = rtw89_build_txwd_info2(desc_info); 1450 txwd_info->dword4 = rtw89_build_txwd_info4(desc_info); 1451 1452 } 1453 EXPORT_SYMBOL(rtw89_core_fill_txdesc); 1454 1455 void rtw89_core_fill_txdesc_v1(struct rtw89_dev *rtwdev, 1456 struct rtw89_tx_desc_info *desc_info, 1457 void *txdesc) 1458 { 1459 struct rtw89_txwd_body_v1 *txwd_body = (struct rtw89_txwd_body_v1 *)txdesc; 1460 struct rtw89_txwd_info *txwd_info; 1461 1462 txwd_body->dword0 = rtw89_build_txwd_body0_v1(desc_info); 1463 txwd_body->dword1 = rtw89_build_txwd_body1_v1(desc_info); 1464 txwd_body->dword2 = rtw89_build_txwd_body2(desc_info); 1465 txwd_body->dword3 = rtw89_build_txwd_body3(desc_info); 1466 if (desc_info->sec_en) { 1467 txwd_body->dword4 = rtw89_build_txwd_body4(desc_info); 1468 txwd_body->dword5 = rtw89_build_txwd_body5(desc_info); 1469 } 1470 txwd_body->dword7 = rtw89_build_txwd_body7_v1(desc_info); 1471 1472 if (!desc_info->en_wd_info) 1473 return; 1474 1475 txwd_info = (struct rtw89_txwd_info *)(txwd_body + 1); 1476 txwd_info->dword0 = rtw89_build_txwd_info0_v1(desc_info); 1477 txwd_info->dword1 = rtw89_build_txwd_info1(desc_info); 1478 txwd_info->dword2 = rtw89_build_txwd_info2_v1(desc_info); 1479 txwd_info->dword4 = rtw89_build_txwd_info4(desc_info); 1480 } 1481 EXPORT_SYMBOL(rtw89_core_fill_txdesc_v1); 1482 1483 static __le32 rtw89_build_txwd_body0_v2(struct rtw89_tx_desc_info *desc_info) 1484 { 1485 u32 dword = FIELD_PREP(BE_TXD_BODY0_WP_OFFSET_V1, desc_info->wp_offset) | 1486 FIELD_PREP(BE_TXD_BODY0_WDINFO_EN, desc_info->en_wd_info) | 1487 FIELD_PREP(BE_TXD_BODY0_CH_DMA, desc_info->ch_dma) | 1488 FIELD_PREP(BE_TXD_BODY0_HDR_LLC_LEN, desc_info->hdr_llc_len) | 1489 FIELD_PREP(BE_TXD_BODY0_WD_PAGE, desc_info->wd_page); 1490 1491 return cpu_to_le32(dword); 1492 } 1493 1494 static __le32 rtw89_build_txwd_body1_v2(struct rtw89_tx_desc_info *desc_info) 1495 { 1496 u32 dword = FIELD_PREP(BE_TXD_BODY1_ADDR_INFO_NUM, desc_info->addr_info_nr) | 1497 FIELD_PREP(BE_TXD_BODY1_SEC_KEYID, desc_info->sec_keyid) | 1498 FIELD_PREP(BE_TXD_BODY1_SEC_TYPE, desc_info->sec_type); 1499 1500 return cpu_to_le32(dword); 1501 } 1502 1503 static __le32 rtw89_build_txwd_body2_v2(struct rtw89_tx_desc_info *desc_info) 1504 { 1505 u32 dword = FIELD_PREP(BE_TXD_BODY2_TID_IND, desc_info->tid_indicate) | 1506 FIELD_PREP(BE_TXD_BODY2_QSEL, desc_info->qsel) | 1507 FIELD_PREP(BE_TXD_BODY2_TXPKTSIZE, desc_info->pkt_size) | 1508 FIELD_PREP(BE_TXD_BODY2_AGG_EN, 
desc_info->agg_en) | 1509 FIELD_PREP(BE_TXD_BODY2_BK, desc_info->bk) | 1510 FIELD_PREP(BE_TXD_BODY2_MACID, desc_info->mac_id); 1511 1512 return cpu_to_le32(dword); 1513 } 1514 1515 static __le32 rtw89_build_txwd_body3_v2(struct rtw89_tx_desc_info *desc_info) 1516 { 1517 u32 dword = FIELD_PREP(BE_TXD_BODY3_WIFI_SEQ, desc_info->seq) | 1518 FIELD_PREP(BE_TXD_BODY3_MLO_FLAG, desc_info->mlo) | 1519 FIELD_PREP(BE_TXD_BODY3_IS_MLD_SW_EN, desc_info->sw_mld); 1520 1521 return cpu_to_le32(dword); 1522 } 1523 1524 static __le32 rtw89_build_txwd_body4_v2(struct rtw89_tx_desc_info *desc_info) 1525 { 1526 u32 dword = FIELD_PREP(BE_TXD_BODY4_SEC_IV_L0, desc_info->sec_seq[0]) | 1527 FIELD_PREP(BE_TXD_BODY4_SEC_IV_L1, desc_info->sec_seq[1]); 1528 1529 return cpu_to_le32(dword); 1530 } 1531 1532 static __le32 rtw89_build_txwd_body5_v2(struct rtw89_tx_desc_info *desc_info) 1533 { 1534 u32 dword = FIELD_PREP(BE_TXD_BODY5_SEC_IV_H2, desc_info->sec_seq[2]) | 1535 FIELD_PREP(BE_TXD_BODY5_SEC_IV_H3, desc_info->sec_seq[3]) | 1536 FIELD_PREP(BE_TXD_BODY5_SEC_IV_H4, desc_info->sec_seq[4]) | 1537 FIELD_PREP(BE_TXD_BODY5_SEC_IV_H5, desc_info->sec_seq[5]); 1538 1539 return cpu_to_le32(dword); 1540 } 1541 1542 static __le32 rtw89_build_txwd_body6_v2(struct rtw89_tx_desc_info *desc_info) 1543 { 1544 u32 dword = FIELD_PREP(BE_TXD_BODY6_UPD_WLAN_HDR, desc_info->upd_wlan_hdr); 1545 1546 return cpu_to_le32(dword); 1547 } 1548 1549 static __le32 rtw89_build_txwd_body7_v2(struct rtw89_tx_desc_info *desc_info) 1550 { 1551 u32 dword = FIELD_PREP(BE_TXD_BODY7_USERATE_SEL, desc_info->use_rate) | 1552 FIELD_PREP(BE_TXD_BODY7_DATA_ER, desc_info->er_cap) | 1553 FIELD_PREP(BE_TXD_BODY7_DATA_BW_ER, 0) | 1554 FIELD_PREP(BE_TXD_BODY7_DATARATE, desc_info->data_rate); 1555 1556 return cpu_to_le32(dword); 1557 } 1558 1559 static __le32 rtw89_build_txwd_info0_v2(struct rtw89_tx_desc_info *desc_info) 1560 { 1561 u32 dword = FIELD_PREP(BE_TXD_INFO0_DATA_STBC, desc_info->stbc) | 1562 FIELD_PREP(BE_TXD_INFO0_DATA_LDPC, desc_info->ldpc) | 1563 FIELD_PREP(BE_TXD_INFO0_DISDATAFB, desc_info->dis_data_fb) | 1564 FIELD_PREP(BE_TXD_INFO0_MULTIPORT_ID, desc_info->port); 1565 1566 return cpu_to_le32(dword); 1567 } 1568 1569 static __le32 rtw89_build_txwd_info1_v2(struct rtw89_tx_desc_info *desc_info) 1570 { 1571 u32 dword = FIELD_PREP(BE_TXD_INFO1_MAX_AGG_NUM, desc_info->ampdu_num) | 1572 FIELD_PREP(BE_TXD_INFO1_A_CTRL_BSR, desc_info->a_ctrl_bsr) | 1573 FIELD_PREP(BE_TXD_INFO1_DATA_RTY_LOWEST_RATE, 1574 desc_info->data_retry_lowest_rate); 1575 1576 return cpu_to_le32(dword); 1577 } 1578 1579 static __le32 rtw89_build_txwd_info2_v2(struct rtw89_tx_desc_info *desc_info) 1580 { 1581 u32 dword = FIELD_PREP(BE_TXD_INFO2_AMPDU_DENSITY, desc_info->ampdu_density) | 1582 FIELD_PREP(BE_TXD_INFO2_FORCE_KEY_EN, desc_info->sec_en) | 1583 FIELD_PREP(BE_TXD_INFO2_SEC_CAM_IDX, desc_info->sec_cam_idx); 1584 1585 return cpu_to_le32(dword); 1586 } 1587 1588 static __le32 rtw89_build_txwd_info4_v2(struct rtw89_tx_desc_info *desc_info) 1589 { 1590 bool rts_en = !desc_info->is_bmc; 1591 u32 dword = FIELD_PREP(BE_TXD_INFO4_RTS_EN, rts_en) | 1592 FIELD_PREP(BE_TXD_INFO4_HW_RTS_EN, 1); 1593 1594 return cpu_to_le32(dword); 1595 } 1596 1597 void rtw89_core_fill_txdesc_v2(struct rtw89_dev *rtwdev, 1598 struct rtw89_tx_desc_info *desc_info, 1599 void *txdesc) 1600 { 1601 struct rtw89_txwd_body_v2 *txwd_body = txdesc; 1602 struct rtw89_txwd_info_v2 *txwd_info; 1603 1604 txwd_body->dword0 = rtw89_build_txwd_body0_v2(desc_info); 1605 txwd_body->dword1 = 
rtw89_build_txwd_body1_v2(desc_info); 1606 txwd_body->dword2 = rtw89_build_txwd_body2_v2(desc_info); 1607 txwd_body->dword3 = rtw89_build_txwd_body3_v2(desc_info); 1608 if (desc_info->sec_en) { 1609 txwd_body->dword4 = rtw89_build_txwd_body4_v2(desc_info); 1610 txwd_body->dword5 = rtw89_build_txwd_body5_v2(desc_info); 1611 } 1612 txwd_body->dword6 = rtw89_build_txwd_body6_v2(desc_info); 1613 txwd_body->dword7 = rtw89_build_txwd_body7_v2(desc_info); 1614 1615 if (!desc_info->en_wd_info) 1616 return; 1617 1618 txwd_info = (struct rtw89_txwd_info_v2 *)(txwd_body + 1); 1619 txwd_info->dword0 = rtw89_build_txwd_info0_v2(desc_info); 1620 txwd_info->dword1 = rtw89_build_txwd_info1_v2(desc_info); 1621 txwd_info->dword2 = rtw89_build_txwd_info2_v2(desc_info); 1622 txwd_info->dword4 = rtw89_build_txwd_info4_v2(desc_info); 1623 } 1624 EXPORT_SYMBOL(rtw89_core_fill_txdesc_v2); 1625 1626 static __le32 rtw89_build_txwd_fwcmd0_v1(struct rtw89_tx_desc_info *desc_info) 1627 { 1628 u32 dword = FIELD_PREP(AX_RXD_RPKT_LEN_MASK, desc_info->pkt_size) | 1629 FIELD_PREP(AX_RXD_RPKT_TYPE_MASK, desc_info->fw_dl ? 1630 RTW89_CORE_RX_TYPE_FWDL : 1631 RTW89_CORE_RX_TYPE_H2C); 1632 1633 return cpu_to_le32(dword); 1634 } 1635 1636 void rtw89_core_fill_txdesc_fwcmd_v1(struct rtw89_dev *rtwdev, 1637 struct rtw89_tx_desc_info *desc_info, 1638 void *txdesc) 1639 { 1640 struct rtw89_rxdesc_short *txwd_v1 = (struct rtw89_rxdesc_short *)txdesc; 1641 1642 txwd_v1->dword0 = rtw89_build_txwd_fwcmd0_v1(desc_info); 1643 } 1644 EXPORT_SYMBOL(rtw89_core_fill_txdesc_fwcmd_v1); 1645 1646 static __le32 rtw89_build_txwd_fwcmd0_v2(struct rtw89_tx_desc_info *desc_info) 1647 { 1648 u32 dword = FIELD_PREP(BE_RXD_RPKT_LEN_MASK, desc_info->pkt_size) | 1649 FIELD_PREP(BE_RXD_RPKT_TYPE_MASK, desc_info->fw_dl ? 
1650 RTW89_CORE_RX_TYPE_FWDL : 1651 RTW89_CORE_RX_TYPE_H2C); 1652 1653 return cpu_to_le32(dword); 1654 } 1655 1656 void rtw89_core_fill_txdesc_fwcmd_v2(struct rtw89_dev *rtwdev, 1657 struct rtw89_tx_desc_info *desc_info, 1658 void *txdesc) 1659 { 1660 struct rtw89_rxdesc_short_v2 *txwd_v2 = (struct rtw89_rxdesc_short_v2 *)txdesc; 1661 1662 txwd_v2->dword0 = rtw89_build_txwd_fwcmd0_v2(desc_info); 1663 } 1664 EXPORT_SYMBOL(rtw89_core_fill_txdesc_fwcmd_v2); 1665 1666 static int rtw89_core_rx_process_mac_ppdu(struct rtw89_dev *rtwdev, 1667 struct sk_buff *skb, 1668 struct rtw89_rx_phy_ppdu *phy_ppdu) 1669 { 1670 const struct rtw89_chip_info *chip = rtwdev->chip; 1671 const struct rtw89_rxinfo *rxinfo = (const struct rtw89_rxinfo *)skb->data; 1672 const struct rtw89_rxinfo_user *user; 1673 enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen; 1674 int rx_cnt_size = RTW89_PPDU_MAC_RX_CNT_SIZE; 1675 bool rx_cnt_valid = false; 1676 bool invalid = false; 1677 u8 plcp_size = 0; 1678 u8 *phy_sts; 1679 u8 usr_num; 1680 int i; 1681 1682 if (chip_gen == RTW89_CHIP_BE) { 1683 invalid = le32_get_bits(rxinfo->w0, RTW89_RXINFO_W0_INVALID_V1); 1684 rx_cnt_size = RTW89_PPDU_MAC_RX_CNT_SIZE_V1; 1685 } 1686 1687 if (invalid) 1688 return -EINVAL; 1689 1690 rx_cnt_valid = le32_get_bits(rxinfo->w0, RTW89_RXINFO_W0_RX_CNT_VLD); 1691 if (chip_gen == RTW89_CHIP_BE) { 1692 plcp_size = le32_get_bits(rxinfo->w0, RTW89_RXINFO_W0_PLCP_LEN_V1) << 3; 1693 usr_num = le32_get_bits(rxinfo->w0, RTW89_RXINFO_W0_USR_NUM_V1); 1694 } else { 1695 plcp_size = le32_get_bits(rxinfo->w1, RTW89_RXINFO_W1_PLCP_LEN) << 3; 1696 usr_num = le32_get_bits(rxinfo->w0, RTW89_RXINFO_W0_USR_NUM); 1697 } 1698 if (usr_num > chip->ppdu_max_usr) { 1699 rtw89_warn(rtwdev, "Invalid user number (%d) in mac info\n", 1700 usr_num); 1701 return -EINVAL; 1702 } 1703 1704 for (i = 0; i < usr_num; i++) { 1705 user = &rxinfo->user[i]; 1706 if (!le32_get_bits(user->w0, RTW89_RXINFO_USER_MAC_ID_VALID)) 1707 continue; 1708 /* For WiFi 7 chips, RXWD.mac_id of PPDU status is not set 1709 * by hardware, so update mac_id by rxinfo_user[].mac_id. 
1710 */ 1711 if (chip_gen == RTW89_CHIP_BE) 1712 phy_ppdu->mac_id = 1713 le32_get_bits(user->w0, RTW89_RXINFO_USER_MACID); 1714 phy_ppdu->has_data = 1715 le32_get_bits(user->w0, RTW89_RXINFO_USER_DATA); 1716 phy_ppdu->has_bcn = 1717 le32_get_bits(user->w0, RTW89_RXINFO_USER_BCN); 1718 break; 1719 } 1720 1721 phy_sts = skb->data + RTW89_PPDU_MAC_INFO_SIZE; 1722 phy_sts += usr_num * RTW89_PPDU_MAC_INFO_USR_SIZE; 1723 /* 8-byte alignment */ 1724 if (usr_num & BIT(0)) 1725 phy_sts += RTW89_PPDU_MAC_INFO_USR_SIZE; 1726 if (rx_cnt_valid) 1727 phy_sts += rx_cnt_size; 1728 phy_sts += plcp_size; 1729 1730 if (phy_sts > skb->data + skb->len) 1731 return -EINVAL; 1732 1733 phy_ppdu->buf = phy_sts; 1734 phy_ppdu->len = skb->data + skb->len - phy_sts; 1735 1736 return 0; 1737 } 1738 1739 static u8 rtw89_get_data_rate_nss(struct rtw89_dev *rtwdev, u16 data_rate) 1740 { 1741 u8 data_rate_mode; 1742 1743 data_rate_mode = rtw89_get_data_rate_mode(rtwdev, data_rate); 1744 switch (data_rate_mode) { 1745 case DATA_RATE_MODE_NON_HT: 1746 return 1; 1747 case DATA_RATE_MODE_HT: 1748 return rtw89_get_data_ht_nss(rtwdev, data_rate) + 1; 1749 case DATA_RATE_MODE_VHT: 1750 case DATA_RATE_MODE_HE: 1751 case DATA_RATE_MODE_EHT: 1752 return rtw89_get_data_nss(rtwdev, data_rate) + 1; 1753 default: 1754 rtw89_warn(rtwdev, "invalid RX rate mode %d\n", data_rate_mode); 1755 return 0; 1756 } 1757 } 1758 1759 static void rtw89_core_rx_process_phy_ppdu_iter(void *data, 1760 struct ieee80211_sta *sta) 1761 { 1762 struct rtw89_rx_phy_ppdu *phy_ppdu = (struct rtw89_rx_phy_ppdu *)data; 1763 struct rtw89_sta *rtwsta = sta_to_rtwsta(sta); 1764 struct rtw89_dev *rtwdev = rtwsta->rtwdev; 1765 struct rtw89_hal *hal = &rtwdev->hal; 1766 struct rtw89_sta_link *rtwsta_link; 1767 u8 ant_num = hal->ant_diversity ? 
2 : rtwdev->chip->rf_path_num; 1768 u8 ant_pos = U8_MAX; 1769 u8 evm_pos = 0; 1770 int i; 1771 1772 rtwsta_link = rtw89_sta_get_link_inst(rtwsta, phy_ppdu->phy_idx); 1773 if (unlikely(!rtwsta_link)) 1774 return; 1775 1776 if (rtwsta_link->mac_id != phy_ppdu->mac_id || !phy_ppdu->to_self) 1777 return; 1778 1779 if (hal->ant_diversity && hal->antenna_rx) { 1780 ant_pos = __ffs(hal->antenna_rx); 1781 evm_pos = ant_pos; 1782 } 1783 1784 ewma_rssi_add(&rtwsta_link->avg_rssi, phy_ppdu->rssi_avg); 1785 1786 if (ant_pos < ant_num) { 1787 ewma_rssi_add(&rtwsta_link->rssi[ant_pos], phy_ppdu->rssi[0]); 1788 } else { 1789 for (i = 0; i < rtwdev->chip->rf_path_num; i++) 1790 ewma_rssi_add(&rtwsta_link->rssi[i], phy_ppdu->rssi[i]); 1791 } 1792 1793 if (phy_ppdu->ofdm.has && (phy_ppdu->has_data || phy_ppdu->has_bcn)) { 1794 ewma_snr_add(&rtwsta_link->avg_snr, phy_ppdu->ofdm.avg_snr); 1795 if (rtw89_get_data_rate_nss(rtwdev, phy_ppdu->rate) == 1) { 1796 ewma_evm_add(&rtwsta_link->evm_1ss, phy_ppdu->ofdm.evm_min); 1797 } else { 1798 ewma_evm_add(&rtwsta_link->evm_min[evm_pos], 1799 phy_ppdu->ofdm.evm_min); 1800 ewma_evm_add(&rtwsta_link->evm_max[evm_pos], 1801 phy_ppdu->ofdm.evm_max); 1802 } 1803 } 1804 } 1805 1806 #define VAR_LEN 0xff 1807 #define VAR_LEN_UNIT 8 1808 static u16 rtw89_core_get_phy_status_ie_len(struct rtw89_dev *rtwdev, 1809 const struct rtw89_phy_sts_iehdr *iehdr) 1810 { 1811 static const u8 physts_ie_len_tabs[RTW89_CHIP_GEN_NUM][32] = { 1812 [RTW89_CHIP_AX] = { 1813 16, 32, 24, 24, 8, 8, 8, 8, VAR_LEN, 8, VAR_LEN, 176, VAR_LEN, 1814 VAR_LEN, VAR_LEN, VAR_LEN, VAR_LEN, VAR_LEN, 16, 24, VAR_LEN, 1815 VAR_LEN, VAR_LEN, 0, 24, 24, 24, 24, 32, 32, 32, 32 1816 }, 1817 [RTW89_CHIP_BE] = { 1818 32, 40, 24, 24, 8, 8, 8, 8, VAR_LEN, 8, VAR_LEN, 176, VAR_LEN, 1819 VAR_LEN, VAR_LEN, VAR_LEN, VAR_LEN, VAR_LEN, 88, 56, VAR_LEN, 1820 VAR_LEN, VAR_LEN, 0, 24, 24, 24, 24, 32, 32, 32, 32 1821 }, 1822 }; 1823 const u8 *physts_ie_len_tab; 1824 u16 ie_len; 1825 u8 ie; 1826 1827 physts_ie_len_tab = physts_ie_len_tabs[rtwdev->chip->chip_gen]; 1828 1829 ie = le32_get_bits(iehdr->w0, RTW89_PHY_STS_IEHDR_TYPE); 1830 if (physts_ie_len_tab[ie] != VAR_LEN) 1831 ie_len = physts_ie_len_tab[ie]; 1832 else 1833 ie_len = le32_get_bits(iehdr->w0, RTW89_PHY_STS_IEHDR_LEN) * VAR_LEN_UNIT; 1834 1835 return ie_len; 1836 } 1837 1838 static void rtw89_core_parse_phy_status_ie01_v2(struct rtw89_dev *rtwdev, 1839 const struct rtw89_phy_sts_iehdr *iehdr, 1840 struct rtw89_rx_phy_ppdu *phy_ppdu) 1841 { 1842 const struct rtw89_phy_sts_ie01_v2 *ie; 1843 u8 *rpl_fd = phy_ppdu->rpl_fd; 1844 1845 ie = (const struct rtw89_phy_sts_ie01_v2 *)iehdr; 1846 rpl_fd[RF_PATH_A] = le32_get_bits(ie->w8, RTW89_PHY_STS_IE01_V2_W8_RPL_FD_A); 1847 rpl_fd[RF_PATH_B] = le32_get_bits(ie->w8, RTW89_PHY_STS_IE01_V2_W8_RPL_FD_B); 1848 rpl_fd[RF_PATH_C] = le32_get_bits(ie->w9, RTW89_PHY_STS_IE01_V2_W9_RPL_FD_C); 1849 rpl_fd[RF_PATH_D] = le32_get_bits(ie->w9, RTW89_PHY_STS_IE01_V2_W9_RPL_FD_D); 1850 1851 phy_ppdu->bw_idx = le32_get_bits(ie->w5, RTW89_PHY_STS_IE01_V2_W5_BW_IDX); 1852 } 1853 1854 static void rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev, 1855 const struct rtw89_phy_sts_iehdr *iehdr, 1856 struct rtw89_rx_phy_ppdu *phy_ppdu) 1857 { 1858 const struct rtw89_phy_sts_ie01 *ie = (const struct rtw89_phy_sts_ie01 *)iehdr; 1859 s16 cfo; 1860 u32 t; 1861 1862 phy_ppdu->chan_idx = le32_get_bits(ie->w0, RTW89_PHY_STS_IE01_W0_CH_IDX); 1863 1864 if (rtwdev->hw->conf.flags & IEEE80211_CONF_MONITOR) { 1865 phy_ppdu->ldpc = le32_get_bits(ie->w2, 
RTW89_PHY_STS_IE01_W2_LDPC); 1866 phy_ppdu->stbc = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_STBC); 1867 } 1868 1869 if (!phy_ppdu->hdr_2_en) 1870 phy_ppdu->rx_path_en = 1871 le32_get_bits(ie->w0, RTW89_PHY_STS_IE01_W0_RX_PATH_EN); 1872 1873 if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6) 1874 return; 1875 1876 if (!phy_ppdu->to_self) 1877 return; 1878 1879 phy_ppdu->rpl_avg = le32_get_bits(ie->w0, RTW89_PHY_STS_IE01_W0_RSSI_AVG_FD); 1880 phy_ppdu->ofdm.avg_snr = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_AVG_SNR); 1881 phy_ppdu->ofdm.evm_max = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_EVM_MAX); 1882 phy_ppdu->ofdm.evm_min = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_EVM_MIN); 1883 phy_ppdu->ofdm.has = true; 1884 1885 /* sign conversion for S(12,2) */ 1886 if (rtwdev->chip->cfo_src_fd) { 1887 t = le32_get_bits(ie->w1, RTW89_PHY_STS_IE01_W1_FD_CFO); 1888 cfo = sign_extend32(t, 11); 1889 } else { 1890 t = le32_get_bits(ie->w1, RTW89_PHY_STS_IE01_W1_PREMB_CFO); 1891 cfo = sign_extend32(t, 11); 1892 } 1893 1894 rtw89_phy_cfo_parse(rtwdev, cfo, phy_ppdu); 1895 1896 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) 1897 rtw89_core_parse_phy_status_ie01_v2(rtwdev, iehdr, phy_ppdu); 1898 } 1899 1900 static void rtw89_core_parse_phy_status_ie00(struct rtw89_dev *rtwdev, 1901 const struct rtw89_phy_sts_iehdr *iehdr, 1902 struct rtw89_rx_phy_ppdu *phy_ppdu) 1903 { 1904 const struct rtw89_phy_sts_ie00 *ie = (const struct rtw89_phy_sts_ie00 *)iehdr; 1905 u16 tmp_rpl; 1906 1907 tmp_rpl = le32_get_bits(ie->w0, RTW89_PHY_STS_IE00_W0_RPL); 1908 phy_ppdu->rpl_avg = tmp_rpl >> 1; 1909 1910 if (!phy_ppdu->hdr_2_en) 1911 phy_ppdu->rx_path_en = 1912 le32_get_bits(ie->w3, RTW89_PHY_STS_IE00_W3_RX_PATH_EN); 1913 } 1914 1915 static void rtw89_core_parse_phy_status_ie00_v2(struct rtw89_dev *rtwdev, 1916 const struct rtw89_phy_sts_iehdr *iehdr, 1917 struct rtw89_rx_phy_ppdu *phy_ppdu) 1918 { 1919 const struct rtw89_phy_sts_ie00_v2 *ie; 1920 u8 *rpl_path = phy_ppdu->rpl_path; 1921 u16 tmp_rpl[RF_PATH_MAX]; 1922 u8 i; 1923 1924 ie = (const struct rtw89_phy_sts_ie00_v2 *)iehdr; 1925 tmp_rpl[RF_PATH_A] = le32_get_bits(ie->w4, RTW89_PHY_STS_IE00_V2_W4_RPL_TD_A); 1926 tmp_rpl[RF_PATH_B] = le32_get_bits(ie->w4, RTW89_PHY_STS_IE00_V2_W4_RPL_TD_B); 1927 tmp_rpl[RF_PATH_C] = le32_get_bits(ie->w4, RTW89_PHY_STS_IE00_V2_W4_RPL_TD_C); 1928 tmp_rpl[RF_PATH_D] = le32_get_bits(ie->w5, RTW89_PHY_STS_IE00_V2_W5_RPL_TD_D); 1929 1930 for (i = 0; i < RF_PATH_MAX; i++) 1931 rpl_path[i] = tmp_rpl[i] >> 1; 1932 } 1933 1934 static int rtw89_core_process_phy_status_ie(struct rtw89_dev *rtwdev, 1935 const struct rtw89_phy_sts_iehdr *iehdr, 1936 struct rtw89_rx_phy_ppdu *phy_ppdu) 1937 { 1938 u8 ie; 1939 1940 ie = le32_get_bits(iehdr->w0, RTW89_PHY_STS_IEHDR_TYPE); 1941 1942 switch (ie) { 1943 case RTW89_PHYSTS_IE00_CMN_CCK: 1944 rtw89_core_parse_phy_status_ie00(rtwdev, iehdr, phy_ppdu); 1945 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) 1946 rtw89_core_parse_phy_status_ie00_v2(rtwdev, iehdr, phy_ppdu); 1947 break; 1948 case RTW89_PHYSTS_IE01_CMN_OFDM: 1949 rtw89_core_parse_phy_status_ie01(rtwdev, iehdr, phy_ppdu); 1950 break; 1951 default: 1952 break; 1953 } 1954 1955 return 0; 1956 } 1957 1958 static void rtw89_core_update_phy_ppdu_hdr_v2(struct rtw89_rx_phy_ppdu *phy_ppdu) 1959 { 1960 const struct rtw89_phy_sts_hdr_v2 *hdr = phy_ppdu->buf + PHY_STS_HDR_LEN; 1961 1962 phy_ppdu->rx_path_en = le32_get_bits(hdr->w0, RTW89_PHY_STS_HDR_V2_W0_PATH_EN); 1963 } 1964 1965 static void rtw89_core_update_phy_ppdu(struct rtw89_rx_phy_ppdu *phy_ppdu) 1966 { 
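/* Parse the common PHY status header: IE bitmap, average RSSI and per-path RSSI. When a second header is present (hdr_2_en), the RX path mask is taken from it instead. */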
1967 const struct rtw89_phy_sts_hdr *hdr = phy_ppdu->buf; 1968 u8 *rssi = phy_ppdu->rssi; 1969 1970 phy_ppdu->ie = le32_get_bits(hdr->w0, RTW89_PHY_STS_HDR_W0_IE_MAP); 1971 phy_ppdu->rssi_avg = le32_get_bits(hdr->w0, RTW89_PHY_STS_HDR_W0_RSSI_AVG); 1972 rssi[RF_PATH_A] = le32_get_bits(hdr->w1, RTW89_PHY_STS_HDR_W1_RSSI_A); 1973 rssi[RF_PATH_B] = le32_get_bits(hdr->w1, RTW89_PHY_STS_HDR_W1_RSSI_B); 1974 rssi[RF_PATH_C] = le32_get_bits(hdr->w1, RTW89_PHY_STS_HDR_W1_RSSI_C); 1975 rssi[RF_PATH_D] = le32_get_bits(hdr->w1, RTW89_PHY_STS_HDR_W1_RSSI_D); 1976 1977 phy_ppdu->hdr_2_en = le32_get_bits(hdr->w0, RTW89_PHY_STS_HDR_W0_HDR_2_EN); 1978 if (phy_ppdu->hdr_2_en) 1979 rtw89_core_update_phy_ppdu_hdr_v2(phy_ppdu); 1980 } 1981 1982 static int rtw89_core_rx_process_phy_ppdu(struct rtw89_dev *rtwdev, 1983 struct rtw89_rx_phy_ppdu *phy_ppdu) 1984 { 1985 const struct rtw89_phy_sts_hdr *hdr = phy_ppdu->buf; 1986 u32 len_from_header; 1987 bool physts_valid; 1988 1989 physts_valid = le32_get_bits(hdr->w0, RTW89_PHY_STS_HDR_W0_VALID); 1990 if (!physts_valid) 1991 return -EINVAL; 1992 1993 len_from_header = le32_get_bits(hdr->w0, RTW89_PHY_STS_HDR_W0_LEN) << 3; 1994 1995 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) 1996 len_from_header += PHY_STS_HDR_LEN; 1997 1998 if (len_from_header != phy_ppdu->len) { 1999 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "phy ppdu len mismatch\n"); 2000 return -EINVAL; 2001 } 2002 rtw89_core_update_phy_ppdu(phy_ppdu); 2003 2004 return 0; 2005 } 2006 2007 static int rtw89_core_rx_parse_phy_sts(struct rtw89_dev *rtwdev, 2008 struct rtw89_rx_phy_ppdu *phy_ppdu) 2009 { 2010 u16 ie_len; 2011 void *pos, *end; 2012 2013 /* mark invalid reports and bypass them */ 2014 if (phy_ppdu->ie < RTW89_CCK_PKT) 2015 return -EINVAL; 2016 2017 pos = phy_ppdu->buf + PHY_STS_HDR_LEN; 2018 if (phy_ppdu->hdr_2_en) 2019 pos += PHY_STS_HDR_LEN; 2020 end = phy_ppdu->buf + phy_ppdu->len; 2021 while (pos < end) { 2022 const struct rtw89_phy_sts_iehdr *iehdr = pos; 2023 2024 ie_len = rtw89_core_get_phy_status_ie_len(rtwdev, iehdr); 2025 rtw89_core_process_phy_status_ie(rtwdev, iehdr, phy_ppdu); 2026 pos += ie_len; 2027 if (pos > end || ie_len == 0) { 2028 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 2029 "phy status parse failed\n"); 2030 return -EINVAL; 2031 } 2032 } 2033 2034 rtw89_chip_convert_rpl_to_rssi(rtwdev, phy_ppdu); 2035 rtw89_phy_antdiv_parse(rtwdev, phy_ppdu); 2036 2037 return 0; 2038 } 2039 2040 static void rtw89_core_rx_process_phy_sts(struct rtw89_dev *rtwdev, 2041 struct rtw89_rx_phy_ppdu *phy_ppdu) 2042 { 2043 int ret; 2044 2045 ret = rtw89_core_rx_parse_phy_sts(rtwdev, phy_ppdu); 2046 if (ret) 2047 rtw89_debug(rtwdev, RTW89_DBG_TXRX, "parse phy sts failed\n"); 2048 else 2049 phy_ppdu->valid = true; 2050 2051 ieee80211_iterate_stations_atomic(rtwdev->hw, 2052 rtw89_core_rx_process_phy_ppdu_iter, 2053 phy_ppdu); 2054 } 2055 2056 static u8 rtw89_rxdesc_to_nl_he_gi(struct rtw89_dev *rtwdev, 2057 u8 desc_info_gi, 2058 bool rx_status) 2059 { 2060 switch (desc_info_gi) { 2061 case RTW89_GILTF_SGI_4XHE08: 2062 case RTW89_GILTF_2XHE08: 2063 case RTW89_GILTF_1XHE08: 2064 return NL80211_RATE_INFO_HE_GI_0_8; 2065 case RTW89_GILTF_2XHE16: 2066 case RTW89_GILTF_1XHE16: 2067 return NL80211_RATE_INFO_HE_GI_1_6; 2068 case RTW89_GILTF_LGI_4XHE32: 2069 return NL80211_RATE_INFO_HE_GI_3_2; 2070 default: 2071 rtw89_warn(rtwdev, "invalid gi_ltf=%d", desc_info_gi); 2072 if (rx_status) 2073 return NL80211_RATE_INFO_HE_GI_3_2; 2074 return U8_MAX; 2075 } 2076 } 2077 2078 static u8 rtw89_rxdesc_to_nl_eht_gi(struct rtw89_dev 
*rtwdev, 2079 u8 desc_info_gi, 2080 bool rx_status) 2081 { 2082 switch (desc_info_gi) { 2083 case RTW89_GILTF_SGI_4XHE08: 2084 case RTW89_GILTF_2XHE08: 2085 case RTW89_GILTF_1XHE08: 2086 return NL80211_RATE_INFO_EHT_GI_0_8; 2087 case RTW89_GILTF_2XHE16: 2088 case RTW89_GILTF_1XHE16: 2089 return NL80211_RATE_INFO_EHT_GI_1_6; 2090 case RTW89_GILTF_LGI_4XHE32: 2091 return NL80211_RATE_INFO_EHT_GI_3_2; 2092 default: 2093 rtw89_warn(rtwdev, "invalid gi_ltf=%d", desc_info_gi); 2094 if (rx_status) 2095 return NL80211_RATE_INFO_EHT_GI_3_2; 2096 return U8_MAX; 2097 } 2098 } 2099 2100 static u8 rtw89_rxdesc_to_nl_he_eht_gi(struct rtw89_dev *rtwdev, 2101 u8 desc_info_gi, 2102 bool rx_status, bool eht) 2103 { 2104 return eht ? rtw89_rxdesc_to_nl_eht_gi(rtwdev, desc_info_gi, rx_status) : 2105 rtw89_rxdesc_to_nl_he_gi(rtwdev, desc_info_gi, rx_status); 2106 } 2107 2108 static 2109 bool rtw89_check_rx_statu_gi_match(struct ieee80211_rx_status *status, u8 gi_ltf, 2110 bool eht) 2111 { 2112 if (eht) 2113 return status->eht.gi == gi_ltf; 2114 2115 return status->he_gi == gi_ltf; 2116 } 2117 2118 static bool rtw89_core_rx_ppdu_match(struct rtw89_dev *rtwdev, 2119 struct rtw89_rx_desc_info *desc_info, 2120 struct ieee80211_rx_status *status) 2121 { 2122 u8 band = desc_info->bb_sel ? RTW89_PHY_1 : RTW89_PHY_0; 2123 u8 data_rate_mode, bw, rate_idx = MASKBYTE0, gi_ltf; 2124 bool eht = false; 2125 u16 data_rate; 2126 bool ret; 2127 2128 data_rate = desc_info->data_rate; 2129 data_rate_mode = rtw89_get_data_rate_mode(rtwdev, data_rate); 2130 if (data_rate_mode == DATA_RATE_MODE_NON_HT) { 2131 rate_idx = rtw89_get_data_not_ht_idx(rtwdev, data_rate); 2132 /* rate_idx is still hardware value here */ 2133 } else if (data_rate_mode == DATA_RATE_MODE_HT) { 2134 rate_idx = rtw89_get_data_ht_mcs(rtwdev, data_rate); 2135 } else if (data_rate_mode == DATA_RATE_MODE_VHT || 2136 data_rate_mode == DATA_RATE_MODE_HE || 2137 data_rate_mode == DATA_RATE_MODE_EHT) { 2138 rate_idx = rtw89_get_data_mcs(rtwdev, data_rate); 2139 } else { 2140 rtw89_warn(rtwdev, "invalid RX rate mode %d\n", data_rate_mode); 2141 } 2142 2143 eht = data_rate_mode == DATA_RATE_MODE_EHT; 2144 bw = rtw89_hw_to_rate_info_bw(desc_info->bw); 2145 gi_ltf = rtw89_rxdesc_to_nl_he_eht_gi(rtwdev, desc_info->gi_ltf, false, eht); 2146 ret = rtwdev->ppdu_sts.curr_rx_ppdu_cnt[band] == desc_info->ppdu_cnt && 2147 status->rate_idx == rate_idx && 2148 rtw89_check_rx_statu_gi_match(status, gi_ltf, eht) && 2149 status->bw == bw; 2150 2151 return ret; 2152 } 2153 2154 struct rtw89_vif_rx_stats_iter_data { 2155 struct rtw89_dev *rtwdev; 2156 struct rtw89_rx_phy_ppdu *phy_ppdu; 2157 struct rtw89_rx_desc_info *desc_info; 2158 struct sk_buff *skb; 2159 const u8 *bssid; 2160 }; 2161 2162 static void rtw89_stats_trigger_frame(struct rtw89_dev *rtwdev, 2163 struct rtw89_vif_link *rtwvif_link, 2164 struct ieee80211_bss_conf *bss_conf, 2165 struct sk_buff *skb) 2166 { 2167 struct ieee80211_trigger *tf = (struct ieee80211_trigger *)skb->data; 2168 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 2169 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 2170 u8 *pos, *end, type, tf_bw; 2171 u16 aid, tf_rua; 2172 2173 if (!ether_addr_equal(bss_conf->bssid, tf->ta) || 2174 rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION || 2175 rtwvif_link->net_type == RTW89_NET_TYPE_NO_LINK) 2176 return; 2177 2178 type = le64_get_bits(tf->common_info, IEEE80211_TRIGGER_TYPE_MASK); 2179 if (type != IEEE80211_TRIGGER_TYPE_BASIC && type != IEEE80211_TRIGGER_TYPE_MU_BAR) 2180 return; 2181 2182 end = 
(u8 *)tf + skb->len; 2183 pos = tf->variable; 2184 2185 while (end - pos >= RTW89_TF_BASIC_USER_INFO_SZ) { 2186 aid = RTW89_GET_TF_USER_INFO_AID12(pos); 2187 tf_rua = RTW89_GET_TF_USER_INFO_RUA(pos); 2188 tf_bw = le64_get_bits(tf->common_info, IEEE80211_TRIGGER_ULBW_MASK); 2189 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 2190 "[TF] aid: %d, ul_mcs: %d, rua: %d, bw: %d\n", 2191 aid, RTW89_GET_TF_USER_INFO_UL_MCS(pos), 2192 tf_rua, tf_bw); 2193 2194 if (aid == RTW89_TF_PAD) 2195 break; 2196 2197 if (aid == vif->cfg.aid) { 2198 enum nl80211_he_ru_alloc rua; 2199 2200 rtwvif->stats.rx_tf_acc++; 2201 rtwdev->stats.rx_tf_acc++; 2202 2203 /* The following only required for HE trigger frame, but we 2204 * cannot use UL HE-SIG-A2 reserved subfield to identify it 2205 * since some 11ax APs will fill it with all 0s, which will 2206 * be misunderstood as EHT trigger frame. 2207 */ 2208 if (bss_conf->eht_support) 2209 break; 2210 2211 rua = rtw89_he_rua_to_ru_alloc(tf_rua >> 1); 2212 2213 if (tf_bw == IEEE80211_TRIGGER_ULBW_160_80P80MHZ && 2214 rua <= NL80211_RATE_INFO_HE_RU_ALLOC_106) 2215 rtwvif_link->pwr_diff_en = true; 2216 break; 2217 } 2218 2219 pos += RTW89_TF_BASIC_USER_INFO_SZ; 2220 } 2221 } 2222 2223 static void rtw89_cancel_6ghz_probe_work(struct wiphy *wiphy, struct wiphy_work *work) 2224 { 2225 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 2226 cancel_6ghz_probe_work); 2227 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 2228 struct rtw89_pktofld_info *info; 2229 2230 lockdep_assert_wiphy(wiphy); 2231 2232 if (!rtwdev->scanning) 2233 return; 2234 2235 list_for_each_entry(info, &pkt_list[NL80211_BAND_6GHZ], list) { 2236 if (!info->cancel || !test_bit(info->id, rtwdev->pkt_offload)) 2237 continue; 2238 2239 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 2240 2241 /* Don't delete/free info from pkt_list at this moment. Let it 2242 * be deleted/freed in rtw89_release_pkt_list() after scanning, 2243 * since if during scanning, pkt_list is accessed in bottom half. 
2244 */ 2245 } 2246 } 2247 2248 static void rtw89_core_cancel_6ghz_probe_tx(struct rtw89_dev *rtwdev, 2249 struct sk_buff *skb) 2250 { 2251 struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); 2252 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; 2253 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 2254 struct rtw89_pktofld_info *info; 2255 const u8 *ies = mgmt->u.beacon.variable, *ssid_ie; 2256 bool queue_work = false; 2257 2258 if (rx_status->band != NL80211_BAND_6GHZ) 2259 return; 2260 2261 if (unlikely(!(rtwdev->chip->support_bands & BIT(NL80211_BAND_6GHZ)))) { 2262 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "invalid rx on unsupported 6 GHz\n"); 2263 return; 2264 } 2265 2266 ssid_ie = cfg80211_find_ie(WLAN_EID_SSID, ies, skb->len); 2267 2268 list_for_each_entry(info, &pkt_list[NL80211_BAND_6GHZ], list) { 2269 if (ether_addr_equal(info->bssid, mgmt->bssid)) { 2270 info->cancel = true; 2271 queue_work = true; 2272 continue; 2273 } 2274 2275 if (!ssid_ie || ssid_ie[1] != info->ssid_len || info->ssid_len == 0) 2276 continue; 2277 2278 if (memcmp(&ssid_ie[2], info->ssid, info->ssid_len) == 0) { 2279 info->cancel = true; 2280 queue_work = true; 2281 } 2282 } 2283 2284 if (queue_work) 2285 wiphy_work_queue(rtwdev->hw->wiphy, &rtwdev->cancel_6ghz_probe_work); 2286 } 2287 2288 static void rtw89_vif_sync_bcn_tsf(struct rtw89_vif_link *rtwvif_link, 2289 struct ieee80211_hdr *hdr, size_t len) 2290 { 2291 struct ieee80211_mgmt *mgmt = (typeof(mgmt))hdr; 2292 2293 if (len < offsetof(typeof(*mgmt), u.beacon.variable)) 2294 return; 2295 2296 WRITE_ONCE(rtwvif_link->sync_bcn_tsf, le64_to_cpu(mgmt->u.beacon.timestamp)); 2297 } 2298 2299 static u32 rtw89_bcn_calc_min_tbtt(struct rtw89_dev *rtwdev, u32 tbtt1, u32 tbtt2) 2300 { 2301 struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track; 2302 u32 close_bcn_intvl_th = bcn_track->close_bcn_intvl_th; 2303 u32 tbtt_diff_th = bcn_track->tbtt_diff_th; 2304 2305 if (tbtt2 > tbtt1) 2306 swap(tbtt1, tbtt2); 2307 2308 if (tbtt1 - tbtt2 > tbtt_diff_th) 2309 return tbtt1; 2310 else if (tbtt2 > close_bcn_intvl_th) 2311 return tbtt2; 2312 else if (tbtt1 > close_bcn_intvl_th) 2313 return tbtt1; 2314 else 2315 return tbtt2; 2316 } 2317 2318 static void rtw89_bcn_cfg_tbtt_offset(struct rtw89_dev *rtwdev, 2319 struct rtw89_vif_link *rtwvif_link) 2320 { 2321 struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track; 2322 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2323 u32 offset = bcn_track->tbtt_offset; 2324 2325 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2326 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 2327 const struct rtw89_port_reg *p = mac->port_base; 2328 u32 bcnspc, val; 2329 2330 bcnspc = rtw89_read32_port_mask(rtwdev, rtwvif_link, 2331 p->bcn_space, B_AX_BCN_SPACE_MASK); 2332 val = bcnspc - (offset / 1024); 2333 val = u32_encode_bits(val, B_AX_TBTT_SHIFT_OFST_MAG) | 2334 B_AX_TBTT_SHIFT_OFST_SIGN; 2335 2336 rtw89_write16_port_mask(rtwdev, rtwvif_link, p->tbtt_shift, 2337 B_AX_TBTT_SHIFT_OFST_MASK, val); 2338 2339 return; 2340 } 2341 2342 rtw89_fw_h2c_tbtt_tuning(rtwdev, rtwvif_link, offset); 2343 } 2344 2345 static void rtw89_bcn_update_tbtt_offset(struct rtw89_dev *rtwdev, 2346 struct rtw89_vif_link *rtwvif_link) 2347 { 2348 struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat; 2349 struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track; 2350 u32 *tbtt_us = bcn_stat->tbtt_us; 2351 u32 offset = tbtt_us[0]; 2352 u8 i; 2353 2354 for (i = 1; i < 
RTW89_BCN_TRACK_STAT_NR; i++) 2355 offset = rtw89_bcn_calc_min_tbtt(rtwdev, tbtt_us[i], offset); 2356 2357 if (bcn_track->tbtt_offset == offset) 2358 return; 2359 2360 bcn_track->tbtt_offset = offset; 2361 rtw89_bcn_cfg_tbtt_offset(rtwdev, rtwvif_link); 2362 } 2363 2364 static int cmp_u16(const void *a, const void *b) 2365 { 2366 return *(const u16 *)a - *(const u16 *)b; 2367 } 2368 2369 static u16 _rtw89_bcn_calc_drift(u16 tbtt, u16 offset, u16 beacon_int) 2370 { 2371 if (tbtt < offset) 2372 return beacon_int - offset + tbtt; 2373 2374 return tbtt - offset; 2375 } 2376 2377 static void rtw89_bcn_calc_drift(struct rtw89_dev *rtwdev) 2378 { 2379 struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat; 2380 struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track; 2381 u16 offset_tu = bcn_track->tbtt_offset / 1024; 2382 u16 *tbtt_tu = bcn_stat->tbtt_tu; 2383 u16 *drift = bcn_stat->drift; 2384 u8 i; 2385 2386 bcn_stat->tbtt_tu_min = U16_MAX; 2387 bcn_stat->tbtt_tu_max = 0; 2388 for (i = 0; i < RTW89_BCN_TRACK_STAT_NR; i++) { 2389 drift[i] = _rtw89_bcn_calc_drift(tbtt_tu[i], offset_tu, 2390 bcn_track->beacon_int); 2391 2392 bcn_stat->tbtt_tu_min = min(bcn_stat->tbtt_tu_min, tbtt_tu[i]); 2393 bcn_stat->tbtt_tu_max = max(bcn_stat->tbtt_tu_max, tbtt_tu[i]); 2394 } 2395 2396 sort(drift, RTW89_BCN_TRACK_STAT_NR, sizeof(*drift), cmp_u16, NULL); 2397 } 2398 2399 static void rtw89_bcn_calc_distribution(struct rtw89_dev *rtwdev) 2400 { 2401 struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat; 2402 struct rtw89_beacon_dist *bcn_dist = &bcn_stat->bcn_dist; 2403 u16 lower_bound, upper_bound, outlier_count = 0; 2404 u16 *drift = bcn_stat->drift; 2405 u16 *bins = bcn_dist->bins; 2406 u16 q1, q3, iqr, tmp; 2407 u8 i; 2408 2409 BUILD_BUG_ON(RTW89_BCN_TRACK_STAT_NR % 4 != 0); 2410 2411 memset(bcn_dist, 0, sizeof(*bcn_dist)); 2412 2413 bcn_dist->min = drift[0]; 2414 bcn_dist->max = drift[RTW89_BCN_TRACK_STAT_NR - 1]; 2415 2416 tmp = RTW89_BCN_TRACK_STAT_NR / 4; 2417 q1 = ((drift[tmp] + drift[tmp - 1]) * RTW89_BCN_TRACK_SCALE_FACTOR) / 2; 2418 2419 tmp = (RTW89_BCN_TRACK_STAT_NR * 3) / 4; 2420 q3 = ((drift[tmp] + drift[tmp - 1]) * RTW89_BCN_TRACK_SCALE_FACTOR) / 2; 2421 2422 iqr = q3 - q1; 2423 tmp = (3 * iqr) / 2; 2424 2425 if (bcn_dist->min <= 5) 2426 lower_bound = bcn_dist->min; 2427 else if (q1 > tmp) 2428 lower_bound = (q1 - tmp) / RTW89_BCN_TRACK_SCALE_FACTOR; 2429 else 2430 lower_bound = 0; 2431 2432 upper_bound = (q3 + tmp) / RTW89_BCN_TRACK_SCALE_FACTOR; 2433 2434 for (i = 0; i < RTW89_BCN_TRACK_STAT_NR; i++) { 2435 u16 tbtt = bcn_stat->tbtt_tu[i]; 2436 u16 min = bcn_stat->tbtt_tu_min; 2437 u8 bin_idx; 2438 2439 /* histogram */ 2440 bin_idx = min((tbtt - min) / RTW89_BCN_TRACK_BIN_WIDTH, 2441 RTW89_BCN_TRACK_MAX_BIN_NUM - 1); 2442 bins[bin_idx]++; 2443 2444 /* boxplot outlier */ 2445 if (drift[i] < lower_bound || drift[i] > upper_bound) 2446 outlier_count++; 2447 } 2448 2449 bcn_dist->outlier_count = outlier_count; 2450 bcn_dist->lower_bound = lower_bound; 2451 bcn_dist->upper_bound = upper_bound; 2452 } 2453 2454 static u8 rtw89_bcn_get_coverage(struct rtw89_dev *rtwdev, u16 threshold) 2455 { 2456 struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat; 2457 int l = 0, r = RTW89_BCN_TRACK_STAT_NR - 1, m; 2458 u16 *drift = bcn_stat->drift; 2459 int index = -1; 2460 u8 count = 0; 2461 2462 while (l <= r) { 2463 m = l + (r - l) / 2; 2464 2465 if (drift[m] <= threshold) { 2466 index = m; 2467 l = m + 1; 2468 } else { 2469 r = m - 1; 2470 } 2471 } 2472 2473 count = (index == 
-1) ? 0 : (index + 1); 2474 2475 return (count * PERCENT) / RTW89_BCN_TRACK_STAT_NR; 2476 } 2477 2478 static u16 rtw89_bcn_get_histogram_bound(struct rtw89_dev *rtwdev, u8 target) 2479 { 2480 struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat; 2481 struct rtw89_beacon_dist *bcn_dist = &bcn_stat->bcn_dist; 2482 u16 tbtt_tu_max = bcn_stat->tbtt_tu_max; 2483 u16 upper, lower = bcn_stat->tbtt_tu_min; 2484 u8 i, count = 0; 2485 2486 for (i = 0; i < RTW89_BCN_TRACK_MAX_BIN_NUM; i++) { 2487 upper = lower + RTW89_BCN_TRACK_BIN_WIDTH - 1; 2488 if (i == RTW89_BCN_TRACK_MAX_BIN_NUM - 1) 2489 upper = max(upper, tbtt_tu_max); 2490 2491 count += bcn_dist->bins[i]; 2492 if (count > target) 2493 break; 2494 2495 lower = upper + 1; 2496 } 2497 2498 return upper; 2499 } 2500 2501 static u16 rtw89_bcn_get_rx_time(struct rtw89_dev *rtwdev, 2502 const struct rtw89_chan *chan) 2503 { 2504 #define RTW89_SYMBOL_TIME_2GHZ 192 2505 #define RTW89_SYMBOL_TIME_5GHZ 20 2506 #define RTW89_SYMBOL_TIME_6GHZ 20 2507 struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat; 2508 u16 bitrate, val; 2509 2510 if (!rtw89_legacy_rate_to_bitrate(rtwdev, pkt_stat->beacon_rate, &bitrate)) 2511 return 0; 2512 2513 val = (pkt_stat->beacon_len * 8 * RTW89_BCN_TRACK_SCALE_FACTOR) / bitrate; 2514 2515 switch (chan->band_type) { 2516 default: 2517 case RTW89_BAND_2G: 2518 val += RTW89_SYMBOL_TIME_2GHZ; 2519 break; 2520 case RTW89_BAND_5G: 2521 val += RTW89_SYMBOL_TIME_5GHZ; 2522 break; 2523 case RTW89_BAND_6G: 2524 val += RTW89_SYMBOL_TIME_6GHZ; 2525 break; 2526 } 2527 2528 /* convert to millisecond */ 2529 return DIV_ROUND_UP(val, 1000); 2530 } 2531 2532 static void rtw89_bcn_calc_timeout(struct rtw89_dev *rtwdev, 2533 struct rtw89_vif_link *rtwvif_link) 2534 { 2535 #define RTW89_BCN_TRACK_EXTEND_TIMEOUT 5 2536 #define RTW89_BCN_TRACK_COVERAGE_TH 0 /* unit: TU */ 2537 #define RTW89_BCN_TRACK_STRONG_RSSI 80 2538 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 2539 struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat; 2540 struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat; 2541 struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track; 2542 struct rtw89_beacon_dist *bcn_dist = &bcn_stat->bcn_dist; 2543 u16 outlier_high_bcn_th = bcn_track->outlier_high_bcn_th; 2544 u16 outlier_low_bcn_th = bcn_track->outlier_low_bcn_th; 2545 u8 rssi = ewma_rssi_read(&rtwdev->phystat.bcn_rssi); 2546 u16 target_bcn_th = bcn_track->target_bcn_th; 2547 u16 low_bcn_th = bcn_track->low_bcn_th; 2548 u16 med_bcn_th = bcn_track->med_bcn_th; 2549 u16 beacon_int = bcn_track->beacon_int; 2550 u16 bcn_timeout; 2551 2552 if (pkt_stat->beacon_nr < low_bcn_th) { 2553 bcn_timeout = (RTW89_BCN_TRACK_TARGET_BCN * beacon_int) / PERCENT; 2554 goto out; 2555 } 2556 2557 if (bcn_dist->outlier_count >= outlier_high_bcn_th) { 2558 bcn_timeout = bcn_dist->max; 2559 goto out; 2560 } 2561 2562 if (pkt_stat->beacon_nr < med_bcn_th) { 2563 if (bcn_dist->outlier_count > outlier_low_bcn_th) 2564 bcn_timeout = (bcn_dist->max + bcn_dist->upper_bound) / 2; 2565 else 2566 bcn_timeout = bcn_dist->upper_bound + 2567 RTW89_BCN_TRACK_EXTEND_TIMEOUT; 2568 2569 goto out; 2570 } 2571 2572 if (rssi >= RTW89_BCN_TRACK_STRONG_RSSI) { 2573 if (rtw89_bcn_get_coverage(rtwdev, RTW89_BCN_TRACK_COVERAGE_TH) >= 90) { 2574 /* ideal case */ 2575 bcn_timeout = 0; 2576 } else { 2577 u16 offset_tu = bcn_track->tbtt_offset / 1024; 2578 u16 upper_bound; 2579 2580 upper_bound = 2581 rtw89_bcn_get_histogram_bound(rtwdev, target_bcn_th); 
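/* Convert the histogram upper bound (an absolute TBTT time in TU) back into a drift relative to the programmed TBTT offset. */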
2582 bcn_timeout = 2583 _rtw89_bcn_calc_drift(upper_bound, offset_tu, beacon_int); 2584 } 2585 2586 goto out; 2587 } 2588 2589 bcn_timeout = bcn_stat->drift[target_bcn_th]; 2590 2591 out: 2592 bcn_track->bcn_timeout = bcn_timeout + rtw89_bcn_get_rx_time(rtwdev, chan); 2593 } 2594 2595 static void rtw89_bcn_update_timeout(struct rtw89_dev *rtwdev, 2596 struct rtw89_vif_link *rtwvif_link) 2597 { 2598 rtw89_bcn_calc_drift(rtwdev); 2599 rtw89_bcn_calc_distribution(rtwdev); 2600 rtw89_bcn_calc_timeout(rtwdev, rtwvif_link); 2601 } 2602 2603 static void rtw89_core_bcn_track(struct rtw89_dev *rtwdev) 2604 { 2605 struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track; 2606 struct rtw89_vif_link *rtwvif_link; 2607 struct rtw89_vif *rtwvif; 2608 unsigned int link_id; 2609 2610 if (!RTW89_CHK_FW_FEATURE(BEACON_TRACKING, &rtwdev->fw)) 2611 return; 2612 2613 if (!rtwdev->lps_enabled) 2614 return; 2615 2616 if (!bcn_track->is_data_ready) 2617 return; 2618 2619 rtw89_for_each_rtwvif(rtwdev, rtwvif) { 2620 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 2621 if (!(rtwvif_link->wifi_role == RTW89_WIFI_ROLE_STATION || 2622 rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)) 2623 continue; 2624 2625 rtw89_bcn_update_tbtt_offset(rtwdev, rtwvif_link); 2626 rtw89_bcn_update_timeout(rtwdev, rtwvif_link); 2627 } 2628 } 2629 } 2630 2631 static bool rtw89_core_bcn_track_can_lps(struct rtw89_dev *rtwdev) 2632 { 2633 struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track; 2634 2635 if (!RTW89_CHK_FW_FEATURE(BEACON_TRACKING, &rtwdev->fw)) 2636 return true; 2637 2638 return bcn_track->is_data_ready; 2639 } 2640 2641 static void rtw89_core_bcn_track_assoc(struct rtw89_dev *rtwdev, 2642 struct rtw89_vif_link *rtwvif_link) 2643 { 2644 #define RTW89_BCN_TRACK_MED_BCN 70 2645 #define RTW89_BCN_TRACK_LOW_BCN 30 2646 #define RTW89_BCN_TRACK_OUTLIER_HIGH_BCN 30 2647 #define RTW89_BCN_TRACK_OUTLIER_LOW_BCN 20 2648 struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track; 2649 u32 period = jiffies_to_msecs(RTW89_TRACK_WORK_PERIOD); 2650 struct ieee80211_bss_conf *bss_conf; 2651 u32 beacons_in_period; 2652 u32 bcn_intvl_us; 2653 u16 beacon_int; 2654 u8 dtim; 2655 2656 rcu_read_lock(); 2657 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 2658 beacon_int = bss_conf->beacon_int; 2659 dtim = bss_conf->dtim_period; 2660 rcu_read_unlock(); 2661 2662 beacons_in_period = period / beacon_int / dtim; 2663 bcn_intvl_us = ieee80211_tu_to_usec(beacon_int); 2664 2665 bcn_track->low_bcn_th = 2666 (beacons_in_period * RTW89_BCN_TRACK_LOW_BCN) / PERCENT; 2667 bcn_track->med_bcn_th = 2668 (beacons_in_period * RTW89_BCN_TRACK_MED_BCN) / PERCENT; 2669 bcn_track->outlier_low_bcn_th = 2670 (RTW89_BCN_TRACK_STAT_NR * RTW89_BCN_TRACK_OUTLIER_LOW_BCN) / PERCENT; 2671 bcn_track->outlier_high_bcn_th = 2672 (RTW89_BCN_TRACK_STAT_NR * RTW89_BCN_TRACK_OUTLIER_HIGH_BCN) / PERCENT; 2673 bcn_track->target_bcn_th = 2674 (RTW89_BCN_TRACK_STAT_NR * RTW89_BCN_TRACK_TARGET_BCN) / PERCENT; 2675 2676 bcn_track->close_bcn_intvl_th = ieee80211_tu_to_usec(beacon_int - 3); 2677 bcn_track->tbtt_diff_th = (bcn_intvl_us * 85) / PERCENT; 2678 bcn_track->beacon_int = beacon_int; 2679 bcn_track->dtim = dtim; 2680 } 2681 2682 static void rtw89_core_bcn_track_reset(struct rtw89_dev *rtwdev) 2683 { 2684 memset(&rtwdev->phystat.bcn_stat, 0, sizeof(rtwdev->phystat.bcn_stat)); 2685 memset(&rtwdev->bcn_track, 0, sizeof(rtwdev->bcn_track)); 2686 } 2687 2688 static void rtw89_vif_rx_bcn_stat(struct rtw89_dev *rtwdev, 2689 struct 
ieee80211_bss_conf *bss_conf, 2690 struct sk_buff *skb) 2691 { 2692 #define RTW89_APPEND_TSF_2GHZ 384 2693 #define RTW89_APPEND_TSF_5GHZ 52 2694 #define RTW89_APPEND_TSF_6GHZ 52 2695 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; 2696 struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); 2697 struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat; 2698 struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track; 2699 u32 bcn_intvl_us = ieee80211_tu_to_usec(bss_conf->beacon_int); 2700 u64 tsf = le64_to_cpu(mgmt->u.beacon.timestamp); 2701 u8 wp, num = bcn_stat->num; 2702 u16 append; 2703 2704 if (!RTW89_CHK_FW_FEATURE(BEACON_TRACKING, &rtwdev->fw)) 2705 return; 2706 2707 switch (rx_status->band) { 2708 default: 2709 case NL80211_BAND_2GHZ: 2710 append = RTW89_APPEND_TSF_2GHZ; 2711 break; 2712 case NL80211_BAND_5GHZ: 2713 append = RTW89_APPEND_TSF_5GHZ; 2714 break; 2715 case NL80211_BAND_6GHZ: 2716 append = RTW89_APPEND_TSF_6GHZ; 2717 break; 2718 } 2719 2720 wp = bcn_stat->wp; 2721 div_u64_rem(tsf - append, bcn_intvl_us, &bcn_stat->tbtt_us[wp]); 2722 bcn_stat->tbtt_tu[wp] = bcn_stat->tbtt_us[wp] / 1024; 2723 bcn_stat->wp = (wp + 1) % RTW89_BCN_TRACK_STAT_NR; 2724 bcn_stat->num = umin(num + 1, RTW89_BCN_TRACK_STAT_NR); 2725 bcn_track->is_data_ready = bcn_stat->num == RTW89_BCN_TRACK_STAT_NR; 2726 } 2727 2728 static void rtw89_vif_rx_stats_iter(void *data, u8 *mac, 2729 struct ieee80211_vif *vif) 2730 { 2731 struct rtw89_vif_rx_stats_iter_data *iter_data = data; 2732 struct rtw89_dev *rtwdev = iter_data->rtwdev; 2733 struct rtw89_vif *rtwvif = vif_to_rtwvif(vif); 2734 struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat; 2735 struct rtw89_rx_desc_info *desc_info = iter_data->desc_info; 2736 struct sk_buff *skb = iter_data->skb; 2737 struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); 2738 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2739 struct rtw89_rx_phy_ppdu *phy_ppdu = iter_data->phy_ppdu; 2740 bool is_mld = ieee80211_vif_is_mld(vif); 2741 struct ieee80211_bss_conf *bss_conf; 2742 struct rtw89_vif_link *rtwvif_link; 2743 const u8 *bssid = iter_data->bssid; 2744 const u8 *target_bssid; 2745 2746 if (rtwdev->scanning && 2747 (ieee80211_is_beacon(hdr->frame_control) || 2748 ieee80211_is_probe_resp(hdr->frame_control))) 2749 rtw89_core_cancel_6ghz_probe_tx(rtwdev, skb); 2750 2751 rcu_read_lock(); 2752 2753 rtwvif_link = rtw89_vif_get_link_inst(rtwvif, desc_info->bb_sel); 2754 if (unlikely(!rtwvif_link)) 2755 goto out; 2756 2757 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false); 2758 if (!bss_conf->bssid) 2759 goto out; 2760 2761 if (ieee80211_is_trigger(hdr->frame_control)) { 2762 rtw89_stats_trigger_frame(rtwdev, rtwvif_link, bss_conf, skb); 2763 goto out; 2764 } 2765 2766 target_bssid = ieee80211_is_beacon(hdr->frame_control) && 2767 bss_conf->nontransmitted ? 
2768 bss_conf->transmitter_bssid : bss_conf->bssid; 2769 if (!ether_addr_equal(target_bssid, bssid)) 2770 goto out; 2771 2772 if (is_mld) { 2773 rx_status->link_valid = true; 2774 rx_status->link_id = rtwvif_link->link_id; 2775 } 2776 2777 if (ieee80211_is_beacon(hdr->frame_control)) { 2778 if (vif->type == NL80211_IFTYPE_STATION && 2779 !test_bit(RTW89_FLAG_WOWLAN, rtwdev->flags)) { 2780 rtw89_vif_sync_bcn_tsf(rtwvif_link, hdr, skb->len); 2781 rtw89_fw_h2c_rssi_offload(rtwdev, phy_ppdu); 2782 } 2783 2784 if (phy_ppdu) { 2785 ewma_rssi_add(&rtwdev->phystat.bcn_rssi, phy_ppdu->rssi_avg); 2786 if (!test_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags)) 2787 rtwvif_link->bcn_bw_idx = phy_ppdu->bw_idx; 2788 } 2789 2790 pkt_stat->beacon_nr++; 2791 pkt_stat->beacon_rate = desc_info->data_rate; 2792 pkt_stat->beacon_len = skb->len; 2793 2794 rtw89_vif_rx_bcn_stat(rtwdev, bss_conf, skb); 2795 } 2796 2797 if (!ether_addr_equal(bss_conf->addr, hdr->addr1)) 2798 goto out; 2799 2800 if (desc_info->data_rate < RTW89_HW_RATE_NR) 2801 pkt_stat->rx_rate_cnt[desc_info->data_rate]++; 2802 2803 rtw89_traffic_stats_accu(rtwdev, rtwvif, skb, false, false); 2804 2805 out: 2806 rcu_read_unlock(); 2807 } 2808 2809 static void rtw89_core_rx_stats(struct rtw89_dev *rtwdev, 2810 struct rtw89_rx_phy_ppdu *phy_ppdu, 2811 struct rtw89_rx_desc_info *desc_info, 2812 struct sk_buff *skb) 2813 { 2814 struct rtw89_vif_rx_stats_iter_data iter_data; 2815 2816 rtw89_traffic_stats_accu(rtwdev, NULL, skb, true, false); 2817 2818 iter_data.rtwdev = rtwdev; 2819 iter_data.phy_ppdu = phy_ppdu; 2820 iter_data.desc_info = desc_info; 2821 iter_data.skb = skb; 2822 iter_data.bssid = get_hdr_bssid((struct ieee80211_hdr *)skb->data); 2823 rtw89_iterate_vifs_bh(rtwdev, rtw89_vif_rx_stats_iter, &iter_data); 2824 } 2825 2826 static void rtw89_correct_cck_chan(struct rtw89_dev *rtwdev, 2827 struct ieee80211_rx_status *status) 2828 { 2829 const struct rtw89_chan_rcd *rcd = 2830 rtw89_chan_rcd_get(rtwdev, RTW89_CHANCTX_0); 2831 u16 chan = rcd->prev_primary_channel; 2832 u8 band = rtw89_hw_to_nl80211_band(rcd->prev_band_type); 2833 2834 if (status->band != NL80211_BAND_2GHZ && 2835 status->encoding == RX_ENC_LEGACY && 2836 status->rate_idx < RTW89_HW_RATE_OFDM6) { 2837 status->freq = ieee80211_channel_to_frequency(chan, band); 2838 status->band = band; 2839 } 2840 } 2841 2842 static void rtw89_core_hw_to_sband_rate(struct ieee80211_rx_status *rx_status) 2843 { 2844 if (rx_status->band == NL80211_BAND_2GHZ || 2845 rx_status->encoding != RX_ENC_LEGACY) 2846 return; 2847 2848 /* Some control frames' freq(ACKs in this case) are reported wrong due 2849 * to FW notify timing, set to lowest rate to prevent overflow. 
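Otherwise a CCK hardware rate would underflow when the four CCK entries are subtracted for non-2 GHz bands below.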
2850 */ 2851 if (rx_status->rate_idx < RTW89_HW_RATE_OFDM6) { 2852 rx_status->rate_idx = 0; 2853 return; 2854 } 2855 2856 /* No 4 CCK rates for non-2G */ 2857 rx_status->rate_idx -= 4; 2858 } 2859 2860 static 2861 void rtw89_core_update_rx_status_by_ppdu(struct rtw89_dev *rtwdev, 2862 struct ieee80211_rx_status *rx_status, 2863 struct rtw89_rx_phy_ppdu *phy_ppdu) 2864 { 2865 if (!(rtwdev->hw->conf.flags & IEEE80211_CONF_MONITOR)) 2866 return; 2867 2868 if (!phy_ppdu) 2869 return; 2870 2871 if (phy_ppdu->ldpc) 2872 rx_status->enc_flags |= RX_ENC_FLAG_LDPC; 2873 if (phy_ppdu->stbc) 2874 rx_status->enc_flags |= u8_encode_bits(1, RX_ENC_FLAG_STBC_MASK); 2875 } 2876 2877 static const u8 rx_status_bw_to_radiotap_eht_usig[] = { 2878 [RATE_INFO_BW_20] = IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_20MHZ, 2879 [RATE_INFO_BW_5] = U8_MAX, 2880 [RATE_INFO_BW_10] = U8_MAX, 2881 [RATE_INFO_BW_40] = IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_40MHZ, 2882 [RATE_INFO_BW_80] = IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_80MHZ, 2883 [RATE_INFO_BW_160] = IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_160MHZ, 2884 [RATE_INFO_BW_HE_RU] = U8_MAX, 2885 [RATE_INFO_BW_320] = IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_320MHZ_1, 2886 [RATE_INFO_BW_EHT_RU] = U8_MAX, 2887 }; 2888 2889 static void rtw89_core_update_radiotap_eht(struct rtw89_dev *rtwdev, 2890 struct sk_buff *skb, 2891 struct ieee80211_rx_status *rx_status) 2892 { 2893 struct ieee80211_radiotap_eht_usig *usig; 2894 struct ieee80211_radiotap_eht *eht; 2895 struct ieee80211_radiotap_tlv *tlv; 2896 int eht_len = struct_size(eht, user_info, 1); 2897 int usig_len = sizeof(*usig); 2898 int len; 2899 u8 bw; 2900 2901 len = sizeof(*tlv) + ALIGN(eht_len, 4) + 2902 sizeof(*tlv) + ALIGN(usig_len, 4); 2903 2904 rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END; 2905 skb_reset_mac_header(skb); 2906 2907 /* EHT */ 2908 tlv = skb_push(skb, len); 2909 memset(tlv, 0, len); 2910 tlv->type = cpu_to_le16(IEEE80211_RADIOTAP_EHT); 2911 tlv->len = cpu_to_le16(eht_len); 2912 2913 eht = (struct ieee80211_radiotap_eht *)tlv->data; 2914 eht->known = cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_GI); 2915 eht->data[0] = 2916 le32_encode_bits(rx_status->eht.gi, IEEE80211_RADIOTAP_EHT_DATA0_GI); 2917 2918 eht->user_info[0] = 2919 cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN | 2920 IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O | 2921 IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN); 2922 eht->user_info[0] |= 2923 le32_encode_bits(rx_status->rate_idx, IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) | 2924 le32_encode_bits(rx_status->nss, IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O); 2925 if (rx_status->enc_flags & RX_ENC_FLAG_LDPC) 2926 eht->user_info[0] |= 2927 cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_CODING); 2928 2929 /* U-SIG */ 2930 tlv = (void *)tlv + sizeof(*tlv) + ALIGN(eht_len, 4); 2931 tlv->type = cpu_to_le16(IEEE80211_RADIOTAP_EHT_USIG); 2932 tlv->len = cpu_to_le16(usig_len); 2933 2934 if (rx_status->bw >= ARRAY_SIZE(rx_status_bw_to_radiotap_eht_usig)) 2935 return; 2936 2937 bw = rx_status_bw_to_radiotap_eht_usig[rx_status->bw]; 2938 if (bw == U8_MAX) 2939 return; 2940 2941 usig = (struct ieee80211_radiotap_eht_usig *)tlv->data; 2942 usig->common = 2943 le32_encode_bits(1, IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN) | 2944 le32_encode_bits(bw, IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW); 2945 } 2946 2947 static void rtw89_core_update_radiotap(struct rtw89_dev *rtwdev, 2948 struct sk_buff *skb, 2949 struct ieee80211_rx_status *rx_status) 2950 { 2951 static const struct ieee80211_radiotap_he known_he = { 2952 .data1 = 
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | 2953 IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN | 2954 IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN | 2955 IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN), 2956 .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN), 2957 }; 2958 struct ieee80211_radiotap_he *he; 2959 2960 if (!(rtwdev->hw->conf.flags & IEEE80211_CONF_MONITOR)) 2961 return; 2962 2963 if (rx_status->encoding == RX_ENC_HE) { 2964 rx_status->flag |= RX_FLAG_RADIOTAP_HE; 2965 he = skb_push(skb, sizeof(*he)); 2966 *he = known_he; 2967 } else if (rx_status->encoding == RX_ENC_EHT) { 2968 rtw89_core_update_radiotap_eht(rtwdev, skb, rx_status); 2969 } 2970 } 2971 2972 static void rtw89_core_validate_rx_signal(struct ieee80211_rx_status *rx_status) 2973 { 2974 if (!rx_status->signal) 2975 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2976 } 2977 2978 static void rtw89_core_update_rx_freq_from_ie(struct rtw89_dev *rtwdev, 2979 struct sk_buff *skb, 2980 struct ieee80211_rx_status *rx_status) 2981 { 2982 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; 2983 size_t hdr_len, ielen; 2984 u8 *variable; 2985 int chan; 2986 2987 if (!rtwdev->chip->rx_freq_frome_ie) 2988 return; 2989 2990 if (!rtwdev->scanning) 2991 return; 2992 2993 if (ieee80211_is_beacon(mgmt->frame_control)) { 2994 variable = mgmt->u.beacon.variable; 2995 hdr_len = offsetof(struct ieee80211_mgmt, 2996 u.beacon.variable); 2997 } else if (ieee80211_is_probe_resp(mgmt->frame_control)) { 2998 variable = mgmt->u.probe_resp.variable; 2999 hdr_len = offsetof(struct ieee80211_mgmt, 3000 u.probe_resp.variable); 3001 } else { 3002 return; 3003 } 3004 3005 if (skb->len > hdr_len) 3006 ielen = skb->len - hdr_len; 3007 else 3008 return; 3009 3010 /* The parsing code for both 2GHz and 5GHz bands is the same in this 3011 * function. 3012 */ 3013 chan = cfg80211_get_ies_channel_number(variable, ielen, NL80211_BAND_2GHZ); 3014 if (chan == -1) 3015 return; 3016 3017 rx_status->band = chan > 14 ? 
RTW89_BAND_5G : RTW89_BAND_2G; 3018 rx_status->freq = ieee80211_channel_to_frequency(chan, rx_status->band); 3019 } 3020 3021 static void rtw89_core_correct_mcc_chan(struct rtw89_dev *rtwdev, 3022 struct rtw89_rx_desc_info *desc_info, 3023 struct ieee80211_rx_status *rx_status, 3024 struct rtw89_rx_phy_ppdu *phy_ppdu) 3025 { 3026 enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen; 3027 struct rtw89_vif_link *rtwvif_link; 3028 struct rtw89_sta_link *rtwsta_link; 3029 const struct rtw89_chan *chan; 3030 u8 mac_id = desc_info->mac_id; 3031 enum rtw89_entity_mode mode; 3032 enum nl80211_band band; 3033 3034 mode = rtw89_get_entity_mode(rtwdev); 3035 if (likely(mode != RTW89_ENTITY_MODE_MCC)) 3036 return; 3037 3038 if (chip_gen == RTW89_CHIP_BE && phy_ppdu) 3039 mac_id = phy_ppdu->mac_id; 3040 3041 rcu_read_lock(); 3042 3043 rtwsta_link = rtw89_assoc_link_rcu_dereference(rtwdev, mac_id); 3044 if (!rtwsta_link) 3045 goto out; 3046 3047 rtwvif_link = rtwsta_link->rtwvif_link; 3048 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 3049 band = rtw89_hw_to_nl80211_band(chan->band_type); 3050 rx_status->freq = ieee80211_channel_to_frequency(chan->primary_channel, band); 3051 3052 out: 3053 rcu_read_unlock(); 3054 } 3055 3056 static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev, 3057 struct rtw89_rx_phy_ppdu *phy_ppdu, 3058 struct rtw89_rx_desc_info *desc_info, 3059 struct sk_buff *skb_ppdu, 3060 struct ieee80211_rx_status *rx_status) 3061 { 3062 struct napi_struct *napi = &rtwdev->napi; 3063 3064 /* In low power mode, napi isn't scheduled. Receive it to netif. */ 3065 if (unlikely(!napi_is_scheduled(napi))) 3066 napi = NULL; 3067 3068 rtw89_core_hw_to_sband_rate(rx_status); 3069 rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu); 3070 rtw89_core_update_rx_status_by_ppdu(rtwdev, rx_status, phy_ppdu); 3071 rtw89_core_update_radiotap(rtwdev, skb_ppdu, rx_status); 3072 rtw89_core_validate_rx_signal(rx_status); 3073 rtw89_core_update_rx_freq_from_ie(rtwdev, skb_ppdu, rx_status); 3074 rtw89_core_correct_mcc_chan(rtwdev, desc_info, rx_status, phy_ppdu); 3075 3076 /* In low power mode, it does RX in thread context. */ 3077 local_bh_disable(); 3078 ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, napi); 3079 local_bh_enable(); 3080 rtwdev->napi_budget_countdown--; 3081 } 3082 3083 static void rtw89_core_rx_pending_skb(struct rtw89_dev *rtwdev, 3084 struct rtw89_rx_phy_ppdu *phy_ppdu, 3085 struct rtw89_rx_desc_info *desc_info, 3086 struct sk_buff *skb) 3087 { 3088 u8 band = desc_info->bb_sel ? 
RTW89_PHY_1 : RTW89_PHY_0; 3089 int curr = rtwdev->ppdu_sts.curr_rx_ppdu_cnt[band]; 3090 struct sk_buff *skb_ppdu = NULL, *tmp; 3091 struct ieee80211_rx_status *rx_status; 3092 3093 if (curr > RTW89_MAX_PPDU_CNT) 3094 return; 3095 3096 skb_queue_walk_safe(&rtwdev->ppdu_sts.rx_queue[band], skb_ppdu, tmp) { 3097 skb_unlink(skb_ppdu, &rtwdev->ppdu_sts.rx_queue[band]); 3098 rx_status = IEEE80211_SKB_RXCB(skb_ppdu); 3099 if (rtw89_core_rx_ppdu_match(rtwdev, desc_info, rx_status)) 3100 rtw89_chip_query_ppdu(rtwdev, phy_ppdu, rx_status); 3101 rtw89_correct_cck_chan(rtwdev, rx_status); 3102 rtw89_core_rx_to_mac80211(rtwdev, phy_ppdu, desc_info, skb_ppdu, rx_status); 3103 } 3104 } 3105 3106 static void rtw89_core_rx_process_ppdu_sts(struct rtw89_dev *rtwdev, 3107 struct rtw89_rx_desc_info *desc_info, 3108 struct sk_buff *skb) 3109 { 3110 struct rtw89_rx_phy_ppdu phy_ppdu = {.buf = skb->data, .valid = false, 3111 .len = skb->len, 3112 .to_self = desc_info->addr1_match, 3113 .rate = desc_info->data_rate, 3114 .mac_id = desc_info->mac_id, 3115 .phy_idx = desc_info->bb_sel}; 3116 int ret; 3117 3118 if (desc_info->mac_info_valid) { 3119 ret = rtw89_core_rx_process_mac_ppdu(rtwdev, skb, &phy_ppdu); 3120 if (ret) 3121 goto out; 3122 } 3123 3124 ret = rtw89_core_rx_process_phy_ppdu(rtwdev, &phy_ppdu); 3125 if (ret) 3126 goto out; 3127 3128 rtw89_core_rx_process_phy_sts(rtwdev, &phy_ppdu); 3129 3130 out: 3131 rtw89_core_rx_pending_skb(rtwdev, &phy_ppdu, desc_info, skb); 3132 dev_kfree_skb_any(skb); 3133 } 3134 3135 static void rtw89_core_rx_process_report(struct rtw89_dev *rtwdev, 3136 struct rtw89_rx_desc_info *desc_info, 3137 struct sk_buff *skb) 3138 { 3139 switch (desc_info->pkt_type) { 3140 case RTW89_CORE_RX_TYPE_C2H: 3141 rtw89_fw_c2h_irqsafe(rtwdev, skb); 3142 break; 3143 case RTW89_CORE_RX_TYPE_PPDU_STAT: 3144 rtw89_core_rx_process_ppdu_sts(rtwdev, desc_info, skb); 3145 break; 3146 default: 3147 rtw89_debug(rtwdev, RTW89_DBG_TXRX, "unhandled pkt_type=%d\n", 3148 desc_info->pkt_type); 3149 dev_kfree_skb_any(skb); 3150 break; 3151 } 3152 } 3153 3154 void rtw89_core_query_rxdesc(struct rtw89_dev *rtwdev, 3155 struct rtw89_rx_desc_info *desc_info, 3156 u8 *data, u32 data_offset) 3157 { 3158 const struct rtw89_chip_info *chip = rtwdev->chip; 3159 struct rtw89_rxdesc_short *rxd_s; 3160 struct rtw89_rxdesc_long *rxd_l; 3161 u8 shift_len, drv_info_len; 3162 3163 rxd_s = (struct rtw89_rxdesc_short *)(data + data_offset); 3164 desc_info->pkt_size = le32_get_bits(rxd_s->dword0, AX_RXD_RPKT_LEN_MASK); 3165 desc_info->drv_info_size = le32_get_bits(rxd_s->dword0, AX_RXD_DRV_INFO_SIZE_MASK); 3166 desc_info->long_rxdesc = le32_get_bits(rxd_s->dword0, AX_RXD_LONG_RXD); 3167 desc_info->pkt_type = le32_get_bits(rxd_s->dword0, AX_RXD_RPKT_TYPE_MASK); 3168 desc_info->mac_info_valid = le32_get_bits(rxd_s->dword0, AX_RXD_MAC_INFO_VLD); 3169 if (chip->chip_id == RTL8852C) 3170 desc_info->bw = le32_get_bits(rxd_s->dword1, AX_RXD_BW_v1_MASK); 3171 else 3172 desc_info->bw = le32_get_bits(rxd_s->dword1, AX_RXD_BW_MASK); 3173 desc_info->data_rate = le32_get_bits(rxd_s->dword1, AX_RXD_RX_DATARATE_MASK); 3174 desc_info->gi_ltf = le32_get_bits(rxd_s->dword1, AX_RXD_RX_GI_LTF_MASK); 3175 desc_info->user_id = le32_get_bits(rxd_s->dword1, AX_RXD_USER_ID_MASK); 3176 desc_info->sr_en = le32_get_bits(rxd_s->dword1, AX_RXD_SR_EN); 3177 desc_info->ppdu_cnt = le32_get_bits(rxd_s->dword1, AX_RXD_PPDU_CNT_MASK); 3178 desc_info->ppdu_type = le32_get_bits(rxd_s->dword1, AX_RXD_PPDU_TYPE_MASK); 3179 desc_info->free_run_cnt = 
le32_get_bits(rxd_s->dword2, AX_RXD_FREERUN_CNT_MASK); 3180 desc_info->icv_err = le32_get_bits(rxd_s->dword3, AX_RXD_ICV_ERR); 3181 desc_info->crc32_err = le32_get_bits(rxd_s->dword3, AX_RXD_CRC32_ERR); 3182 desc_info->hw_dec = le32_get_bits(rxd_s->dword3, AX_RXD_HW_DEC); 3183 desc_info->sw_dec = le32_get_bits(rxd_s->dword3, AX_RXD_SW_DEC); 3184 desc_info->addr1_match = le32_get_bits(rxd_s->dword3, AX_RXD_A1_MATCH); 3185 3186 shift_len = desc_info->shift << 1; /* 2-byte unit */ 3187 drv_info_len = desc_info->drv_info_size << 3; /* 8-byte unit */ 3188 desc_info->offset = data_offset + shift_len + drv_info_len; 3189 if (desc_info->long_rxdesc) 3190 desc_info->rxd_len = sizeof(struct rtw89_rxdesc_long); 3191 else 3192 desc_info->rxd_len = sizeof(struct rtw89_rxdesc_short); 3193 desc_info->ready = true; 3194 3195 if (!desc_info->long_rxdesc) 3196 return; 3197 3198 rxd_l = (struct rtw89_rxdesc_long *)(data + data_offset); 3199 desc_info->frame_type = le32_get_bits(rxd_l->dword4, AX_RXD_TYPE_MASK); 3200 desc_info->addr_cam_valid = le32_get_bits(rxd_l->dword5, AX_RXD_ADDR_CAM_VLD); 3201 desc_info->addr_cam_id = le32_get_bits(rxd_l->dword5, AX_RXD_ADDR_CAM_MASK); 3202 desc_info->sec_cam_id = le32_get_bits(rxd_l->dword5, AX_RXD_SEC_CAM_IDX_MASK); 3203 desc_info->mac_id = le32_get_bits(rxd_l->dword5, AX_RXD_MAC_ID_MASK); 3204 desc_info->rx_pl_id = le32_get_bits(rxd_l->dword5, AX_RXD_RX_PL_ID_MASK); 3205 } 3206 EXPORT_SYMBOL(rtw89_core_query_rxdesc); 3207 3208 void rtw89_core_query_rxdesc_v2(struct rtw89_dev *rtwdev, 3209 struct rtw89_rx_desc_info *desc_info, 3210 u8 *data, u32 data_offset) 3211 { 3212 struct rtw89_rxdesc_phy_rpt_v2 *rxd_rpt; 3213 struct rtw89_rxdesc_short_v2 *rxd_s; 3214 struct rtw89_rxdesc_long_v2 *rxd_l; 3215 u16 shift_len, drv_info_len, phy_rtp_len, hdr_cnv_len; 3216 3217 rxd_s = (struct rtw89_rxdesc_short_v2 *)(data + data_offset); 3218 3219 desc_info->pkt_size = le32_get_bits(rxd_s->dword0, BE_RXD_RPKT_LEN_MASK); 3220 desc_info->drv_info_size = le32_get_bits(rxd_s->dword0, BE_RXD_DRV_INFO_SZ_MASK); 3221 desc_info->phy_rpt_size = le32_get_bits(rxd_s->dword0, BE_RXD_PHY_RPT_SZ_MASK); 3222 desc_info->hdr_cnv_size = le32_get_bits(rxd_s->dword0, BE_RXD_HDR_CNV_SZ_MASK); 3223 desc_info->shift = le32_get_bits(rxd_s->dword0, BE_RXD_SHIFT_MASK); 3224 desc_info->long_rxdesc = le32_get_bits(rxd_s->dword0, BE_RXD_LONG_RXD); 3225 desc_info->pkt_type = le32_get_bits(rxd_s->dword0, BE_RXD_RPKT_TYPE_MASK); 3226 desc_info->bb_sel = le32_get_bits(rxd_s->dword0, BE_RXD_BB_SEL); 3227 if (desc_info->pkt_type == RTW89_CORE_RX_TYPE_PPDU_STAT) 3228 desc_info->mac_info_valid = true; 3229 3230 desc_info->frame_type = le32_get_bits(rxd_s->dword2, BE_RXD_TYPE_MASK); 3231 desc_info->mac_id = le32_get_bits(rxd_s->dword2, BE_RXD_MAC_ID_MASK); 3232 desc_info->addr_cam_valid = le32_get_bits(rxd_s->dword2, BE_RXD_ADDR_CAM_VLD); 3233 3234 desc_info->icv_err = le32_get_bits(rxd_s->dword3, BE_RXD_ICV_ERR); 3235 desc_info->crc32_err = le32_get_bits(rxd_s->dword3, BE_RXD_CRC32_ERR); 3236 desc_info->hw_dec = le32_get_bits(rxd_s->dword3, BE_RXD_HW_DEC); 3237 desc_info->sw_dec = le32_get_bits(rxd_s->dword3, BE_RXD_SW_DEC); 3238 desc_info->addr1_match = le32_get_bits(rxd_s->dword3, BE_RXD_A1_MATCH); 3239 3240 desc_info->bw = le32_get_bits(rxd_s->dword4, BE_RXD_BW_MASK); 3241 desc_info->data_rate = le32_get_bits(rxd_s->dword4, BE_RXD_RX_DATARATE_MASK); 3242 desc_info->gi_ltf = le32_get_bits(rxd_s->dword4, BE_RXD_RX_GI_LTF_MASK); 3243 desc_info->ppdu_cnt = le32_get_bits(rxd_s->dword4, BE_RXD_PPDU_CNT_MASK); 3244 
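/* ppdu_type is the last field taken from dword4; dword5 is the full 32-bit free-running counter. */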
desc_info->ppdu_type = le32_get_bits(rxd_s->dword4, BE_RXD_PPDU_TYPE_MASK); 3245 3246 desc_info->free_run_cnt = le32_to_cpu(rxd_s->dword5); 3247 3248 shift_len = desc_info->shift << 1; /* 2-byte unit */ 3249 drv_info_len = desc_info->drv_info_size << 3; /* 8-byte unit */ 3250 phy_rtp_len = desc_info->phy_rpt_size << 3; /* 8-byte unit */ 3251 hdr_cnv_len = desc_info->hdr_cnv_size << 4; /* 16-byte unit */ 3252 desc_info->offset = data_offset + shift_len + drv_info_len + 3253 phy_rtp_len + hdr_cnv_len; 3254 3255 if (desc_info->long_rxdesc) 3256 desc_info->rxd_len = sizeof(struct rtw89_rxdesc_long_v2); 3257 else 3258 desc_info->rxd_len = sizeof(struct rtw89_rxdesc_short_v2); 3259 desc_info->ready = true; 3260 3261 if (phy_rtp_len == sizeof(*rxd_rpt)) { 3262 rxd_rpt = (struct rtw89_rxdesc_phy_rpt_v2 *)(data + data_offset + 3263 desc_info->rxd_len); 3264 desc_info->rssi = le32_get_bits(rxd_rpt->dword0, BE_RXD_PHY_RSSI); 3265 } 3266 3267 if (!desc_info->long_rxdesc) 3268 return; 3269 3270 rxd_l = (struct rtw89_rxdesc_long_v2 *)(data + data_offset); 3271 3272 desc_info->sr_en = le32_get_bits(rxd_l->dword6, BE_RXD_SR_EN); 3273 desc_info->user_id = le32_get_bits(rxd_l->dword6, BE_RXD_USER_ID_MASK); 3274 desc_info->addr_cam_id = le32_get_bits(rxd_l->dword6, BE_RXD_ADDR_CAM_MASK); 3275 desc_info->sec_cam_id = le32_get_bits(rxd_l->dword6, BE_RXD_SEC_CAM_IDX_MASK); 3276 3277 desc_info->rx_pl_id = le32_get_bits(rxd_l->dword7, BE_RXD_RX_PL_ID_MASK); 3278 } 3279 EXPORT_SYMBOL(rtw89_core_query_rxdesc_v2); 3280 3281 struct rtw89_core_iter_rx_status { 3282 struct rtw89_dev *rtwdev; 3283 struct ieee80211_rx_status *rx_status; 3284 struct rtw89_rx_desc_info *desc_info; 3285 u8 mac_id; 3286 }; 3287 3288 static 3289 void rtw89_core_stats_sta_rx_status_iter(void *data, struct ieee80211_sta *sta) 3290 { 3291 struct rtw89_core_iter_rx_status *iter_data = 3292 (struct rtw89_core_iter_rx_status *)data; 3293 struct ieee80211_rx_status *rx_status = iter_data->rx_status; 3294 struct rtw89_rx_desc_info *desc_info = iter_data->desc_info; 3295 struct rtw89_sta *rtwsta = sta_to_rtwsta(sta); 3296 struct rtw89_sta_link *rtwsta_link; 3297 u8 mac_id = iter_data->mac_id; 3298 3299 rtwsta_link = rtw89_sta_get_link_inst(rtwsta, desc_info->bb_sel); 3300 if (unlikely(!rtwsta_link)) 3301 return; 3302 3303 if (mac_id != rtwsta_link->mac_id) 3304 return; 3305 3306 rtwsta_link->rx_status = *rx_status; 3307 rtwsta_link->rx_hw_rate = desc_info->data_rate; 3308 } 3309 3310 static void rtw89_core_stats_sta_rx_status(struct rtw89_dev *rtwdev, 3311 struct rtw89_rx_desc_info *desc_info, 3312 struct ieee80211_rx_status *rx_status) 3313 { 3314 struct rtw89_core_iter_rx_status iter_data; 3315 3316 if (!desc_info->addr1_match || !desc_info->long_rxdesc) 3317 return; 3318 3319 if (desc_info->frame_type != RTW89_RX_TYPE_DATA) 3320 return; 3321 3322 iter_data.rtwdev = rtwdev; 3323 iter_data.rx_status = rx_status; 3324 iter_data.desc_info = desc_info; 3325 iter_data.mac_id = desc_info->mac_id; 3326 ieee80211_iterate_stations_atomic(rtwdev->hw, 3327 rtw89_core_stats_sta_rx_status_iter, 3328 &iter_data); 3329 } 3330 3331 static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev, 3332 struct sk_buff *skb, 3333 struct rtw89_rx_desc_info *desc_info, 3334 struct ieee80211_rx_status *rx_status) 3335 { 3336 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 3337 const struct cfg80211_chan_def *chandef = 3338 rtw89_chandef_get(rtwdev, RTW89_CHANCTX_0); 3339 u16 data_rate; 3340 u8 data_rate_mode; 3341 bool eht = false; 3342 u8 gi; 3343 3344 /* 
currently using single PHY */ 3345 rx_status->freq = chandef->chan->center_freq; 3346 rx_status->band = chandef->chan->band; 3347 3348 if (ieee80211_is_beacon(hdr->frame_control) || 3349 ieee80211_is_probe_resp(hdr->frame_control)) 3350 rx_status->boottime_ns = ktime_get_boottime_ns(); 3351 3352 if (rtwdev->scanning && 3353 RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) { 3354 const struct rtw89_chan *cur = rtw89_scan_chan_get(rtwdev); 3355 u8 chan = cur->primary_channel; 3356 u8 band = cur->band_type; 3357 enum nl80211_band nl_band; 3358 3359 nl_band = rtw89_hw_to_nl80211_band(band); 3360 rx_status->freq = ieee80211_channel_to_frequency(chan, nl_band); 3361 rx_status->band = nl_band; 3362 } 3363 3364 if (desc_info->icv_err || desc_info->crc32_err) 3365 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 3366 3367 if (desc_info->hw_dec && 3368 !(desc_info->sw_dec || desc_info->icv_err)) 3369 rx_status->flag |= RX_FLAG_DECRYPTED; 3370 3371 rx_status->bw = rtw89_hw_to_rate_info_bw(desc_info->bw); 3372 3373 data_rate = desc_info->data_rate; 3374 data_rate_mode = rtw89_get_data_rate_mode(rtwdev, data_rate); 3375 if (data_rate_mode == DATA_RATE_MODE_NON_HT) { 3376 rx_status->encoding = RX_ENC_LEGACY; 3377 rx_status->rate_idx = rtw89_get_data_not_ht_idx(rtwdev, data_rate); 3378 /* convert rate_idx after we get the correct band */ 3379 } else if (data_rate_mode == DATA_RATE_MODE_HT) { 3380 rx_status->encoding = RX_ENC_HT; 3381 rx_status->rate_idx = rtw89_get_data_ht_mcs(rtwdev, data_rate); 3382 if (desc_info->gi_ltf) 3383 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 3384 } else if (data_rate_mode == DATA_RATE_MODE_VHT) { 3385 rx_status->encoding = RX_ENC_VHT; 3386 rx_status->rate_idx = rtw89_get_data_mcs(rtwdev, data_rate); 3387 rx_status->nss = rtw89_get_data_nss(rtwdev, data_rate) + 1; 3388 if (desc_info->gi_ltf) 3389 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 3390 } else if (data_rate_mode == DATA_RATE_MODE_HE) { 3391 rx_status->encoding = RX_ENC_HE; 3392 rx_status->rate_idx = rtw89_get_data_mcs(rtwdev, data_rate); 3393 rx_status->nss = rtw89_get_data_nss(rtwdev, data_rate) + 1; 3394 } else if (data_rate_mode == DATA_RATE_MODE_EHT) { 3395 rx_status->encoding = RX_ENC_EHT; 3396 rx_status->rate_idx = rtw89_get_data_mcs(rtwdev, data_rate); 3397 rx_status->nss = rtw89_get_data_nss(rtwdev, data_rate) + 1; 3398 eht = true; 3399 } else { 3400 rtw89_warn(rtwdev, "invalid RX rate mode %d\n", data_rate_mode); 3401 } 3402 3403 /* he_gi is used to match ppdu, so we always fill it. 
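For EHT PPDUs the value goes into eht.gi rather than he_gi.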
*/ 3404 gi = rtw89_rxdesc_to_nl_he_eht_gi(rtwdev, desc_info->gi_ltf, true, eht); 3405 if (eht) 3406 rx_status->eht.gi = gi; 3407 else 3408 rx_status->he_gi = gi; 3409 rx_status->flag |= RX_FLAG_MACTIME_START; 3410 rx_status->mactime = desc_info->free_run_cnt; 3411 3412 rtw89_chip_phy_rpt_to_rssi(rtwdev, desc_info, rx_status); 3413 rtw89_core_stats_sta_rx_status(rtwdev, desc_info, rx_status); 3414 } 3415 3416 static enum rtw89_ps_mode rtw89_update_ps_mode(struct rtw89_dev *rtwdev) 3417 { 3418 const struct rtw89_chip_info *chip = rtwdev->chip; 3419 3420 if (rtwdev->hci.type != RTW89_HCI_TYPE_PCIE) 3421 return RTW89_PS_MODE_NONE; 3422 3423 if (rtw89_disable_ps_mode || !chip->ps_mode_supported || 3424 RTW89_CHK_FW_FEATURE(NO_DEEP_PS, &rtwdev->fw)) 3425 return RTW89_PS_MODE_NONE; 3426 3427 if ((chip->ps_mode_supported & BIT(RTW89_PS_MODE_PWR_GATED)) && 3428 !RTW89_CHK_FW_FEATURE(NO_LPS_PG, &rtwdev->fw)) 3429 return RTW89_PS_MODE_PWR_GATED; 3430 3431 if (chip->ps_mode_supported & BIT(RTW89_PS_MODE_CLK_GATED)) 3432 return RTW89_PS_MODE_CLK_GATED; 3433 3434 if (chip->ps_mode_supported & BIT(RTW89_PS_MODE_RFOFF)) 3435 return RTW89_PS_MODE_RFOFF; 3436 3437 return RTW89_PS_MODE_NONE; 3438 } 3439 3440 static void rtw89_core_flush_ppdu_rx_queue(struct rtw89_dev *rtwdev, 3441 struct rtw89_rx_desc_info *desc_info) 3442 { 3443 struct rtw89_ppdu_sts_info *ppdu_sts = &rtwdev->ppdu_sts; 3444 u8 band = desc_info->bb_sel ? RTW89_PHY_1 : RTW89_PHY_0; 3445 struct ieee80211_rx_status *rx_status; 3446 struct sk_buff *skb_ppdu, *tmp; 3447 3448 skb_queue_walk_safe(&ppdu_sts->rx_queue[band], skb_ppdu, tmp) { 3449 skb_unlink(skb_ppdu, &ppdu_sts->rx_queue[band]); 3450 rx_status = IEEE80211_SKB_RXCB(skb_ppdu); 3451 rtw89_core_rx_to_mac80211(rtwdev, NULL, desc_info, skb_ppdu, rx_status); 3452 } 3453 } 3454 3455 static 3456 void rtw89_core_rx_pkt_hdl(struct rtw89_dev *rtwdev, const struct sk_buff *skb, 3457 const struct rtw89_rx_desc_info *desc) 3458 { 3459 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 3460 struct rtw89_sta_link *rtwsta_link; 3461 struct ieee80211_sta *sta; 3462 struct rtw89_sta *rtwsta; 3463 u8 macid = desc->mac_id; 3464 3465 if (!refcount_read(&rtwdev->refcount_ap_info)) 3466 return; 3467 3468 rcu_read_lock(); 3469 3470 rtwsta_link = rtw89_assoc_link_rcu_dereference(rtwdev, macid); 3471 if (!rtwsta_link) 3472 goto out; 3473 3474 rtwsta = rtwsta_link->rtwsta; 3475 if (!test_bit(RTW89_REMOTE_STA_IN_PS, rtwsta->flags)) 3476 goto out; 3477 3478 sta = rtwsta_to_sta(rtwsta); 3479 if (ieee80211_is_pspoll(hdr->frame_control)) 3480 ieee80211_sta_pspoll(sta); 3481 else if (ieee80211_has_pm(hdr->frame_control) && 3482 (ieee80211_is_data_qos(hdr->frame_control) || 3483 ieee80211_is_qos_nullfunc(hdr->frame_control))) 3484 ieee80211_sta_uapsd_trigger(sta, ieee80211_get_tid(hdr)); 3485 3486 out: 3487 rcu_read_unlock(); 3488 } 3489 3490 void rtw89_core_rx(struct rtw89_dev *rtwdev, 3491 struct rtw89_rx_desc_info *desc_info, 3492 struct sk_buff *skb) 3493 { 3494 struct ieee80211_rx_status *rx_status; 3495 struct rtw89_ppdu_sts_info *ppdu_sts = &rtwdev->ppdu_sts; 3496 u8 ppdu_cnt = desc_info->ppdu_cnt; 3497 u8 band = desc_info->bb_sel ? 
RTW89_PHY_1 : RTW89_PHY_0; 3498 3499 if (desc_info->pkt_type != RTW89_CORE_RX_TYPE_WIFI) { 3500 rtw89_core_rx_process_report(rtwdev, desc_info, skb); 3501 return; 3502 } 3503 3504 if (ppdu_sts->curr_rx_ppdu_cnt[band] != ppdu_cnt) { 3505 rtw89_core_flush_ppdu_rx_queue(rtwdev, desc_info); 3506 ppdu_sts->curr_rx_ppdu_cnt[band] = ppdu_cnt; 3507 } 3508 3509 rx_status = IEEE80211_SKB_RXCB(skb); 3510 memset(rx_status, 0, sizeof(*rx_status)); 3511 rtw89_core_update_rx_status(rtwdev, skb, desc_info, rx_status); 3512 rtw89_core_rx_pkt_hdl(rtwdev, skb, desc_info); 3513 if (desc_info->long_rxdesc && 3514 BIT(desc_info->frame_type) & PPDU_FILTER_BITMAP) 3515 skb_queue_tail(&ppdu_sts->rx_queue[band], skb); 3516 else 3517 rtw89_core_rx_to_mac80211(rtwdev, NULL, desc_info, skb, rx_status); 3518 } 3519 EXPORT_SYMBOL(rtw89_core_rx); 3520 3521 void rtw89_core_napi_start(struct rtw89_dev *rtwdev) 3522 { 3523 if (test_and_set_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags)) 3524 return; 3525 3526 napi_enable(&rtwdev->napi); 3527 } 3528 EXPORT_SYMBOL(rtw89_core_napi_start); 3529 3530 void rtw89_core_napi_stop(struct rtw89_dev *rtwdev) 3531 { 3532 if (!test_and_clear_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags)) 3533 return; 3534 3535 napi_synchronize(&rtwdev->napi); 3536 napi_disable(&rtwdev->napi); 3537 } 3538 EXPORT_SYMBOL(rtw89_core_napi_stop); 3539 3540 int rtw89_core_napi_init(struct rtw89_dev *rtwdev) 3541 { 3542 rtwdev->netdev = alloc_netdev_dummy(0); 3543 if (!rtwdev->netdev) 3544 return -ENOMEM; 3545 3546 netif_napi_add(rtwdev->netdev, &rtwdev->napi, 3547 rtwdev->hci.ops->napi_poll); 3548 return 0; 3549 } 3550 EXPORT_SYMBOL(rtw89_core_napi_init); 3551 3552 void rtw89_core_napi_deinit(struct rtw89_dev *rtwdev) 3553 { 3554 rtw89_core_napi_stop(rtwdev); 3555 netif_napi_del(&rtwdev->napi); 3556 free_netdev(rtwdev->netdev); 3557 } 3558 EXPORT_SYMBOL(rtw89_core_napi_deinit); 3559 3560 static void rtw89_core_ba_work(struct work_struct *work) 3561 { 3562 struct rtw89_dev *rtwdev = 3563 container_of(work, struct rtw89_dev, ba_work); 3564 struct rtw89_txq *rtwtxq, *tmp; 3565 int ret; 3566 3567 spin_lock_bh(&rtwdev->ba_lock); 3568 list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->ba_list, list) { 3569 struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq); 3570 struct ieee80211_sta *sta = txq->sta; 3571 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); 3572 u8 tid = txq->tid; 3573 3574 if (!sta) { 3575 rtw89_warn(rtwdev, "cannot start BA without sta\n"); 3576 goto skip_ba_work; 3577 } 3578 3579 if (rtwsta->disassoc) { 3580 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 3581 "cannot start BA with disassoc sta\n"); 3582 goto skip_ba_work; 3583 } 3584 3585 ret = ieee80211_start_tx_ba_session(sta, tid, 0); 3586 if (ret) { 3587 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 3588 "failed to setup BA session for %pM:%2d: %d\n", 3589 sta->addr, tid, ret); 3590 if (ret == -EINVAL) 3591 set_bit(RTW89_TXQ_F_BLOCK_BA, &rtwtxq->flags); 3592 } 3593 skip_ba_work: 3594 list_del_init(&rtwtxq->list); 3595 } 3596 spin_unlock_bh(&rtwdev->ba_lock); 3597 } 3598 3599 void rtw89_core_free_sta_pending_ba(struct rtw89_dev *rtwdev, 3600 struct ieee80211_sta *sta) 3601 { 3602 struct rtw89_txq *rtwtxq, *tmp; 3603 3604 spin_lock_bh(&rtwdev->ba_lock); 3605 list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->ba_list, list) { 3606 struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq); 3607 3608 if (sta == txq->sta) 3609 list_del_init(&rtwtxq->list); 3610 } 3611 spin_unlock_bh(&rtwdev->ba_lock); 3612 } 3613 3614 void rtw89_core_free_sta_pending_forbid_ba(struct rtw89_dev *rtwdev, 
3615 struct ieee80211_sta *sta) 3616 { 3617 struct rtw89_txq *rtwtxq, *tmp; 3618 3619 spin_lock_bh(&rtwdev->ba_lock); 3620 list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->forbid_ba_list, list) { 3621 struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq); 3622 3623 if (sta == txq->sta) { 3624 clear_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags); 3625 list_del_init(&rtwtxq->list); 3626 } 3627 } 3628 spin_unlock_bh(&rtwdev->ba_lock); 3629 } 3630 3631 void rtw89_core_free_sta_pending_roc_tx(struct rtw89_dev *rtwdev, 3632 struct ieee80211_sta *sta) 3633 { 3634 struct rtw89_sta *rtwsta = sta_to_rtwsta(sta); 3635 struct sk_buff *skb, *tmp; 3636 3637 skb_queue_walk_safe(&rtwsta->roc_queue, skb, tmp) { 3638 skb_unlink(skb, &rtwsta->roc_queue); 3639 dev_kfree_skb_any(skb); 3640 } 3641 } 3642 3643 static void rtw89_core_stop_tx_ba_session(struct rtw89_dev *rtwdev, 3644 struct rtw89_txq *rtwtxq) 3645 { 3646 struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq); 3647 struct ieee80211_sta *sta = txq->sta; 3648 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); 3649 3650 if (unlikely(!rtwsta) || unlikely(rtwsta->disassoc)) 3651 return; 3652 3653 if (!test_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags) || 3654 test_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags)) 3655 return; 3656 3657 spin_lock_bh(&rtwdev->ba_lock); 3658 if (!test_and_set_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags)) 3659 list_add_tail(&rtwtxq->list, &rtwdev->forbid_ba_list); 3660 spin_unlock_bh(&rtwdev->ba_lock); 3661 3662 ieee80211_stop_tx_ba_session(sta, txq->tid); 3663 cancel_delayed_work(&rtwdev->forbid_ba_work); 3664 ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->forbid_ba_work, 3665 RTW89_FORBID_BA_TIMER); 3666 } 3667 3668 static void rtw89_core_txq_check_agg(struct rtw89_dev *rtwdev, 3669 struct rtw89_txq *rtwtxq, 3670 struct sk_buff *skb) 3671 { 3672 struct ieee80211_hw *hw = rtwdev->hw; 3673 struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq); 3674 struct ieee80211_sta *sta = txq->sta; 3675 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); 3676 3677 if (test_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags)) 3678 return; 3679 3680 if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))) { 3681 rtw89_core_stop_tx_ba_session(rtwdev, rtwtxq); 3682 return; 3683 } 3684 3685 if (unlikely(!sta)) 3686 return; 3687 3688 if (unlikely(test_bit(RTW89_TXQ_F_BLOCK_BA, &rtwtxq->flags))) 3689 return; 3690 3691 if (test_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags)) { 3692 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_AMPDU; 3693 return; 3694 } 3695 3696 spin_lock_bh(&rtwdev->ba_lock); 3697 if (!rtwsta->disassoc && list_empty(&rtwtxq->list)) { 3698 list_add_tail(&rtwtxq->list, &rtwdev->ba_list); 3699 ieee80211_queue_work(hw, &rtwdev->ba_work); 3700 } 3701 spin_unlock_bh(&rtwdev->ba_lock); 3702 } 3703 3704 static void rtw89_core_txq_push(struct rtw89_dev *rtwdev, 3705 struct rtw89_txq *rtwtxq, 3706 unsigned long frame_cnt, 3707 unsigned long byte_cnt) 3708 { 3709 struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq); 3710 struct ieee80211_vif *vif = txq->vif; 3711 struct ieee80211_sta *sta = txq->sta; 3712 struct sk_buff *skb; 3713 unsigned long i; 3714 int ret; 3715 3716 rcu_read_lock(); 3717 for (i = 0; i < frame_cnt; i++) { 3718 skb = ieee80211_tx_dequeue_ni(rtwdev->hw, txq); 3719 if (!skb) { 3720 rtw89_debug(rtwdev, RTW89_DBG_TXRX, "dequeue a NULL skb\n"); 3721 goto out; 3722 } 3723 rtw89_core_txq_check_agg(rtwdev, rtwtxq, skb); 3724 ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, NULL); 3725 if (ret) { 3726 rtw89_err(rtwdev, "failed to push txq: %d\n", ret); 3727 
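			/* tx_write failed, so the skb is still owned here;
			 * free it and stop dequeuing from this txq for now.
			 */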
ieee80211_free_txskb(rtwdev->hw, skb); 3728 break; 3729 } 3730 } 3731 out: 3732 rcu_read_unlock(); 3733 } 3734 3735 static u32 rtw89_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, u8 tid) 3736 { 3737 u8 qsel, ch_dma; 3738 3739 qsel = rtw89_core_get_qsel(rtwdev, tid); 3740 ch_dma = rtw89_chip_get_ch_dma(rtwdev, qsel); 3741 3742 return rtw89_hci_check_and_reclaim_tx_resource(rtwdev, ch_dma); 3743 } 3744 3745 static bool rtw89_core_txq_agg_wait(struct rtw89_dev *rtwdev, 3746 struct ieee80211_txq *txq, 3747 unsigned long *frame_cnt, 3748 bool *sched_txq, bool *reinvoke) 3749 { 3750 struct rtw89_txq *rtwtxq = (struct rtw89_txq *)txq->drv_priv; 3751 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(txq->sta); 3752 struct rtw89_sta_link *rtwsta_link; 3753 3754 if (!rtwsta) 3755 return false; 3756 3757 rtwsta_link = rtw89_get_designated_link(rtwsta); 3758 if (unlikely(!rtwsta_link)) { 3759 rtw89_err(rtwdev, "agg wait: find no designated link\n"); 3760 return false; 3761 } 3762 3763 if (rtwsta_link->max_agg_wait <= 0) 3764 return false; 3765 3766 if (rtwdev->stats.tx_tfc_lv <= RTW89_TFC_MID) 3767 return false; 3768 3769 if (*frame_cnt > 1) { 3770 *frame_cnt -= 1; 3771 *sched_txq = true; 3772 *reinvoke = true; 3773 rtwtxq->wait_cnt = 1; 3774 return false; 3775 } 3776 3777 if (*frame_cnt == 1 && rtwtxq->wait_cnt < rtwsta_link->max_agg_wait) { 3778 *reinvoke = true; 3779 rtwtxq->wait_cnt++; 3780 return true; 3781 } 3782 3783 rtwtxq->wait_cnt = 0; 3784 return false; 3785 } 3786 3787 static void rtw89_core_txq_schedule(struct rtw89_dev *rtwdev, u8 ac, bool *reinvoke) 3788 { 3789 struct ieee80211_hw *hw = rtwdev->hw; 3790 struct ieee80211_txq *txq; 3791 struct rtw89_vif *rtwvif; 3792 struct rtw89_txq *rtwtxq; 3793 unsigned long frame_cnt; 3794 unsigned long byte_cnt; 3795 u32 tx_resource; 3796 bool sched_txq; 3797 3798 ieee80211_txq_schedule_start(hw, ac); 3799 while ((txq = ieee80211_next_txq(hw, ac))) { 3800 rtwtxq = (struct rtw89_txq *)txq->drv_priv; 3801 rtwvif = vif_to_rtwvif(txq->vif); 3802 3803 if (rtwvif->offchan) { 3804 ieee80211_return_txq(hw, txq, true); 3805 continue; 3806 } 3807 tx_resource = rtw89_check_and_reclaim_tx_resource(rtwdev, txq->tid); 3808 sched_txq = false; 3809 3810 ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt); 3811 if (rtw89_core_txq_agg_wait(rtwdev, txq, &frame_cnt, &sched_txq, reinvoke)) { 3812 ieee80211_return_txq(hw, txq, true); 3813 continue; 3814 } 3815 frame_cnt = min_t(unsigned long, frame_cnt, tx_resource); 3816 rtw89_core_txq_push(rtwdev, rtwtxq, frame_cnt, byte_cnt); 3817 ieee80211_return_txq(hw, txq, sched_txq); 3818 if (frame_cnt != 0) 3819 rtw89_core_tx_kick_off(rtwdev, rtw89_core_get_qsel(rtwdev, txq->tid)); 3820 3821 /* bound of tx_resource could get stuck due to burst traffic */ 3822 if (frame_cnt == tx_resource) 3823 *reinvoke = true; 3824 } 3825 ieee80211_txq_schedule_end(hw, ac); 3826 } 3827 3828 static void rtw89_ips_work(struct wiphy *wiphy, struct wiphy_work *work) 3829 { 3830 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 3831 ips_work); 3832 3833 lockdep_assert_wiphy(wiphy); 3834 3835 rtw89_enter_ips_by_hwflags(rtwdev); 3836 } 3837 3838 static void rtw89_core_txq_work(struct work_struct *w) 3839 { 3840 struct rtw89_dev *rtwdev = container_of(w, struct rtw89_dev, txq_work); 3841 bool reinvoke = false; 3842 u8 ac; 3843 3844 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 3845 rtw89_core_txq_schedule(rtwdev, ac, &reinvoke); 3846 3847 if (reinvoke) { 3848 /* reinvoke to process the last frame */ 3849 mod_delayed_work(rtwdev->txq_wq, 
&rtwdev->txq_reinvoke_work, 1); 3850 } 3851 } 3852 3853 static void rtw89_core_txq_reinvoke_work(struct work_struct *w) 3854 { 3855 struct rtw89_dev *rtwdev = container_of(w, struct rtw89_dev, 3856 txq_reinvoke_work.work); 3857 3858 queue_work(rtwdev->txq_wq, &rtwdev->txq_work); 3859 } 3860 3861 static void rtw89_forbid_ba_work(struct work_struct *w) 3862 { 3863 struct rtw89_dev *rtwdev = container_of(w, struct rtw89_dev, 3864 forbid_ba_work.work); 3865 struct rtw89_txq *rtwtxq, *tmp; 3866 3867 spin_lock_bh(&rtwdev->ba_lock); 3868 list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->forbid_ba_list, list) { 3869 clear_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags); 3870 list_del_init(&rtwtxq->list); 3871 } 3872 spin_unlock_bh(&rtwdev->ba_lock); 3873 } 3874 3875 static void rtw89_core_sta_pending_tx_iter(void *data, 3876 struct ieee80211_sta *sta) 3877 { 3878 struct rtw89_sta *rtwsta = sta_to_rtwsta(sta); 3879 struct rtw89_dev *rtwdev = rtwsta->rtwdev; 3880 struct rtw89_vif *rtwvif = rtwsta->rtwvif; 3881 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 3882 struct rtw89_vif_link *target = data; 3883 struct rtw89_vif_link *rtwvif_link; 3884 struct sk_buff *skb, *tmp; 3885 unsigned int link_id; 3886 int qsel, ret; 3887 3888 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) 3889 if (rtwvif_link->chanctx_idx == target->chanctx_idx) 3890 goto bottom; 3891 3892 return; 3893 3894 bottom: 3895 if (skb_queue_len(&rtwsta->roc_queue) == 0) 3896 return; 3897 3898 skb_queue_walk_safe(&rtwsta->roc_queue, skb, tmp) { 3899 skb_unlink(skb, &rtwsta->roc_queue); 3900 3901 ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel); 3902 if (ret) { 3903 rtw89_warn(rtwdev, "pending tx failed with %d\n", ret); 3904 dev_kfree_skb_any(skb); 3905 } else { 3906 rtw89_core_tx_kick_off(rtwdev, qsel); 3907 } 3908 } 3909 } 3910 3911 static void rtw89_core_handle_sta_pending_tx(struct rtw89_dev *rtwdev, 3912 struct rtw89_vif_link *rtwvif_link) 3913 { 3914 ieee80211_iterate_stations_atomic(rtwdev->hw, 3915 rtw89_core_sta_pending_tx_iter, 3916 rtwvif_link); 3917 } 3918 3919 int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 3920 bool qos, bool ps, int timeout) 3921 { 3922 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3923 int link_id = ieee80211_vif_is_mld(vif) ? 
rtwvif_link->link_id : -1; 3924 struct rtw89_sta_link *rtwsta_link; 3925 struct rtw89_tx_wait_info *wait; 3926 struct ieee80211_sta *sta; 3927 struct ieee80211_hdr *hdr; 3928 struct rtw89_sta *rtwsta; 3929 struct sk_buff *skb; 3930 int ret, qsel; 3931 3932 if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc) 3933 return 0; 3934 3935 wait = kzalloc(sizeof(*wait), GFP_KERNEL); 3936 if (!wait) 3937 return -ENOMEM; 3938 3939 init_completion(&wait->completion); 3940 3941 rcu_read_lock(); 3942 sta = ieee80211_find_sta(vif, vif->cfg.ap_addr); 3943 if (!sta) { 3944 ret = -EINVAL; 3945 goto out; 3946 } 3947 rtwsta = sta_to_rtwsta(sta); 3948 3949 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, qos); 3950 if (!skb) { 3951 ret = -ENOMEM; 3952 goto out; 3953 } 3954 3955 wait->skb = skb; 3956 3957 hdr = (struct ieee80211_hdr *)skb->data; 3958 if (ps) 3959 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); 3960 3961 rtwsta_link = rtwsta->links[rtwvif_link->link_id]; 3962 if (unlikely(!rtwsta_link)) { 3963 ret = -ENOLINK; 3964 dev_kfree_skb_any(skb); 3965 goto out; 3966 } 3967 3968 ret = rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, &qsel, true, 3969 wait); 3970 if (ret) { 3971 rtw89_warn(rtwdev, "nullfunc transmit failed: %d\n", ret); 3972 dev_kfree_skb_any(skb); 3973 goto out; 3974 } 3975 3976 rcu_read_unlock(); 3977 3978 return rtw89_core_tx_kick_off_and_wait(rtwdev, skb, wait, qsel, 3979 timeout); 3980 out: 3981 rcu_read_unlock(); 3982 kfree(wait); 3983 3984 return ret; 3985 } 3986 3987 void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 3988 { 3989 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 3990 struct rtw89_chanctx_pause_parm pause_parm = { 3991 .rsn = RTW89_CHANCTX_PAUSE_REASON_ROC, 3992 }; 3993 struct ieee80211_hw *hw = rtwdev->hw; 3994 struct rtw89_roc *roc = &rtwvif->roc; 3995 struct rtw89_vif_link *rtwvif_link; 3996 struct cfg80211_chan_def roc_chan; 3997 struct rtw89_vif *tmp_vif; 3998 u32 reg; 3999 int ret; 4000 4001 lockdep_assert_wiphy(hw->wiphy); 4002 4003 rtw89_leave_ips_by_hwflags(rtwdev); 4004 rtw89_leave_lps(rtwdev); 4005 4006 rtwvif_link = rtw89_get_designated_link(rtwvif); 4007 if (unlikely(!rtwvif_link)) { 4008 rtw89_err(rtwdev, "roc start: find no designated link\n"); 4009 return; 4010 } 4011 4012 roc->link_id = rtwvif_link->link_id; 4013 4014 pause_parm.trigger = rtwvif_link; 4015 rtw89_chanctx_pause(rtwdev, &pause_parm); 4016 4017 ret = rtw89_core_send_nullfunc(rtwdev, rtwvif_link, true, true, 4018 RTW89_ROC_TX_TIMEOUT); 4019 if (ret) 4020 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 4021 "roc send null-1 failed: %d\n", ret); 4022 4023 rtw89_for_each_rtwvif(rtwdev, tmp_vif) { 4024 struct rtw89_vif_link *tmp_link; 4025 unsigned int link_id; 4026 4027 rtw89_vif_for_each_link(tmp_vif, tmp_link, link_id) { 4028 if (tmp_link->chanctx_idx == rtwvif_link->chanctx_idx) { 4029 tmp_vif->offchan = true; 4030 break; 4031 } 4032 } 4033 } 4034 4035 cfg80211_chandef_create(&roc_chan, &roc->chan, NL80211_CHAN_NO_HT); 4036 rtw89_config_roc_chandef(rtwdev, rtwvif_link, &roc_chan); 4037 rtw89_set_channel(rtwdev); 4038 4039 reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx); 4040 rtw89_write32_clr(rtwdev, reg, B_AX_A_UC_CAM_MATCH | B_AX_A_BC_CAM_MATCH); 4041 4042 ieee80211_ready_on_channel(hw); 4043 wiphy_delayed_work_cancel(hw->wiphy, &rtwvif->roc.roc_work); 4044 wiphy_delayed_work_queue(hw->wiphy, &rtwvif->roc.roc_work, 4045 msecs_to_jiffies(rtwvif->roc.duration)); 4046 } 4047 4048 void rtw89_roc_end(struct 
rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 4049 { 4050 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4051 struct ieee80211_hw *hw = rtwdev->hw; 4052 struct rtw89_roc *roc = &rtwvif->roc; 4053 struct rtw89_vif_link *rtwvif_link; 4054 struct rtw89_vif *tmp_vif; 4055 u32 reg; 4056 int ret; 4057 4058 lockdep_assert_wiphy(hw->wiphy); 4059 4060 ieee80211_remain_on_channel_expired(hw); 4061 4062 rtw89_leave_ips_by_hwflags(rtwdev); 4063 rtw89_leave_lps(rtwdev); 4064 4065 rtwvif_link = rtwvif->links[roc->link_id]; 4066 if (unlikely(!rtwvif_link)) { 4067 rtw89_err(rtwdev, "roc end: find no link (link id %u)\n", 4068 roc->link_id); 4069 return; 4070 } 4071 4072 reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx); 4073 rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rtwdev->hal.rx_fltr); 4074 4075 roc->state = RTW89_ROC_IDLE; 4076 rtw89_config_roc_chandef(rtwdev, rtwvif_link, NULL); 4077 rtw89_chanctx_proceed(rtwdev, NULL); 4078 ret = rtw89_core_send_nullfunc(rtwdev, rtwvif_link, true, false, 4079 RTW89_ROC_TX_TIMEOUT); 4080 if (ret) 4081 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 4082 "roc send null-0 failed: %d\n", ret); 4083 4084 rtw89_for_each_rtwvif(rtwdev, tmp_vif) 4085 tmp_vif->offchan = false; 4086 4087 rtw89_core_handle_sta_pending_tx(rtwdev, rtwvif_link); 4088 queue_work(rtwdev->txq_wq, &rtwdev->txq_work); 4089 4090 if (hw->conf.flags & IEEE80211_CONF_IDLE) 4091 wiphy_delayed_work_queue(hw->wiphy, &roc->roc_work, 4092 msecs_to_jiffies(RTW89_ROC_IDLE_TIMEOUT)); 4093 } 4094 4095 void rtw89_roc_work(struct wiphy *wiphy, struct wiphy_work *work) 4096 { 4097 struct rtw89_vif *rtwvif = container_of(work, struct rtw89_vif, 4098 roc.roc_work.work); 4099 struct rtw89_dev *rtwdev = rtwvif->rtwdev; 4100 struct rtw89_roc *roc = &rtwvif->roc; 4101 4102 lockdep_assert_wiphy(wiphy); 4103 4104 switch (roc->state) { 4105 case RTW89_ROC_IDLE: 4106 rtw89_enter_ips_by_hwflags(rtwdev); 4107 break; 4108 case RTW89_ROC_MGMT: 4109 case RTW89_ROC_NORMAL: 4110 rtw89_roc_end(rtwdev, rtwvif); 4111 break; 4112 default: 4113 break; 4114 } 4115 } 4116 4117 static enum rtw89_tfc_lv rtw89_get_traffic_level(struct rtw89_dev *rtwdev, 4118 u32 throughput, u64 cnt, 4119 enum rtw89_tfc_interval interval) 4120 { 4121 u64 cnt_level; 4122 4123 switch (interval) { 4124 default: 4125 case RTW89_TFC_INTERVAL_100MS: 4126 cnt_level = 5; 4127 break; 4128 case RTW89_TFC_INTERVAL_2SEC: 4129 cnt_level = 100; 4130 break; 4131 } 4132 4133 if (cnt < cnt_level) 4134 return RTW89_TFC_IDLE; 4135 if (throughput > 50) 4136 return RTW89_TFC_HIGH; 4137 if (throughput > 10) 4138 return RTW89_TFC_MID; 4139 if (throughput > 2) 4140 return RTW89_TFC_LOW; 4141 return RTW89_TFC_ULTRA_LOW; 4142 } 4143 4144 static bool rtw89_traffic_stats_calc(struct rtw89_dev *rtwdev, 4145 struct rtw89_traffic_stats *stats, 4146 enum rtw89_tfc_interval interval) 4147 { 4148 enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv; 4149 enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv; 4150 4151 stats->tx_throughput_raw = rtw89_bytes_to_mbps(stats->tx_unicast, interval); 4152 stats->rx_throughput_raw = rtw89_bytes_to_mbps(stats->rx_unicast, interval); 4153 4154 ewma_tp_add(&stats->tx_ewma_tp, stats->tx_throughput_raw); 4155 ewma_tp_add(&stats->rx_ewma_tp, stats->rx_throughput_raw); 4156 4157 stats->tx_throughput = ewma_tp_read(&stats->tx_ewma_tp); 4158 stats->rx_throughput = ewma_tp_read(&stats->rx_ewma_tp); 4159 stats->tx_tfc_lv = rtw89_get_traffic_level(rtwdev, stats->tx_throughput, 4160 stats->tx_cnt, interval); 4161 stats->rx_tfc_lv = 
rtw89_get_traffic_level(rtwdev, stats->rx_throughput, 4162 stats->rx_cnt, interval); 4163 stats->tx_avg_len = stats->tx_cnt ? 4164 DIV_ROUND_DOWN_ULL(stats->tx_unicast, stats->tx_cnt) : 0; 4165 stats->rx_avg_len = stats->rx_cnt ? 4166 DIV_ROUND_DOWN_ULL(stats->rx_unicast, stats->rx_cnt) : 0; 4167 4168 stats->tx_unicast = 0; 4169 stats->rx_unicast = 0; 4170 stats->tx_cnt = 0; 4171 stats->rx_cnt = 0; 4172 stats->rx_tf_periodic = stats->rx_tf_acc; 4173 stats->rx_tf_acc = 0; 4174 4175 if (tx_tfc_lv != stats->tx_tfc_lv || rx_tfc_lv != stats->rx_tfc_lv) 4176 return true; 4177 4178 return false; 4179 } 4180 4181 static bool rtw89_traffic_stats_track(struct rtw89_dev *rtwdev) 4182 { 4183 struct rtw89_vif_link *rtwvif_link; 4184 struct rtw89_vif *rtwvif; 4185 unsigned int link_id; 4186 bool tfc_changed; 4187 4188 tfc_changed = rtw89_traffic_stats_calc(rtwdev, &rtwdev->stats, 4189 RTW89_TFC_INTERVAL_2SEC); 4190 4191 rtw89_for_each_rtwvif(rtwdev, rtwvif) { 4192 rtw89_traffic_stats_calc(rtwdev, &rtwvif->stats, 4193 RTW89_TFC_INTERVAL_2SEC); 4194 4195 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) 4196 rtw89_fw_h2c_tp_offload(rtwdev, rtwvif_link); 4197 } 4198 4199 return tfc_changed; 4200 } 4201 4202 static void rtw89_enter_lps_track(struct rtw89_dev *rtwdev) 4203 { 4204 struct ieee80211_vif *vif; 4205 struct rtw89_vif *rtwvif; 4206 4207 rtw89_for_each_rtwvif(rtwdev, rtwvif) { 4208 if (rtwvif->tdls_peer) 4209 continue; 4210 if (rtwvif->offchan) 4211 continue; 4212 4213 if (rtwvif->stats_ps.tx_tfc_lv >= RTW89_TFC_MID || 4214 rtwvif->stats_ps.rx_tfc_lv >= RTW89_TFC_MID) 4215 continue; 4216 4217 vif = rtwvif_to_vif(rtwvif); 4218 4219 if (!(vif->type == NL80211_IFTYPE_STATION || 4220 vif->type == NL80211_IFTYPE_P2P_CLIENT)) 4221 continue; 4222 4223 if (!rtw89_core_bcn_track_can_lps(rtwdev)) 4224 continue; 4225 4226 rtw89_enter_lps(rtwdev, rtwvif, true); 4227 } 4228 } 4229 4230 static void rtw89_core_rfk_track(struct rtw89_dev *rtwdev) 4231 { 4232 enum rtw89_entity_mode mode; 4233 4234 mode = rtw89_get_entity_mode(rtwdev); 4235 if (mode == RTW89_ENTITY_MODE_MCC) 4236 return; 4237 4238 rtw89_chip_rfk_track(rtwdev); 4239 } 4240 4241 void rtw89_core_update_p2p_ps(struct rtw89_dev *rtwdev, 4242 struct rtw89_vif_link *rtwvif_link, 4243 struct ieee80211_bss_conf *bss_conf) 4244 { 4245 enum rtw89_entity_mode mode = rtw89_get_entity_mode(rtwdev); 4246 4247 if (mode == RTW89_ENTITY_MODE_MCC) 4248 rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_P2P_PS_CHANGE); 4249 else 4250 rtw89_process_p2p_ps(rtwdev, rtwvif_link, bss_conf); 4251 } 4252 4253 void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev, 4254 struct rtw89_traffic_stats *stats) 4255 { 4256 stats->tx_unicast = 0; 4257 stats->rx_unicast = 0; 4258 stats->tx_cnt = 0; 4259 stats->rx_cnt = 0; 4260 ewma_tp_init(&stats->tx_ewma_tp); 4261 ewma_tp_init(&stats->rx_ewma_tp); 4262 } 4263 4264 #define RTW89_MLSR_GOTO_2GHZ_THRESHOLD -53 4265 #define RTW89_MLSR_EXIT_2GHZ_THRESHOLD -38 4266 static void rtw89_core_mlsr_link_decision(struct rtw89_dev *rtwdev, 4267 struct rtw89_vif *rtwvif) 4268 { 4269 unsigned int sel_link_id = IEEE80211_MLD_MAX_NUM_LINKS; 4270 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 4271 struct rtw89_vif_link *rtwvif_link; 4272 const struct rtw89_chan *chan; 4273 unsigned long usable_links; 4274 unsigned int link_id; 4275 u8 decided_bands; 4276 u8 rssi; 4277 4278 rssi = ewma_rssi_read(&rtwdev->phystat.bcn_rssi); 4279 if (unlikely(!rssi)) 4280 return; 4281 4282 if (RTW89_RSSI_RAW_TO_DBM(rssi) >= RTW89_MLSR_EXIT_2GHZ_THRESHOLD) 4283 
decided_bands = BIT(RTW89_BAND_5G) | BIT(RTW89_BAND_6G); 4284 else if (RTW89_RSSI_RAW_TO_DBM(rssi) <= RTW89_MLSR_GOTO_2GHZ_THRESHOLD) 4285 decided_bands = BIT(RTW89_BAND_2G); 4286 else 4287 return; 4288 4289 usable_links = ieee80211_vif_usable_links(vif); 4290 4291 rtwvif_link = rtw89_get_designated_link(rtwvif); 4292 if (unlikely(!rtwvif_link)) 4293 goto select; 4294 4295 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 4296 if (decided_bands & BIT(chan->band_type)) 4297 return; 4298 4299 usable_links &= ~BIT(rtwvif_link->link_id); 4300 4301 select: 4302 rcu_read_lock(); 4303 4304 for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) { 4305 struct ieee80211_bss_conf *link_conf; 4306 struct ieee80211_channel *channel; 4307 enum rtw89_band band; 4308 4309 link_conf = rcu_dereference(vif->link_conf[link_id]); 4310 if (unlikely(!link_conf)) 4311 continue; 4312 4313 channel = link_conf->chanreq.oper.chan; 4314 if (unlikely(!channel)) 4315 continue; 4316 4317 band = rtw89_nl80211_to_hw_band(channel->band); 4318 if (decided_bands & BIT(band)) { 4319 sel_link_id = link_id; 4320 break; 4321 } 4322 } 4323 4324 rcu_read_unlock(); 4325 4326 if (sel_link_id == IEEE80211_MLD_MAX_NUM_LINKS) 4327 return; 4328 4329 rtw89_core_mlsr_switch(rtwdev, rtwvif, sel_link_id); 4330 } 4331 4332 static void rtw89_core_mlo_track(struct rtw89_dev *rtwdev) 4333 { 4334 struct rtw89_hal *hal = &rtwdev->hal; 4335 struct ieee80211_vif *vif; 4336 struct rtw89_vif *rtwvif; 4337 4338 if (hal->disabled_dm_bitmap & BIT(RTW89_DM_MLO)) 4339 return; 4340 4341 rtw89_for_each_rtwvif(rtwdev, rtwvif) { 4342 vif = rtwvif_to_vif(rtwvif); 4343 if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif)) 4344 continue; 4345 4346 switch (rtwvif->mlo_mode) { 4347 case RTW89_MLO_MODE_MLSR: 4348 rtw89_core_mlsr_link_decision(rtwdev, rtwvif); 4349 break; 4350 default: 4351 break; 4352 } 4353 } 4354 } 4355 4356 static void rtw89_track_ps_work(struct wiphy *wiphy, struct wiphy_work *work) 4357 { 4358 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 4359 track_ps_work.work); 4360 struct rtw89_vif *rtwvif; 4361 4362 lockdep_assert_wiphy(wiphy); 4363 4364 if (test_bit(RTW89_FLAG_FORBIDDEN_TRACK_WORK, rtwdev->flags)) 4365 return; 4366 4367 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 4368 return; 4369 4370 wiphy_delayed_work_queue(wiphy, &rtwdev->track_ps_work, 4371 RTW89_TRACK_PS_WORK_PERIOD); 4372 4373 rtw89_for_each_rtwvif(rtwdev, rtwvif) 4374 rtw89_traffic_stats_calc(rtwdev, &rtwvif->stats_ps, 4375 RTW89_TFC_INTERVAL_100MS); 4376 4377 if (rtwdev->scanning) 4378 return; 4379 4380 if (rtwdev->lps_enabled && !rtwdev->btc.lps) 4381 rtw89_enter_lps_track(rtwdev); 4382 } 4383 4384 static void rtw89_track_work(struct wiphy *wiphy, struct wiphy_work *work) 4385 { 4386 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 4387 track_work.work); 4388 bool tfc_changed; 4389 4390 lockdep_assert_wiphy(wiphy); 4391 4392 if (test_bit(RTW89_FLAG_FORBIDDEN_TRACK_WORK, rtwdev->flags)) 4393 return; 4394 4395 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 4396 return; 4397 4398 wiphy_delayed_work_queue(wiphy, &rtwdev->track_work, 4399 RTW89_TRACK_WORK_PERIOD); 4400 4401 tfc_changed = rtw89_traffic_stats_track(rtwdev); 4402 if (rtwdev->scanning) 4403 return; 4404 4405 rtw89_leave_lps(rtwdev); 4406 4407 if (tfc_changed) { 4408 rtw89_hci_recalc_int_mit(rtwdev); 4409 rtw89_btc_ntfy_wl_sta(rtwdev); 4410 } 4411 rtw89_mac_bf_monitor_track(rtwdev); 4412 rtw89_core_bcn_track(rtwdev); 4413 rtw89_phy_stat_track(rtwdev); 4414 
rtw89_phy_env_monitor_track(rtwdev); 4415 rtw89_phy_dig(rtwdev); 4416 rtw89_core_rfk_track(rtwdev); 4417 rtw89_phy_ra_update(rtwdev); 4418 rtw89_phy_cfo_track(rtwdev); 4419 rtw89_phy_tx_path_div_track(rtwdev); 4420 rtw89_phy_antdiv_track(rtwdev); 4421 rtw89_phy_ul_tb_ctrl_track(rtwdev); 4422 rtw89_phy_edcca_track(rtwdev); 4423 rtw89_sar_track(rtwdev); 4424 rtw89_chanctx_track(rtwdev); 4425 rtw89_core_rfkill_poll(rtwdev, false); 4426 rtw89_core_mlo_track(rtwdev); 4427 4428 if (rtwdev->lps_enabled && !rtwdev->btc.lps) 4429 rtw89_enter_lps_track(rtwdev); 4430 } 4431 4432 u8 rtw89_core_acquire_bit_map(unsigned long *addr, unsigned long size) 4433 { 4434 unsigned long bit; 4435 4436 bit = find_first_zero_bit(addr, size); 4437 if (bit < size) 4438 set_bit(bit, addr); 4439 4440 return bit; 4441 } 4442 4443 void rtw89_core_release_bit_map(unsigned long *addr, u8 bit) 4444 { 4445 clear_bit(bit, addr); 4446 } 4447 4448 void rtw89_core_release_all_bits_map(unsigned long *addr, unsigned int nbits) 4449 { 4450 bitmap_zero(addr, nbits); 4451 } 4452 4453 int rtw89_core_acquire_sta_ba_entry(struct rtw89_dev *rtwdev, 4454 struct rtw89_sta_link *rtwsta_link, u8 tid, 4455 u8 *cam_idx) 4456 { 4457 const struct rtw89_chip_info *chip = rtwdev->chip; 4458 struct rtw89_cam_info *cam_info = &rtwdev->cam_info; 4459 struct rtw89_ba_cam_entry *entry = NULL, *tmp; 4460 u8 idx; 4461 int i; 4462 4463 lockdep_assert_wiphy(rtwdev->hw->wiphy); 4464 4465 idx = rtw89_core_acquire_bit_map(cam_info->ba_cam_map, chip->bacam_num); 4466 if (idx == chip->bacam_num) { 4467 /* allocate a static BA CAM to tid=0/5, so replace the existing 4468 * one if BA CAM is full. Hardware will process the original tid 4469 * automatically. 4470 */ 4471 if (tid != 0 && tid != 5) 4472 return -ENOSPC; 4473 4474 for_each_set_bit(i, cam_info->ba_cam_map, chip->bacam_num) { 4475 tmp = &cam_info->ba_cam_entry[i]; 4476 if (tmp->tid == 0 || tmp->tid == 5) 4477 continue; 4478 4479 idx = i; 4480 entry = tmp; 4481 list_del(&entry->list); 4482 break; 4483 } 4484 4485 if (!entry) 4486 return -ENOSPC; 4487 } else { 4488 entry = &cam_info->ba_cam_entry[idx]; 4489 } 4490 4491 entry->tid = tid; 4492 list_add_tail(&entry->list, &rtwsta_link->ba_cam_list); 4493 4494 *cam_idx = idx; 4495 4496 return 0; 4497 } 4498 4499 int rtw89_core_release_sta_ba_entry(struct rtw89_dev *rtwdev, 4500 struct rtw89_sta_link *rtwsta_link, u8 tid, 4501 u8 *cam_idx) 4502 { 4503 struct rtw89_cam_info *cam_info = &rtwdev->cam_info; 4504 struct rtw89_ba_cam_entry *entry = NULL, *tmp; 4505 u8 idx; 4506 4507 lockdep_assert_wiphy(rtwdev->hw->wiphy); 4508 4509 list_for_each_entry_safe(entry, tmp, &rtwsta_link->ba_cam_list, list) { 4510 if (entry->tid != tid) 4511 continue; 4512 4513 idx = entry - cam_info->ba_cam_entry; 4514 list_del(&entry->list); 4515 4516 rtw89_core_release_bit_map(cam_info->ba_cam_map, idx); 4517 *cam_idx = idx; 4518 return 0; 4519 } 4520 4521 return -ENOENT; 4522 } 4523 4524 #define RTW89_TYPE_MAPPING(_type) \ 4525 case NL80211_IFTYPE_ ## _type: \ 4526 rtwvif_link->wifi_role = RTW89_WIFI_ROLE_ ## _type; \ 4527 break 4528 void rtw89_vif_type_mapping(struct rtw89_vif_link *rtwvif_link, bool assoc) 4529 { 4530 const struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 4531 const struct ieee80211_bss_conf *bss_conf; 4532 4533 switch (vif->type) { 4534 case NL80211_IFTYPE_STATION: 4535 if (vif->p2p) 4536 rtwvif_link->wifi_role = RTW89_WIFI_ROLE_P2P_CLIENT; 4537 else 4538 rtwvif_link->wifi_role = RTW89_WIFI_ROLE_STATION; 4539 break; 4540 case NL80211_IFTYPE_AP: 4541 if 
(vif->p2p) 4542 rtwvif_link->wifi_role = RTW89_WIFI_ROLE_P2P_GO; 4543 else 4544 rtwvif_link->wifi_role = RTW89_WIFI_ROLE_AP; 4545 break; 4546 RTW89_TYPE_MAPPING(ADHOC); 4547 RTW89_TYPE_MAPPING(MONITOR); 4548 RTW89_TYPE_MAPPING(MESH_POINT); 4549 default: 4550 WARN_ON(1); 4551 break; 4552 } 4553 4554 switch (vif->type) { 4555 case NL80211_IFTYPE_AP: 4556 case NL80211_IFTYPE_MESH_POINT: 4557 rtwvif_link->net_type = RTW89_NET_TYPE_AP_MODE; 4558 rtwvif_link->self_role = RTW89_SELF_ROLE_AP; 4559 break; 4560 case NL80211_IFTYPE_ADHOC: 4561 rtwvif_link->net_type = RTW89_NET_TYPE_AD_HOC; 4562 rtwvif_link->self_role = RTW89_SELF_ROLE_CLIENT; 4563 break; 4564 case NL80211_IFTYPE_STATION: 4565 if (assoc) { 4566 rtwvif_link->net_type = RTW89_NET_TYPE_INFRA; 4567 4568 rcu_read_lock(); 4569 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false); 4570 rtwvif_link->trigger = bss_conf->he_support; 4571 rcu_read_unlock(); 4572 } else { 4573 rtwvif_link->net_type = RTW89_NET_TYPE_NO_LINK; 4574 rtwvif_link->trigger = false; 4575 } 4576 rtwvif_link->self_role = RTW89_SELF_ROLE_CLIENT; 4577 rtwvif_link->addr_cam.sec_ent_mode = RTW89_ADDR_CAM_SEC_NORMAL; 4578 break; 4579 case NL80211_IFTYPE_MONITOR: 4580 break; 4581 default: 4582 WARN_ON(1); 4583 break; 4584 } 4585 } 4586 4587 int rtw89_core_sta_link_add(struct rtw89_dev *rtwdev, 4588 struct rtw89_vif_link *rtwvif_link, 4589 struct rtw89_sta_link *rtwsta_link) 4590 { 4591 const struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 4592 const struct ieee80211_sta *sta = rtwsta_link_to_sta(rtwsta_link); 4593 struct rtw89_hal *hal = &rtwdev->hal; 4594 u8 ant_num = hal->ant_diversity ? 2 : rtwdev->chip->rf_path_num; 4595 int i; 4596 int ret; 4597 4598 rtwsta_link->prev_rssi = 0; 4599 INIT_LIST_HEAD(&rtwsta_link->ba_cam_list); 4600 ewma_rssi_init(&rtwsta_link->avg_rssi); 4601 ewma_snr_init(&rtwsta_link->avg_snr); 4602 ewma_evm_init(&rtwsta_link->evm_1ss); 4603 for (i = 0; i < ant_num; i++) { 4604 ewma_rssi_init(&rtwsta_link->rssi[i]); 4605 ewma_evm_init(&rtwsta_link->evm_min[i]); 4606 ewma_evm_init(&rtwsta_link->evm_max[i]); 4607 } 4608 4609 if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) { 4610 /* must do rtw89_reg_6ghz_recalc() before rfk channel */ 4611 ret = rtw89_reg_6ghz_recalc(rtwdev, rtwvif_link, true); 4612 if (ret) 4613 return ret; 4614 4615 rtw89_btc_ntfy_role_info(rtwdev, rtwvif_link, rtwsta_link, 4616 BTC_ROLE_MSTS_STA_CONN_START); 4617 rtw89_chip_rfk_channel(rtwdev, rtwvif_link); 4618 4619 if (vif->p2p) { 4620 rtw89_mac_get_tx_retry_limit(rtwdev, rtwsta_link, 4621 &rtwsta_link->tx_retry); 4622 rtw89_mac_set_tx_retry_limit(rtwdev, rtwsta_link, false, 60); 4623 } 4624 rtw89_phy_dig_suspend(rtwdev); 4625 } else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) { 4626 ret = rtw89_mac_set_macid_pause(rtwdev, rtwsta_link->mac_id, false); 4627 if (ret) { 4628 rtw89_warn(rtwdev, "failed to send h2c macid pause\n"); 4629 return ret; 4630 } 4631 4632 ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif_link, rtwsta_link, 4633 RTW89_ROLE_CREATE); 4634 if (ret) { 4635 rtw89_warn(rtwdev, "failed to send h2c role info\n"); 4636 return ret; 4637 } 4638 4639 ret = rtw89_chip_h2c_default_cmac_tbl(rtwdev, rtwvif_link, rtwsta_link); 4640 if (ret) 4641 return ret; 4642 4643 ret = rtw89_chip_h2c_default_dmac_tbl(rtwdev, rtwvif_link, rtwsta_link); 4644 if (ret) 4645 return ret; 4646 } 4647 4648 return 0; 4649 } 4650 4651 int rtw89_core_sta_link_disassoc(struct rtw89_dev *rtwdev, 4652 struct rtw89_vif_link *rtwvif_link, 4653 struct rtw89_sta_link *rtwsta_link) 
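/* Disassociation teardown for one station link: clear the assoc-link
 * mapping; on station-type interfaces also push a disabled beacon-filter
 * config to firmware and reset beacon tracking, and release the NoA-once
 * state for a P2P client role.
 */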
4654 { 4655 const struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 4656 4657 rtw89_assoc_link_clr(rtwsta_link); 4658 4659 if (vif->type == NL80211_IFTYPE_STATION) { 4660 rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, false); 4661 rtw89_core_bcn_track_reset(rtwdev); 4662 } 4663 4664 if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT) 4665 rtw89_p2p_noa_once_deinit(rtwvif_link); 4666 4667 return 0; 4668 } 4669 4670 int rtw89_core_sta_link_disconnect(struct rtw89_dev *rtwdev, 4671 struct rtw89_vif_link *rtwvif_link, 4672 struct rtw89_sta_link *rtwsta_link) 4673 { 4674 const struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 4675 const struct ieee80211_sta *sta = rtwsta_link_to_sta(rtwsta_link); 4676 int ret; 4677 4678 rtw89_mac_bf_monitor_calc(rtwdev, rtwsta_link, true); 4679 rtw89_mac_bf_disassoc(rtwdev, rtwvif_link, rtwsta_link); 4680 4681 if (vif->type == NL80211_IFTYPE_AP || sta->tdls) 4682 rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta_link->addr_cam); 4683 if (sta->tdls) 4684 rtw89_cam_deinit_bssid_cam(rtwdev, &rtwsta_link->bssid_cam); 4685 4686 if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) { 4687 rtw89_vif_type_mapping(rtwvif_link, false); 4688 rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif_link, true); 4689 } 4690 4691 ret = rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, rtwvif_link, rtwsta_link); 4692 if (ret) { 4693 rtw89_warn(rtwdev, "failed to send h2c cmac table\n"); 4694 return ret; 4695 } 4696 4697 ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif_link, rtwsta_link, true); 4698 if (ret) { 4699 rtw89_warn(rtwdev, "failed to send h2c join info\n"); 4700 return ret; 4701 } 4702 4703 /* update cam aid mac_id net_type */ 4704 ret = rtw89_fw_h2c_cam(rtwdev, rtwvif_link, rtwsta_link, NULL); 4705 if (ret) { 4706 rtw89_warn(rtwdev, "failed to send h2c cam\n"); 4707 return ret; 4708 } 4709 4710 return ret; 4711 } 4712 4713 static bool rtw89_sta_link_can_er(struct rtw89_dev *rtwdev, 4714 struct ieee80211_bss_conf *bss_conf, 4715 struct ieee80211_link_sta *link_sta) 4716 { 4717 if (!bss_conf->he_support || 4718 bss_conf->he_oper.params & IEEE80211_HE_OPERATION_ER_SU_DISABLE) 4719 return false; 4720 4721 if (rtwdev->chip->chip_id == RTL8852C && 4722 rtw89_sta_link_has_su_mu_4xhe08(link_sta) && 4723 !rtw89_sta_link_has_er_su_4xhe08(link_sta)) 4724 return false; 4725 4726 return true; 4727 } 4728 4729 int rtw89_core_sta_link_assoc(struct rtw89_dev *rtwdev, 4730 struct rtw89_vif_link *rtwvif_link, 4731 struct rtw89_sta_link *rtwsta_link) 4732 { 4733 const struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 4734 const struct ieee80211_sta *sta = rtwsta_link_to_sta(rtwsta_link); 4735 struct rtw89_bssid_cam_entry *bssid_cam = rtw89_get_bssid_cam_of(rtwvif_link, 4736 rtwsta_link); 4737 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 4738 rtwvif_link->chanctx_idx); 4739 struct ieee80211_link_sta *link_sta; 4740 int ret; 4741 4742 if (vif->type == NL80211_IFTYPE_AP || sta->tdls) { 4743 if (sta->tdls) { 4744 rcu_read_lock(); 4745 4746 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 4747 ret = rtw89_cam_init_bssid_cam(rtwdev, rtwvif_link, bssid_cam, 4748 link_sta->addr); 4749 if (ret) { 4750 rtw89_warn(rtwdev, "failed to send h2c init bssid cam for TDLS\n"); 4751 rcu_read_unlock(); 4752 return ret; 4753 } 4754 4755 rcu_read_unlock(); 4756 } 4757 4758 ret = rtw89_cam_init_addr_cam(rtwdev, &rtwsta_link->addr_cam, bssid_cam); 4759 if (ret) { 4760 rtw89_warn(rtwdev, "failed to send h2c init addr cam\n"); 4761 return ret; 4762 } 4763 } 4764 4765 ret 
= rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, rtwvif_link, rtwsta_link); 4766 if (ret) { 4767 rtw89_warn(rtwdev, "failed to send h2c cmac table\n"); 4768 return ret; 4769 } 4770 4771 ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif_link, rtwsta_link, false); 4772 if (ret) { 4773 rtw89_warn(rtwdev, "failed to send h2c join info\n"); 4774 return ret; 4775 } 4776 4777 /* update cam aid mac_id net_type */ 4778 ret = rtw89_fw_h2c_cam(rtwdev, rtwvif_link, rtwsta_link, NULL); 4779 if (ret) { 4780 rtw89_warn(rtwdev, "failed to send h2c cam\n"); 4781 return ret; 4782 } 4783 4784 rtw89_phy_ra_assoc(rtwdev, rtwsta_link); 4785 rtw89_mac_bf_assoc(rtwdev, rtwvif_link, rtwsta_link); 4786 rtw89_mac_bf_monitor_calc(rtwdev, rtwsta_link, false); 4787 4788 if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) { 4789 struct ieee80211_bss_conf *bss_conf; 4790 4791 rcu_read_lock(); 4792 4793 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 4794 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 4795 rtwsta_link->er_cap = rtw89_sta_link_can_er(rtwdev, bss_conf, link_sta); 4796 4797 rcu_read_unlock(); 4798 4799 rtw89_btc_ntfy_role_info(rtwdev, rtwvif_link, rtwsta_link, 4800 BTC_ROLE_MSTS_STA_CONN_END); 4801 rtw89_core_get_no_ul_ofdma_htc(rtwdev, &rtwsta_link->htc_template, chan); 4802 rtw89_phy_ul_tb_assoc(rtwdev, rtwvif_link); 4803 rtw89_core_bcn_track_assoc(rtwdev, rtwvif_link); 4804 4805 ret = rtw89_fw_h2c_general_pkt(rtwdev, rtwvif_link, rtwsta_link->mac_id); 4806 if (ret) { 4807 rtw89_warn(rtwdev, "failed to send h2c general packet\n"); 4808 return ret; 4809 } 4810 4811 rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, true); 4812 4813 if (vif->p2p) 4814 rtw89_mac_set_tx_retry_limit(rtwdev, rtwsta_link, false, 4815 rtwsta_link->tx_retry); 4816 rtw89_phy_dig_resume(rtwdev, false); 4817 } 4818 4819 rtw89_assoc_link_set(rtwsta_link); 4820 return ret; 4821 } 4822 4823 int rtw89_core_sta_link_remove(struct rtw89_dev *rtwdev, 4824 struct rtw89_vif_link *rtwvif_link, 4825 struct rtw89_sta_link *rtwsta_link) 4826 { 4827 const struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 4828 const struct ieee80211_sta *sta = rtwsta_link_to_sta(rtwsta_link); 4829 int ret; 4830 4831 if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) { 4832 rtw89_reg_6ghz_recalc(rtwdev, rtwvif_link, false); 4833 rtw89_btc_ntfy_role_info(rtwdev, rtwvif_link, rtwsta_link, 4834 BTC_ROLE_MSTS_STA_DIS_CONN); 4835 4836 if (vif->p2p) 4837 rtw89_mac_set_tx_retry_limit(rtwdev, rtwsta_link, false, 4838 rtwsta_link->tx_retry); 4839 } else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) { 4840 ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif_link, rtwsta_link, 4841 RTW89_ROLE_REMOVE); 4842 if (ret) { 4843 rtw89_warn(rtwdev, "failed to send h2c role info\n"); 4844 return ret; 4845 } 4846 } 4847 4848 return 0; 4849 } 4850 4851 static void _rtw89_core_set_tid_config(struct rtw89_dev *rtwdev, 4852 struct ieee80211_sta *sta, 4853 struct cfg80211_tid_cfg *tid_conf) 4854 { 4855 struct ieee80211_txq *txq; 4856 struct rtw89_txq *rtwtxq; 4857 u32 mask = tid_conf->mask; 4858 u8 tids = tid_conf->tids; 4859 int tids_nbit = BITS_PER_BYTE; 4860 int i; 4861 4862 for (i = 0; i < tids_nbit; i++, tids >>= 1) { 4863 if (!tids) 4864 break; 4865 4866 if (!(tids & BIT(0))) 4867 continue; 4868 4869 txq = sta->txq[i]; 4870 rtwtxq = (struct rtw89_txq *)txq->drv_priv; 4871 4872 if (mask & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) { 4873 if (tid_conf->ampdu == NL80211_TID_CONFIG_ENABLE) { 4874 clear_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags); 4875 } else { 
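				/* A-MPDU disabled for this TID: stop any active BA
				 * session, drop the txq from the BA candidate list
				 * and forbid further BA setup on it.
				 */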
4876 if (test_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags)) 4877 ieee80211_stop_tx_ba_session(sta, txq->tid); 4878 spin_lock_bh(&rtwdev->ba_lock); 4879 list_del_init(&rtwtxq->list); 4880 set_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags); 4881 spin_unlock_bh(&rtwdev->ba_lock); 4882 } 4883 } 4884 4885 if (mask & BIT(NL80211_TID_CONFIG_ATTR_AMSDU_CTRL) && tids == 0xff) { 4886 if (tid_conf->amsdu == NL80211_TID_CONFIG_ENABLE) 4887 sta->max_amsdu_subframes = 0; 4888 else 4889 sta->max_amsdu_subframes = 1; 4890 } 4891 } 4892 } 4893 4894 void rtw89_core_set_tid_config(struct rtw89_dev *rtwdev, 4895 struct ieee80211_sta *sta, 4896 struct cfg80211_tid_config *tid_config) 4897 { 4898 int i; 4899 4900 for (i = 0; i < tid_config->n_tid_conf; i++) 4901 _rtw89_core_set_tid_config(rtwdev, sta, 4902 &tid_config->tid_conf[i]); 4903 } 4904 4905 static void rtw89_init_ht_cap(struct rtw89_dev *rtwdev, 4906 struct ieee80211_sta_ht_cap *ht_cap) 4907 { 4908 static const __le16 highest[RF_PATH_MAX] = { 4909 cpu_to_le16(150), cpu_to_le16(300), cpu_to_le16(450), cpu_to_le16(600), 4910 }; 4911 struct rtw89_hal *hal = &rtwdev->hal; 4912 u8 nss = hal->rx_nss; 4913 int i; 4914 4915 ht_cap->ht_supported = true; 4916 ht_cap->cap = 0; 4917 ht_cap->cap |= IEEE80211_HT_CAP_SGI_20 | 4918 IEEE80211_HT_CAP_MAX_AMSDU | 4919 IEEE80211_HT_CAP_TX_STBC | 4920 (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); 4921 ht_cap->cap |= IEEE80211_HT_CAP_LDPC_CODING; 4922 ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 | 4923 IEEE80211_HT_CAP_DSSSCCK40 | 4924 IEEE80211_HT_CAP_SGI_40; 4925 ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; 4926 ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE; 4927 ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; 4928 for (i = 0; i < nss; i++) 4929 ht_cap->mcs.rx_mask[i] = 0xFF; 4930 ht_cap->mcs.rx_mask[4] = 0x01; 4931 ht_cap->mcs.rx_highest = highest[nss - 1]; 4932 } 4933 4934 static void rtw89_init_vht_cap(struct rtw89_dev *rtwdev, 4935 struct ieee80211_sta_vht_cap *vht_cap) 4936 { 4937 static const __le16 highest_bw80[RF_PATH_MAX] = { 4938 cpu_to_le16(433), cpu_to_le16(867), cpu_to_le16(1300), cpu_to_le16(1733), 4939 }; 4940 static const __le16 highest_bw160[RF_PATH_MAX] = { 4941 cpu_to_le16(867), cpu_to_le16(1733), cpu_to_le16(2600), cpu_to_le16(3467), 4942 }; 4943 const struct rtw89_chip_info *chip = rtwdev->chip; 4944 const __le16 *highest = chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160) ? 
4945 highest_bw160 : highest_bw80; 4946 struct rtw89_hal *hal = &rtwdev->hal; 4947 u16 tx_mcs_map = 0, rx_mcs_map = 0; 4948 u8 sts_cap = 3; 4949 int i; 4950 4951 for (i = 0; i < 8; i++) { 4952 if (i < hal->tx_nss) 4953 tx_mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); 4954 else 4955 tx_mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); 4956 if (i < hal->rx_nss) 4957 rx_mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); 4958 else 4959 rx_mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); 4960 } 4961 4962 vht_cap->vht_supported = true; 4963 vht_cap->cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | 4964 IEEE80211_VHT_CAP_SHORT_GI_80 | 4965 IEEE80211_VHT_CAP_RXSTBC_1 | 4966 IEEE80211_VHT_CAP_HTC_VHT | 4967 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK | 4968 0; 4969 vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC; 4970 vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; 4971 vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | 4972 IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE; 4973 vht_cap->cap |= sts_cap << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; 4974 if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160)) 4975 vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ | 4976 IEEE80211_VHT_CAP_SHORT_GI_160; 4977 vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(rx_mcs_map); 4978 vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(tx_mcs_map); 4979 vht_cap->vht_mcs.rx_highest = highest[hal->rx_nss - 1]; 4980 vht_cap->vht_mcs.tx_highest = highest[hal->tx_nss - 1]; 4981 4982 if (ieee80211_hw_check(rtwdev->hw, SUPPORTS_VHT_EXT_NSS_BW)) 4983 vht_cap->vht_mcs.tx_highest |= 4984 cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE); 4985 } 4986 4987 static void rtw89_init_he_cap(struct rtw89_dev *rtwdev, 4988 enum nl80211_band band, 4989 enum nl80211_iftype iftype, 4990 struct ieee80211_sband_iftype_data *iftype_data) 4991 { 4992 const struct rtw89_chip_info *chip = rtwdev->chip; 4993 struct rtw89_hal *hal = &rtwdev->hal; 4994 bool no_ng16 = (chip->chip_id == RTL8852A && hal->cv == CHIP_CBV) || 4995 (chip->chip_id == RTL8852B && hal->cv == CHIP_CAV); 4996 struct ieee80211_sta_he_cap *he_cap; 4997 int nss = hal->rx_nss; 4998 u8 *mac_cap_info; 4999 u8 *phy_cap_info; 5000 u16 mcs_map = 0; 5001 int i; 5002 5003 for (i = 0; i < 8; i++) { 5004 if (i < nss) 5005 mcs_map |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2); 5006 else 5007 mcs_map |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2); 5008 } 5009 5010 he_cap = &iftype_data->he_cap; 5011 mac_cap_info = he_cap->he_cap_elem.mac_cap_info; 5012 phy_cap_info = he_cap->he_cap_elem.phy_cap_info; 5013 5014 he_cap->has_he = true; 5015 mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE; 5016 if (iftype == NL80211_IFTYPE_STATION) 5017 mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US; 5018 mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_ALL_ACK | 5019 IEEE80211_HE_MAC_CAP2_BSR; 5020 mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2; 5021 if (iftype == NL80211_IFTYPE_AP) 5022 mac_cap_info[3] |= IEEE80211_HE_MAC_CAP3_OMI_CONTROL; 5023 mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_OPS | 5024 IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU; 5025 if (iftype == NL80211_IFTYPE_STATION) 5026 mac_cap_info[5] = IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX; 5027 if (band == NL80211_BAND_2GHZ) { 5028 phy_cap_info[0] = 5029 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G; 5030 } else { 5031 phy_cap_info[0] = 5032 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G; 5033 if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160)) 5034 phy_cap_info[0] |= 
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G; 5035 } 5036 phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | 5037 IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | 5038 IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US; 5039 phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | 5040 IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | 5041 IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | 5042 IEEE80211_HE_PHY_CAP2_DOPPLER_TX; 5043 phy_cap_info[3] = IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM; 5044 if (iftype == NL80211_IFTYPE_STATION) 5045 phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM | 5046 IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2; 5047 if (iftype == NL80211_IFTYPE_AP) 5048 phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU; 5049 phy_cap_info[4] = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE | 5050 IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4; 5051 if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160)) 5052 phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4; 5053 phy_cap_info[5] = no_ng16 ? 0 : 5054 IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK | 5055 IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK; 5056 phy_cap_info[6] = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU | 5057 IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU | 5058 IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB | 5059 IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE; 5060 phy_cap_info[7] = IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP | 5061 IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI | 5062 IEEE80211_HE_PHY_CAP7_MAX_NC_1; 5063 phy_cap_info[8] = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI | 5064 IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI | 5065 IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996; 5066 if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160)) 5067 phy_cap_info[8] |= IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU | 5068 IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU; 5069 phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM | 5070 IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU | 5071 IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | 5072 IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB | 5073 u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US, 5074 IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK); 5075 if (iftype == NL80211_IFTYPE_STATION) 5076 phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU; 5077 he_cap->he_mcs_nss_supp.rx_mcs_80 = cpu_to_le16(mcs_map); 5078 he_cap->he_mcs_nss_supp.tx_mcs_80 = cpu_to_le16(mcs_map); 5079 if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160)) { 5080 he_cap->he_mcs_nss_supp.rx_mcs_160 = cpu_to_le16(mcs_map); 5081 he_cap->he_mcs_nss_supp.tx_mcs_160 = cpu_to_le16(mcs_map); 5082 } 5083 5084 if (band == NL80211_BAND_6GHZ) { 5085 __le16 capa; 5086 5087 capa = le16_encode_bits(IEEE80211_HT_MPDU_DENSITY_NONE, 5088 IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) | 5089 le16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K, 5090 IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) | 5091 le16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454, 5092 IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN); 5093 iftype_data->he_6ghz_capa.capa = capa; 5094 } 5095 } 5096 5097 static void rtw89_init_eht_cap(struct rtw89_dev *rtwdev, 5098 enum nl80211_band band, 5099 enum nl80211_iftype iftype, 5100 struct ieee80211_sband_iftype_data *iftype_data) 5101 { 5102 const struct rtw89_chip_info *chip = rtwdev->chip; 5103 struct ieee80211_eht_cap_elem_fixed *eht_cap_elem; 5104 struct 
ieee80211_eht_mcs_nss_supp *eht_nss; 5105 struct ieee80211_sta_eht_cap *eht_cap; 5106 struct rtw89_hal *hal = &rtwdev->hal; 5107 bool support_mcs_12_13 = true; 5108 bool support_320mhz = false; 5109 u8 val, val_mcs13; 5110 int sts = 8; 5111 5112 if (chip->chip_gen == RTW89_CHIP_AX) 5113 return; 5114 5115 if (hal->no_mcs_12_13) 5116 support_mcs_12_13 = false; 5117 5118 if (band == NL80211_BAND_6GHZ && 5119 chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_320)) 5120 support_320mhz = true; 5121 5122 eht_cap = &iftype_data->eht_cap; 5123 eht_cap_elem = &eht_cap->eht_cap_elem; 5124 eht_nss = &eht_cap->eht_mcs_nss_supp; 5125 5126 eht_cap->has_eht = true; 5127 5128 eht_cap_elem->mac_cap_info[0] = 5129 u8_encode_bits(IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_7991, 5130 IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK); 5131 eht_cap_elem->mac_cap_info[1] = 0; 5132 5133 eht_cap_elem->phy_cap_info[0] = 5134 IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI | 5135 IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE; 5136 if (support_320mhz) 5137 eht_cap_elem->phy_cap_info[0] |= 5138 IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ; 5139 5140 eht_cap_elem->phy_cap_info[0] |= 5141 u8_encode_bits(u8_get_bits(sts - 1, BIT(0)), 5142 IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK); 5143 eht_cap_elem->phy_cap_info[1] = 5144 u8_encode_bits(u8_get_bits(sts - 1, GENMASK(2, 1)), 5145 IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK) | 5146 u8_encode_bits(sts - 1, 5147 IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK); 5148 if (support_320mhz) 5149 eht_cap_elem->phy_cap_info[1] |= 5150 u8_encode_bits(sts - 1, 5151 IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK); 5152 5153 eht_cap_elem->phy_cap_info[2] = 0; 5154 5155 eht_cap_elem->phy_cap_info[3] = 5156 IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK | 5157 IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK | 5158 IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK | 5159 IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK; 5160 5161 eht_cap_elem->phy_cap_info[4] = 5162 IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP | 5163 u8_encode_bits(1, IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK); 5164 5165 eht_cap_elem->phy_cap_info[5] = 5166 u8_encode_bits(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US, 5167 IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK); 5168 5169 eht_cap_elem->phy_cap_info[6] = 0; 5170 eht_cap_elem->phy_cap_info[7] = 0; 5171 eht_cap_elem->phy_cap_info[8] = 0; 5172 5173 val = u8_encode_bits(hal->rx_nss, IEEE80211_EHT_MCS_NSS_RX) | 5174 u8_encode_bits(hal->tx_nss, IEEE80211_EHT_MCS_NSS_TX); 5175 val_mcs13 = support_mcs_12_13 ? 
val : 0; 5176 5177 eht_nss->bw._80.rx_tx_mcs9_max_nss = val; 5178 eht_nss->bw._80.rx_tx_mcs11_max_nss = val; 5179 eht_nss->bw._80.rx_tx_mcs13_max_nss = val_mcs13; 5180 eht_nss->bw._160.rx_tx_mcs9_max_nss = val; 5181 eht_nss->bw._160.rx_tx_mcs11_max_nss = val; 5182 eht_nss->bw._160.rx_tx_mcs13_max_nss = val_mcs13; 5183 if (support_320mhz) { 5184 eht_nss->bw._320.rx_tx_mcs9_max_nss = val; 5185 eht_nss->bw._320.rx_tx_mcs11_max_nss = val; 5186 eht_nss->bw._320.rx_tx_mcs13_max_nss = val_mcs13; 5187 } 5188 } 5189 5190 #define RTW89_SBAND_IFTYPES_NR 2 5191 5192 static int rtw89_init_he_eht_cap(struct rtw89_dev *rtwdev, 5193 enum nl80211_band band, 5194 struct ieee80211_supported_band *sband) 5195 { 5196 struct ieee80211_sband_iftype_data *iftype_data; 5197 enum nl80211_iftype iftype; 5198 int idx = 0; 5199 5200 iftype_data = devm_kcalloc(rtwdev->dev, RTW89_SBAND_IFTYPES_NR, 5201 sizeof(*iftype_data), GFP_KERNEL); 5202 if (!iftype_data) 5203 return -ENOMEM; 5204 5205 for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) { 5206 switch (iftype) { 5207 case NL80211_IFTYPE_STATION: 5208 case NL80211_IFTYPE_AP: 5209 break; 5210 default: 5211 continue; 5212 } 5213 5214 if (idx >= RTW89_SBAND_IFTYPES_NR) { 5215 rtw89_warn(rtwdev, "run out of iftype_data\n"); 5216 break; 5217 } 5218 5219 iftype_data[idx].types_mask = BIT(iftype); 5220 5221 rtw89_init_he_cap(rtwdev, band, iftype, &iftype_data[idx]); 5222 rtw89_init_eht_cap(rtwdev, band, iftype, &iftype_data[idx]); 5223 5224 idx++; 5225 } 5226 5227 _ieee80211_set_sband_iftype_data(sband, iftype_data, idx); 5228 return 0; 5229 } 5230 5231 static struct ieee80211_supported_band * 5232 rtw89_core_sband_dup(struct rtw89_dev *rtwdev, 5233 const struct ieee80211_supported_band *sband) 5234 { 5235 struct ieee80211_supported_band *dup; 5236 5237 dup = devm_kmemdup(rtwdev->dev, sband, sizeof(*sband), GFP_KERNEL); 5238 if (!dup) 5239 return NULL; 5240 5241 dup->channels = devm_kmemdup(rtwdev->dev, sband->channels, 5242 sizeof(*sband->channels) * sband->n_channels, 5243 GFP_KERNEL); 5244 if (!dup->channels) 5245 return NULL; 5246 5247 dup->bitrates = devm_kmemdup(rtwdev->dev, sband->bitrates, 5248 sizeof(*sband->bitrates) * sband->n_bitrates, 5249 GFP_KERNEL); 5250 if (!dup->bitrates) 5251 return NULL; 5252 5253 return dup; 5254 } 5255 5256 static int rtw89_core_set_supported_band(struct rtw89_dev *rtwdev) 5257 { 5258 struct ieee80211_hw *hw = rtwdev->hw; 5259 struct ieee80211_supported_band *sband; 5260 u8 support_bands = rtwdev->chip->support_bands; 5261 int ret; 5262 5263 if (support_bands & BIT(NL80211_BAND_2GHZ)) { 5264 sband = rtw89_core_sband_dup(rtwdev, &rtw89_sband_2ghz); 5265 if (!sband) 5266 return -ENOMEM; 5267 rtw89_init_ht_cap(rtwdev, &sband->ht_cap); 5268 ret = rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_2GHZ, sband); 5269 if (ret) 5270 return ret; 5271 hw->wiphy->bands[NL80211_BAND_2GHZ] = sband; 5272 } 5273 5274 if (support_bands & BIT(NL80211_BAND_5GHZ)) { 5275 sband = rtw89_core_sband_dup(rtwdev, &rtw89_sband_5ghz); 5276 if (!sband) 5277 return -ENOMEM; 5278 rtw89_init_ht_cap(rtwdev, &sband->ht_cap); 5279 rtw89_init_vht_cap(rtwdev, &sband->vht_cap); 5280 ret = rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_5GHZ, sband); 5281 if (ret) 5282 return ret; 5283 hw->wiphy->bands[NL80211_BAND_5GHZ] = sband; 5284 } 5285 5286 if (support_bands & BIT(NL80211_BAND_6GHZ)) { 5287 sband = rtw89_core_sband_dup(rtwdev, &rtw89_sband_6ghz); 5288 if (!sband) 5289 return -ENOMEM; 5290 ret = rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_6GHZ, sband); 5291 if (ret) 5292 return 
		hw->wiphy->bands[NL80211_BAND_6GHZ] = sband;
	}

	return 0;
}

static void rtw89_core_ppdu_sts_init(struct rtw89_dev *rtwdev)
{
	int i;

	for (i = 0; i < RTW89_PHY_NUM; i++)
		skb_queue_head_init(&rtwdev->ppdu_sts.rx_queue[i]);
	for (i = 0; i < RTW89_PHY_NUM; i++)
		rtwdev->ppdu_sts.curr_rx_ppdu_cnt[i] = U8_MAX;
}

void rtw89_core_update_beacon_work(struct wiphy *wiphy, struct wiphy_work *work)
{
	struct rtw89_dev *rtwdev;
	struct rtw89_vif_link *rtwvif_link = container_of(work, struct rtw89_vif_link,
							  update_beacon_work);

	lockdep_assert_wiphy(wiphy);

	if (rtwvif_link->net_type != RTW89_NET_TYPE_AP_MODE)
		return;

	rtwdev = rtwvif_link->rtwvif->rtwdev;

	rtw89_chip_h2c_update_beacon(rtwdev, rtwvif_link);
}

void rtw89_core_csa_beacon_work(struct wiphy *wiphy, struct wiphy_work *work)
{
	struct rtw89_vif_link *rtwvif_link =
		container_of(work, struct rtw89_vif_link, csa_beacon_work.work);
	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
	struct rtw89_dev *rtwdev = rtwvif->rtwdev;
	struct ieee80211_bss_conf *bss_conf;
	unsigned int delay;

	lockdep_assert_wiphy(wiphy);

	if (rtwvif_link->net_type != RTW89_NET_TYPE_AP_MODE)
		return;

	rcu_read_lock();

	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
	if (!bss_conf->csa_active) {
		rcu_read_unlock();
		return;
	}

	delay = ieee80211_tu_to_usec(bss_conf->beacon_int);

	rcu_read_unlock();

	if (!ieee80211_beacon_cntdwn_is_complete(vif, rtwvif_link->link_id)) {
		rtw89_chip_h2c_update_beacon(rtwdev, rtwvif_link);

		wiphy_delayed_work_queue(wiphy, &rtwvif_link->csa_beacon_work,
					 usecs_to_jiffies(delay));
	} else {
		ieee80211_csa_finish(vif, rtwvif_link->link_id);
	}
}

struct rtw89_wait_response *
rtw89_wait_for_cond_prep(struct rtw89_wait_info *wait, unsigned int cond)
{
	struct rtw89_wait_response *prep;
	unsigned int cur;

	/* use -EPERM _iff_ telling eval side not to make any changes */

	cur = atomic_cmpxchg(&wait->cond, RTW89_WAIT_COND_IDLE, cond);
	if (cur != RTW89_WAIT_COND_IDLE)
		return ERR_PTR(-EPERM);

	prep = kzalloc(sizeof(*prep), GFP_KERNEL);
	if (!prep)
		return ERR_PTR(-ENOMEM);

	init_completion(&prep->completion);

	rcu_assign_pointer(wait->resp, prep);

	return prep;
}

int rtw89_wait_for_cond_eval(struct rtw89_wait_info *wait,
			     struct rtw89_wait_response *prep, int err)
{
	unsigned long time_left;

	if (IS_ERR(prep)) {
		err = err ?: PTR_ERR(prep);

		/* special error case: no permission to reset anything */
		if (PTR_ERR(prep) == -EPERM)
			return err;

		goto reset;
	}

	if (err)
		goto cleanup;

	time_left = wait_for_completion_timeout(&prep->completion,
						RTW89_WAIT_FOR_COND_TIMEOUT);
	if (time_left == 0) {
		err = -ETIMEDOUT;
		goto cleanup;
	}

	wait->data = prep->data;

cleanup:
	rcu_assign_pointer(wait->resp, NULL);
	kfree_rcu(prep, rcu_head);

reset:
	atomic_set(&wait->cond, RTW89_WAIT_COND_IDLE);

	if (err)
		return err;

	if (wait->data.err)
		return -EFAULT;

	return 0;
}

static void rtw89_complete_cond_resp(struct rtw89_wait_response *resp,
				     const struct rtw89_completion_data *data)
{
	resp->data = *data;
	complete(&resp->completion);
}

void rtw89_complete_cond(struct rtw89_wait_info *wait, unsigned int cond,
			 const struct rtw89_completion_data *data)
{
	struct rtw89_wait_response *resp;
	unsigned int cur;

	guard(rcu)();

	resp = rcu_dereference(wait->resp);
	if (!resp)
		return;

	cur = atomic_cmpxchg(&wait->cond, cond, RTW89_WAIT_COND_IDLE);
	if (cur != cond)
		return;

	rtw89_complete_cond_resp(resp, data);
}
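
/*
 * Illustrative usage sketch, not driver code: how a command path might pair
 * rtw89_wait_for_cond_prep()/rtw89_wait_for_cond_eval() with a completion
 * reported from the event (C2H) side via rtw89_complete_cond().  The names
 * rtw89_example_send_h2c() and RTW89_EXAMPLE_WAIT_COND are hypothetical
 * placeholders.
 *
 *	static int rtw89_example_cmd(struct rtw89_dev *rtwdev)
 *	{
 *		struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
 *		struct rtw89_wait_response *prep;
 *		int ret = 0;
 *
 *		prep = rtw89_wait_for_cond_prep(wait, RTW89_EXAMPLE_WAIT_COND);
 *		if (!IS_ERR(prep))
 *			ret = rtw89_example_send_h2c(rtwdev);
 *
 *		// ERR_PTR(-EPERM) (wait busy) and send errors are folded in here
 *		return rtw89_wait_for_cond_eval(wait, prep, ret);
 *	}
 *
 * The event handler then fills a struct rtw89_completion_data and calls
 * rtw89_complete_cond(wait, RTW89_EXAMPLE_WAIT_COND, &data) to wake the
 * waiter before the timeout expires.
 */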
void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg event)
{
	u16 bt_req_len;

	switch (event) {
	case RTW89_BTC_HMSG_SET_BT_REQ_SLOT:
		bt_req_len = rtw89_coex_query_bt_req_len(rtwdev, RTW89_PHY_0);
		rtw89_debug(rtwdev, RTW89_DBG_BTC,
			    "coex updates BT req len to %d TU\n", bt_req_len);
		rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_BT_SLOT_CHANGE);
		break;
	default:
		if (event < NUM_OF_RTW89_BTC_HMSG)
			rtw89_debug(rtwdev, RTW89_DBG_BTC,
				    "unhandled BTC HMSG event: %d\n", event);
		else
			rtw89_warn(rtwdev,
				   "unrecognized BTC HMSG event: %d\n", event);
		break;
	}
}

void rtw89_check_quirks(struct rtw89_dev *rtwdev, const struct dmi_system_id *quirks)
{
	const struct dmi_system_id *match;
	enum rtw89_quirks quirk;

	if (!quirks)
		return;

	for (match = dmi_first_match(quirks); match; match = dmi_first_match(match + 1)) {
		quirk = (uintptr_t)match->driver_data;
		if (quirk >= NUM_OF_RTW89_QUIRKS)
			continue;

		set_bit(quirk, rtwdev->quirks);
	}
}
EXPORT_SYMBOL(rtw89_check_quirks);
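
/*
 * Illustrative sketch, not driver code: callers hand rtw89_check_quirks() a
 * sentinel-terminated dmi_system_id table whose .driver_data encodes an
 * enum rtw89_quirks value.  The vendor/product strings below are made-up
 * placeholders.
 *
 *	static const struct dmi_system_id rtw89_example_quirks[] = {
 *		{
 *			.ident = "Example laptop needing 110C thermal protection",
 *			.matches = {
 *				DMI_MATCH(DMI_SYS_VENDOR, "ExampleVendor"),
 *				DMI_MATCH(DMI_PRODUCT_NAME, "ExampleBook 14"),
 *			},
 *			.driver_data = (void *)RTW89_QUIRK_THERMAL_PROT_110C,
 *		},
 *		{},
 *	};
 *
 *	rtw89_check_quirks(rtwdev, rtw89_example_quirks);
 */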
int rtw89_core_start(struct rtw89_dev *rtwdev)
{
	int ret;

	rtw89_phy_init_bb_afe(rtwdev);

	ret = rtw89_mac_init(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "mac init fail, ret:%d\n", ret);
		return ret;
	}

	rtw89_btc_ntfy_poweron(rtwdev);

	/* efuse process */

	/* pre-config BB/RF, BB reset/RFC reset */
	ret = rtw89_chip_reset_bb_rf(rtwdev);
	if (ret)
		return ret;

	rtw89_phy_init_bb_reg(rtwdev);
	rtw89_chip_bb_postinit(rtwdev);
	rtw89_phy_init_rf_reg(rtwdev, false);

	rtw89_btc_ntfy_init(rtwdev, BTC_MODE_NORMAL);

	rtw89_phy_dm_init(rtwdev);

	rtw89_mac_cfg_ppdu_status_bands(rtwdev, true);
	rtw89_mac_cfg_phy_rpt_bands(rtwdev, true);
	rtw89_mac_update_rts_threshold(rtwdev);

	ret = rtw89_hci_start(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to start hci\n");
		return ret;
	}

	wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->track_work,
				 RTW89_TRACK_WORK_PERIOD);
	wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->track_ps_work,
				 RTW89_TRACK_PS_WORK_PERIOD);

	set_bit(RTW89_FLAG_RUNNING, rtwdev->flags);

	rtw89_chip_rfk_init_late(rtwdev);
	rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON);
	rtw89_fw_h2c_fw_log(rtwdev, rtwdev->fw.log.enable);
	rtw89_fw_h2c_init_ba_cam(rtwdev);
	rtw89_tas_fw_timer_enable(rtwdev, true);

	return 0;
}

void rtw89_core_stop(struct rtw89_dev *rtwdev)
{
	struct wiphy *wiphy = rtwdev->hw->wiphy;
	struct rtw89_btc *btc = &rtwdev->btc;

	lockdep_assert_wiphy(wiphy);

	/* Prevent stopping twice: from enter_ips and ops_stop */
	if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
		return;

	rtw89_tas_fw_timer_enable(rtwdev, false);
	rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_OFF);

	clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags);

	wiphy_work_cancel(wiphy, &rtwdev->c2h_work);
	wiphy_work_cancel(wiphy, &rtwdev->cancel_6ghz_probe_work);
	wiphy_work_cancel(wiphy, &btc->eapol_notify_work);
	wiphy_work_cancel(wiphy, &btc->arp_notify_work);
	wiphy_work_cancel(wiphy, &btc->dhcp_notify_work);
	wiphy_work_cancel(wiphy, &btc->icmp_notify_work);
	cancel_delayed_work_sync(&rtwdev->txq_reinvoke_work);
	wiphy_delayed_work_cancel(wiphy, &rtwdev->tx_wait_work);
	wiphy_delayed_work_cancel(wiphy, &rtwdev->track_work);
	wiphy_delayed_work_cancel(wiphy, &rtwdev->track_ps_work);
	wiphy_delayed_work_cancel(wiphy, &rtwdev->chanctx_work);
	wiphy_delayed_work_cancel(wiphy, &rtwdev->coex_act1_work);
	wiphy_delayed_work_cancel(wiphy, &rtwdev->coex_bt_devinfo_work);
	wiphy_delayed_work_cancel(wiphy, &rtwdev->coex_rfk_chk_work);
	wiphy_delayed_work_cancel(wiphy, &rtwdev->cfo_track_work);
	wiphy_delayed_work_cancel(wiphy, &rtwdev->mcc_prepare_done_work);
	cancel_delayed_work_sync(&rtwdev->forbid_ba_work);
	wiphy_delayed_work_cancel(wiphy, &rtwdev->antdiv_work);

	rtw89_btc_ntfy_poweroff(rtwdev);
	rtw89_hci_flush_queues(rtwdev, BIT(rtwdev->hw->queues) - 1, true);
	rtw89_mac_flush_txq(rtwdev, BIT(rtwdev->hw->queues) - 1, true);
	rtw89_hci_stop(rtwdev);
	rtw89_hci_deinit(rtwdev);
	rtw89_mac_pwr_off(rtwdev);
	rtw89_hci_reset(rtwdev);
}

u8 rtw89_acquire_mac_id(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 mac_id_num;
	u8 mac_id;

	if (rtwdev->support_mlo)
		mac_id_num = chip->support_macid_num / chip->support_link_num;
	else
		mac_id_num = chip->support_macid_num;

	mac_id = find_first_zero_bit(rtwdev->mac_id_map, mac_id_num);
	if (mac_id == mac_id_num)
		return RTW89_MAX_MAC_ID_NUM;

	set_bit(mac_id, rtwdev->mac_id_map);
	return mac_id;
}

void rtw89_release_mac_id(struct rtw89_dev *rtwdev, u8 mac_id)
{
	clear_bit(mac_id, rtwdev->mac_id_map);
}
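
/*
 * A minimal usage sketch, not driver code: the allocator returns
 * RTW89_MAX_MAC_ID_NUM when the bitmap is exhausted, so callers must treat
 * that value as "no mac_id" and release the id again on teardown.
 *
 *	u8 mac_id = rtw89_acquire_mac_id(rtwdev);
 *
 *	if (mac_id == RTW89_MAX_MAC_ID_NUM)
 *		return -ENOSPC;	// error code choice is illustrative
 *	...
 *	rtw89_release_mac_id(rtwdev, mac_id);
 */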
void rtw89_init_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
		    u8 mac_id, u8 port)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 support_link_num = chip->support_link_num;
	u8 support_mld_num = 0;
	unsigned int link_id;
	u8 index;

	bitmap_zero(rtwvif->links_inst_map, __RTW89_MLD_MAX_LINK_NUM);
	for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++)
		rtwvif->links[link_id] = NULL;

	rtwvif->rtwdev = rtwdev;

	if (rtwdev->support_mlo) {
		rtwvif->links_inst_valid_num = support_link_num;
		support_mld_num = chip->support_macid_num / support_link_num;
	} else {
		rtwvif->links_inst_valid_num = 1;
	}

	for (index = 0; index < rtwvif->links_inst_valid_num; index++) {
		struct rtw89_vif_link *inst = &rtwvif->links_inst[index];

		inst->rtwvif = rtwvif;
		inst->mac_id = mac_id + index * support_mld_num;
		inst->mac_idx = RTW89_MAC_0 + index;
		inst->phy_idx = RTW89_PHY_0 + index;

		/* multi-link uses the same port id on different HW bands */
		inst->port = port;
	}
}

void rtw89_init_sta(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
		    struct rtw89_sta *rtwsta, u8 mac_id)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 support_link_num = chip->support_link_num;
	u8 support_mld_num = 0;
	unsigned int link_id;
	u8 index;

	bitmap_zero(rtwsta->links_inst_map, __RTW89_MLD_MAX_LINK_NUM);
	for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++)
		rtwsta->links[link_id] = NULL;

	rtwsta->rtwdev = rtwdev;
	rtwsta->rtwvif = rtwvif;

	if (rtwdev->support_mlo) {
		rtwsta->links_inst_valid_num = support_link_num;
		support_mld_num = chip->support_macid_num / support_link_num;
	} else {
		rtwsta->links_inst_valid_num = 1;
	}

	for (index = 0; index < rtwsta->links_inst_valid_num; index++) {
		struct rtw89_sta_link *inst = &rtwsta->links_inst[index];

		inst->rtwvif_link = &rtwvif->links_inst[index];

		inst->rtwsta = rtwsta;
		inst->mac_id = mac_id + index * support_mld_num;
	}
}

struct rtw89_vif_link *rtw89_vif_set_link(struct rtw89_vif *rtwvif,
					  unsigned int link_id)
{
	struct rtw89_vif_link *rtwvif_link = rtwvif->links[link_id];
	u8 index;
	int ret;

	if (rtwvif_link)
		return rtwvif_link;

	index = find_first_zero_bit(rtwvif->links_inst_map,
				    rtwvif->links_inst_valid_num);
	if (index == rtwvif->links_inst_valid_num) {
		ret = -EBUSY;
		goto err;
	}

	rtwvif_link = &rtwvif->links_inst[index];
	rtwvif_link->link_id = link_id;

	set_bit(index, rtwvif->links_inst_map);
	rtwvif->links[link_id] = rtwvif_link;
	list_add_tail(&rtwvif_link->dlink_schd, &rtwvif->dlink_pool);
	return rtwvif_link;

err:
	rtw89_err(rtwvif->rtwdev, "vif (link_id %u) failed to set link: %d\n",
		  link_id, ret);
	return NULL;
}

void rtw89_vif_unset_link(struct rtw89_vif *rtwvif, unsigned int link_id)
{
	struct rtw89_vif_link **container = &rtwvif->links[link_id];
	struct rtw89_vif_link *link = *container;
	u8 index;

	if (!link)
		return;

	index = rtw89_vif_link_inst_get_index(link);
	clear_bit(index, rtwvif->links_inst_map);
	*container = NULL;
	list_del(&link->dlink_schd);
}

struct rtw89_sta_link *rtw89_sta_set_link(struct rtw89_sta *rtwsta,
					  unsigned int link_id)
{
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	struct rtw89_vif_link *rtwvif_link = rtwvif->links[link_id];
	struct rtw89_sta_link *rtwsta_link = rtwsta->links[link_id];
	u8 index;
	int ret;

	if (rtwsta_link)
		return rtwsta_link;

	if (!rtwvif_link) {
		ret = -ENOLINK;
		goto err;
	}

	index = rtw89_vif_link_inst_get_index(rtwvif_link);
	if (test_bit(index, rtwsta->links_inst_map)) {
		ret = -EBUSY;
		goto err;
	}

	rtwsta_link = &rtwsta->links_inst[index];
	rtwsta_link->link_id = link_id;

	set_bit(index, rtwsta->links_inst_map);
	rtwsta->links[link_id] = rtwsta_link;
	list_add_tail(&rtwsta_link->dlink_schd, &rtwsta->dlink_pool);
	return rtwsta_link;

err:
	rtw89_err(rtwsta->rtwdev, "sta (link_id %u) failed to set link: %d\n",
		  link_id, ret);
	return NULL;
}

void rtw89_sta_unset_link(struct rtw89_sta *rtwsta, unsigned int link_id)
{
	struct rtw89_sta_link **container = &rtwsta->links[link_id];
	struct rtw89_sta_link *link = *container;
	u8 index;

	if (!link)
		return;

	index = rtw89_sta_link_inst_get_index(link);
	clear_bit(index, rtwsta->links_inst_map);
	*container = NULL;
	list_del(&link->dlink_schd);
}
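
/*
 * Illustrative pairing, not driver code: a vif link must exist for a given
 * link_id before a sta link can be bound to it (rtw89_sta_set_link() fails
 * with -ENOLINK otherwise), and teardown runs in the reverse order.
 *
 *	struct rtw89_vif_link *rtwvif_link;
 *	struct rtw89_sta_link *rtwsta_link;
 *
 *	rtwvif_link = rtw89_vif_set_link(rtwvif, link_id);
 *	if (!rtwvif_link)
 *		return -EBUSY;	// error code choice is illustrative
 *
 *	rtwsta_link = rtw89_sta_set_link(rtwsta, link_id);
 *	if (!rtwsta_link) {
 *		rtw89_vif_unset_link(rtwvif, link_id);
 *		return -ENOLINK;
 *	}
 *	...
 *	rtw89_sta_unset_link(rtwsta, link_id);
 *	rtw89_vif_unset_link(rtwvif, link_id);
 */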
int rtw89_core_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	u8 band;

	INIT_LIST_HEAD(&rtwdev->ba_list);
	INIT_LIST_HEAD(&rtwdev->forbid_ba_list);
	INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
	INIT_LIST_HEAD(&rtwdev->early_h2c_list);
	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
		if (!(rtwdev->chip->support_bands & BIT(band)))
			continue;
		INIT_LIST_HEAD(&rtwdev->scan_info.pkt_list[band]);
	}
	INIT_LIST_HEAD(&rtwdev->scan_info.chan_list);
	INIT_LIST_HEAD(&rtwdev->tx_waits);
	INIT_WORK(&rtwdev->ba_work, rtw89_core_ba_work);
	INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work);
	INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work);
	wiphy_delayed_work_init(&rtwdev->track_work, rtw89_track_work);
	wiphy_delayed_work_init(&rtwdev->track_ps_work, rtw89_track_ps_work);
	wiphy_delayed_work_init(&rtwdev->chanctx_work, rtw89_chanctx_work);
	wiphy_delayed_work_init(&rtwdev->coex_act1_work, rtw89_coex_act1_work);
	wiphy_delayed_work_init(&rtwdev->coex_bt_devinfo_work, rtw89_coex_bt_devinfo_work);
	wiphy_delayed_work_init(&rtwdev->coex_rfk_chk_work, rtw89_coex_rfk_chk_work);
	wiphy_delayed_work_init(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work);
	wiphy_delayed_work_init(&rtwdev->mcc_prepare_done_work, rtw89_mcc_prepare_done_work);
	wiphy_delayed_work_init(&rtwdev->tx_wait_work, rtw89_tx_wait_work);
	INIT_DELAYED_WORK(&rtwdev->forbid_ba_work, rtw89_forbid_ba_work);
	wiphy_delayed_work_init(&rtwdev->antdiv_work, rtw89_phy_antdiv_work);
	rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!rtwdev->txq_wq)
		return -ENOMEM;
	spin_lock_init(&rtwdev->ba_lock);
	spin_lock_init(&rtwdev->rpwm_lock);
	mutex_init(&rtwdev->rf_mutex);
	rtwdev->total_sta_assoc = 0;

	rtw89_init_wait(&rtwdev->mcc.wait);
	rtw89_init_wait(&rtwdev->mlo.wait);
	rtw89_init_wait(&rtwdev->mac.fw_ofld_wait);
	rtw89_init_wait(&rtwdev->wow.wait);
	rtw89_init_wait(&rtwdev->mac.ps_wait);

	wiphy_work_init(&rtwdev->c2h_work, rtw89_fw_c2h_work);
	wiphy_work_init(&rtwdev->ips_work, rtw89_ips_work);
	wiphy_work_init(&rtwdev->cancel_6ghz_probe_work, rtw89_cancel_6ghz_probe_work);
	INIT_WORK(&rtwdev->load_firmware_work, rtw89_load_firmware_work);

	skb_queue_head_init(&rtwdev->c2h_queue);
	rtw89_core_ppdu_sts_init(rtwdev);
	rtw89_traffic_stats_init(rtwdev, &rtwdev->stats);

	rtwdev->hal.rx_fltr = DEFAULT_AX_RX_FLTR;
	rtwdev->dbcc_en = false;
	rtwdev->mlo_dbcc_mode = MLO_DBCC_NOT_SUPPORT;
	rtwdev->mac.qta_mode = RTW89_QTA_SCC;

	if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
		rtwdev->dbcc_en = true;
		rtwdev->mac.qta_mode = RTW89_QTA_DBCC;
		rtwdev->mlo_dbcc_mode = MLO_1_PLUS_1_1RF;
	}

	rtwdev->bbs[RTW89_PHY_0].phy_idx = RTW89_PHY_0;
	rtwdev->bbs[RTW89_PHY_1].phy_idx = RTW89_PHY_1;

	wiphy_work_init(&btc->eapol_notify_work, rtw89_btc_ntfy_eapol_packet_work);
	wiphy_work_init(&btc->arp_notify_work, rtw89_btc_ntfy_arp_packet_work);
	wiphy_work_init(&btc->dhcp_notify_work, rtw89_btc_ntfy_dhcp_packet_work);
	wiphy_work_init(&btc->icmp_notify_work, rtw89_btc_ntfy_icmp_packet_work);

	init_completion(&rtwdev->fw.req.completion);
	init_completion(&rtwdev->rfk_wait.completion);

	schedule_work(&rtwdev->load_firmware_work);

	rtw89_ser_init(rtwdev);
	rtw89_entity_init(rtwdev);
	rtw89_sar_init(rtwdev);
	rtw89_phy_ant_gain_init(rtwdev);

	return 0;
}
EXPORT_SYMBOL(rtw89_core_init);

void rtw89_core_deinit(struct rtw89_dev *rtwdev)
{
	rtw89_ser_deinit(rtwdev);
	rtw89_unload_firmware(rtwdev);
	__rtw89_fw_free_all_early_h2c(rtwdev);

	destroy_workqueue(rtwdev->txq_wq);
	mutex_destroy(&rtwdev->rf_mutex);
}
EXPORT_SYMBOL(rtw89_core_deinit);

void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
			   const u8 *mac_addr, bool hw_scan)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif_link->chanctx_idx);
	struct rtw89_bb_ctx *bb = rtw89_get_bb_ctx(rtwdev, rtwvif_link->phy_idx);

	rtwdev->scanning = true;

	ether_addr_copy(rtwvif_link->mac_addr, mac_addr);
	rtw89_btc_ntfy_scan_start(rtwdev, rtwvif_link->phy_idx, chan->band_type);
	rtw89_chip_rfk_scan(rtwdev, rtwvif_link, true);
	rtw89_hci_recalc_int_mit(rtwdev);
	rtw89_phy_config_edcca(rtwdev, bb, true);
	rtw89_tas_scan(rtwdev, true);

	rtw89_fw_h2c_cam(rtwdev, rtwvif_link, NULL, mac_addr);
}

void rtw89_core_scan_complete(struct rtw89_dev *rtwdev,
			      struct rtw89_vif_link *rtwvif_link, bool hw_scan)
{
	struct ieee80211_bss_conf *bss_conf;
	struct rtw89_bb_ctx *bb;
	int ret;

	if (!rtwvif_link)
		return;

	rcu_read_lock();

	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
	ether_addr_copy(rtwvif_link->mac_addr, bss_conf->addr);

	rcu_read_unlock();

	rtw89_fw_h2c_cam(rtwdev, rtwvif_link, NULL, NULL);

	rtw89_chip_rfk_scan(rtwdev, rtwvif_link, false);
	rtw89_btc_ntfy_scan_finish(rtwdev, rtwvif_link->phy_idx);
	bb = rtw89_get_bb_ctx(rtwdev, rtwvif_link->phy_idx);
	rtw89_phy_config_edcca(rtwdev, bb, false);
	rtw89_tas_scan(rtwdev, false);

	if (hw_scan) {
		ret = rtw89_core_send_nullfunc(rtwdev, rtwvif_link, false, false,
					       RTW89_SCAN_NULL_TIMEOUT);
		if (ret)
			rtw89_debug(rtwdev, RTW89_DBG_TXRX,
				    "scan send null-0 failed: %d\n", ret);
	}

	rtwdev->scanning = false;
	rtw89_for_each_active_bb(rtwdev, bb)
		bb->dig.bypass_dig = true;
	if (hw_scan && (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE))
		wiphy_work_queue(rtwdev->hw->wiphy, &rtwdev->ips_work);
}

static void rtw89_read_chip_ver(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	int ret;
	u8 val;
	u8 cv;

	cv = rtw89_read32_mask(rtwdev, R_AX_SYS_CFG1, B_AX_CHIP_VER_MASK);
	if (chip->chip_id == RTL8852A && cv <= CHIP_CBV) {
		if (rtw89_read32(rtwdev, R_AX_GPIO0_7_FUNC_SEL) == RTW89_R32_DEAD)
			cv = CHIP_CAV;
		else
			cv = CHIP_CBV;
	}

	rtwdev->hal.cv = cv;

	if (rtw89_is_rtl885xb(rtwdev)) {
		ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_CV, &val);
		if (ret)
			return;

		rtwdev->hal.acv = u8_get_bits(val, XTAL_SI_ACV_MASK);
	}
}
static void rtw89_core_setup_phycap(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;

	rtwdev->hal.support_cckpd =
		!(rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv <= CHIP_CBV) &&
		!(rtwdev->chip->chip_id == RTL8852B && rtwdev->hal.cv <= CHIP_CAV);
	rtwdev->hal.support_igi =
		rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv <= CHIP_CBV;

	if (test_bit(RTW89_QUIRK_THERMAL_PROT_120C, rtwdev->quirks))
		rtwdev->hal.thermal_prot_th = chip->thermal_th[1];
	else if (test_bit(RTW89_QUIRK_THERMAL_PROT_110C, rtwdev->quirks))
		rtwdev->hal.thermal_prot_th = chip->thermal_th[0];
	else
		rtwdev->hal.thermal_prot_th = 0;
}

static void rtw89_core_setup_rfe_parms(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_rfe_parms_conf *conf = chip->rfe_parms_conf;
	struct rtw89_efuse *efuse = &rtwdev->efuse;
	const struct rtw89_rfe_parms *sel;
	u8 rfe_type = efuse->rfe_type;

	if (!conf) {
		sel = chip->dflt_parms;
		goto out;
	}

	while (conf->rfe_parms) {
		if (rfe_type == conf->rfe_type) {
			sel = conf->rfe_parms;
			goto out;
		}
		conf++;
	}

	sel = chip->dflt_parms;

out:
	rtwdev->rfe_parms = rtw89_load_rfe_data_from_fw(rtwdev, sel);
	rtw89_load_txpwr_table(rtwdev, rtwdev->rfe_parms->byr_tbl);
}
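
/*
 * Illustrative sketch, not driver code: chip code that needs RFE-type
 * specific parameters points chip_info->rfe_parms_conf at a table terminated
 * by an entry with a NULL .rfe_parms; anything not matching efuse->rfe_type
 * falls back to chip->dflt_parms.  The table and parms names below are
 * hypothetical.
 *
 *	static const struct rtw89_rfe_parms_conf rtw89_example_rfe_parms_conf[] = {
 *		{ .rfe_type = 1, .rfe_parms = &rtw89_example_rfe_parms_type1 },
 *		{ .rfe_type = 5, .rfe_parms = &rtw89_example_rfe_parms_type5 },
 *		{},
 *	};
 */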
int rtw89_core_mlsr_switch(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
			   unsigned int link_id)
{
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
	u16 usable_links = ieee80211_vif_usable_links(vif);
	u16 active_links = vif->active_links;
	struct rtw89_vif_link *target, *cur;
	int ret;

	lockdep_assert_wiphy(rtwdev->hw->wiphy);

	if (unlikely(!ieee80211_vif_is_mld(vif)))
		return -EOPNOTSUPP;

	if (unlikely(link_id >= IEEE80211_MLD_MAX_NUM_LINKS ||
		     !(usable_links & BIT(link_id)))) {
		rtw89_warn(rtwdev, "%s: link id %u is not usable\n", __func__,
			   link_id);
		return -ENOLINK;
	}

	if (active_links == BIT(link_id))
		return 0;

	rtw89_debug(rtwdev, RTW89_DBG_STATE, "%s: switch to link id %u MLSR\n",
		    __func__, link_id);

	rtw89_leave_lps(rtwdev);

	ieee80211_stop_queues(rtwdev->hw);
	flush_work(&rtwdev->txq_work);

	cur = rtw89_get_designated_link(rtwvif);

	ret = ieee80211_set_active_links(vif, active_links | BIT(link_id));
	if (ret) {
		rtw89_err(rtwdev, "%s: failed to activate link id %u\n",
			  __func__, link_id);
		goto wake_queue;
	}

	target = rtwvif->links[link_id];
	if (unlikely(!target)) {
		rtw89_err(rtwdev, "%s: failed to confirm link id %u\n",
			  __func__, link_id);

		ieee80211_set_active_links(vif, active_links);
		ret = -EFAULT;
		goto wake_queue;
	}

	if (likely(cur))
		rtw89_fw_h2c_mlo_link_cfg(rtwdev, cur, false);

	rtw89_fw_h2c_mlo_link_cfg(rtwdev, target, true);

	ret = ieee80211_set_active_links(vif, BIT(link_id));
	if (ret)
		rtw89_err(rtwdev, "%s: failed to inactivate links 0x%x\n",
			  __func__, active_links);

	rtw89_chip_rfk_channel(rtwdev, target);

	rtwvif->mlo_mode = RTW89_MLO_MODE_MLSR;

wake_queue:
	ieee80211_wake_queues(rtwdev->hw);

	return ret;
}

static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	int ret;

	ret = rtw89_mac_partial_init(rtwdev, false);
	if (ret)
		return ret;

	ret = mac->parse_efuse_map(rtwdev);
	if (ret)
		return ret;

	ret = mac->parse_phycap_map(rtwdev);
	if (ret)
		return ret;

	ret = rtw89_mac_setup_phycap(rtwdev);
	if (ret)
		return ret;

	rtw89_core_setup_phycap(rtwdev);

	rtw89_hci_mac_pre_deinit(rtwdev);

	return 0;
}

static int rtw89_chip_board_info_setup(struct rtw89_dev *rtwdev)
{
	rtw89_chip_fem_setup(rtwdev);

	return 0;
}

static bool rtw89_chip_has_rfkill(struct rtw89_dev *rtwdev)
{
	return !!rtwdev->chip->rfkill_init;
}

static void rtw89_core_rfkill_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_rfkill_regs *regs = rtwdev->chip->rfkill_init;

	rtw89_write16_mask(rtwdev, regs->pinmux.addr,
			   regs->pinmux.mask, regs->pinmux.data);
	rtw89_write16_mask(rtwdev, regs->mode.addr,
			   regs->mode.mask, regs->mode.data);
}

static bool rtw89_core_rfkill_get(struct rtw89_dev *rtwdev)
{
	const struct rtw89_reg_def *reg = &rtwdev->chip->rfkill_get;

	return !rtw89_read8_mask(rtwdev, reg->addr, reg->mask);
}

static void rtw89_rfkill_polling_init(struct rtw89_dev *rtwdev)
{
	if (!rtw89_chip_has_rfkill(rtwdev))
		return;

	rtw89_core_rfkill_init(rtwdev);
	rtw89_core_rfkill_poll(rtwdev, true);
	wiphy_rfkill_start_polling(rtwdev->hw->wiphy);
}

static void rtw89_rfkill_polling_deinit(struct rtw89_dev *rtwdev)
{
	if (!rtw89_chip_has_rfkill(rtwdev))
		return;

	wiphy_rfkill_stop_polling(rtwdev->hw->wiphy);
}

void rtw89_core_rfkill_poll(struct rtw89_dev *rtwdev, bool force)
{
	bool prev, blocked;

	if (!rtw89_chip_has_rfkill(rtwdev))
		return;

	prev = test_bit(RTW89_FLAG_HW_RFKILL_STATE, rtwdev->flags);
	blocked = rtw89_core_rfkill_get(rtwdev);

	if (!force && prev == blocked)
		return;

	rtw89_info(rtwdev, "rfkill hardware state changed to %s\n",
		   blocked ? "disable" : "enable");

	if (blocked)
		set_bit(RTW89_FLAG_HW_RFKILL_STATE, rtwdev->flags);
	else
		clear_bit(RTW89_FLAG_HW_RFKILL_STATE, rtwdev->flags);

	wiphy_rfkill_set_hw_state(rtwdev->hw->wiphy, blocked);
}
int rtw89_chip_info_setup(struct rtw89_dev *rtwdev)
{
	int ret;

	rtw89_read_chip_ver(rtwdev);

	ret = rtw89_mac_pwr_on(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to power on\n");
		return ret;
	}

	ret = rtw89_wait_firmware_completion(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to wait firmware completion\n");
		goto out;
	}

	ret = rtw89_fw_recognize(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to recognize firmware\n");
		goto out;
	}

	ret = rtw89_chip_efuse_info_setup(rtwdev);
	if (ret)
		goto out;

	ret = rtw89_fw_recognize_elements(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to recognize firmware elements\n");
		goto out;
	}

	ret = rtw89_chip_board_info_setup(rtwdev);
	if (ret)
		goto out;

	rtw89_core_setup_rfe_parms(rtwdev);
	rtwdev->ps_mode = rtw89_update_ps_mode(rtwdev);

out:
	rtw89_mac_pwr_off(rtwdev);

	return ret;
}
EXPORT_SYMBOL(rtw89_chip_info_setup);

void rtw89_chip_cfg_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
				       struct rtw89_vif_link *rtwvif_link)
{
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct ieee80211_bss_conf *bss_conf;

	rcu_read_lock();

	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false);
	if (!bss_conf->he_support || !vif->cfg.assoc) {
		rcu_read_unlock();
		return;
	}

	rcu_read_unlock();

	if (chip->ops->set_txpwr_ul_tb_offset)
		chip->ops->set_txpwr_ul_tb_offset(rtwdev, 0, rtwvif_link->mac_idx);
}

static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 n = rtwdev->support_mlo ? chip->support_link_num : 1;
	struct ieee80211_hw *hw = rtwdev->hw;
	struct rtw89_efuse *efuse = &rtwdev->efuse;
	struct rtw89_hal *hal = &rtwdev->hal;
	int ret;
	int tx_headroom = IEEE80211_HT_CTL_LEN;

	if (rtwdev->hci.type == RTW89_HCI_TYPE_USB)
		tx_headroom += chip->txwd_body_size + chip->txwd_info_size;

	hw->vif_data_size = struct_size_t(struct rtw89_vif, links_inst, n);
	hw->sta_data_size = struct_size_t(struct rtw89_sta, links_inst, n);
	hw->txq_data_size = sizeof(struct rtw89_txq);
	hw->chanctx_data_size = sizeof(struct rtw89_chanctx_cfg);

	SET_IEEE80211_PERM_ADDR(hw, efuse->addr);

	hw->extra_tx_headroom = tx_headroom;
	hw->queues = IEEE80211_NUM_ACS;
	hw->max_rx_aggregation_subframes = RTW89_MAX_RX_AGG_NUM;
	hw->max_tx_aggregation_subframes = RTW89_MAX_TX_AGG_NUM;
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
				    IEEE80211_RADIOTAP_MCS_HAVE_STBC;
	hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, HAS_RATE_CONTROL);
	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, RX_INCLUDES_FCS);
	ieee80211_hw_set(hw, TX_AMSDU);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_PS);
	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
	ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
	ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
	ieee80211_hw_set(hw, WANT_MONITOR_VIF);
	ieee80211_hw_set(hw, CHANCTX_STA_CSA);

	if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160))
		ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);

	if (RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
		ieee80211_hw_set(hw, CONNECTION_MONITOR);

	if (RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &rtwdev->fw))
		ieee80211_hw_set(hw, AP_LINK_PS);

	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
				     BIT(NL80211_IFTYPE_AP) |
				     BIT(NL80211_IFTYPE_P2P_CLIENT) |
				     BIT(NL80211_IFTYPE_P2P_GO);

	if (hal->ant_diversity) {
		hw->wiphy->available_antennas_tx = 0x3;
		hw->wiphy->available_antennas_rx = 0x3;
	} else {
		hw->wiphy->available_antennas_tx = BIT(rtwdev->chip->rf_path_num) - 1;
		hw->wiphy->available_antennas_rx = BIT(rtwdev->chip->rf_path_num) - 1;
	}

	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
			    WIPHY_FLAG_TDLS_EXTERNAL_SETUP |
			    WIPHY_FLAG_AP_UAPSD |
			    WIPHY_FLAG_HAS_CHANNEL_SWITCH |
			    WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;

	if (!chip->support_rnr)
		hw->wiphy->flags |= WIPHY_FLAG_SPLIT_SCAN_6GHZ;

	if (chip->chip_gen == RTW89_CHIP_BE)
		hw->wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT;

	if (rtwdev->support_mlo) {
		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
		hw->wiphy->iftype_ext_capab = rtw89_iftypes_ext_capa;
		hw->wiphy->num_iftype_ext_capab = ARRAY_SIZE(rtw89_iftypes_ext_capa);
	}

	hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;

	hw->wiphy->max_scan_ssids = RTW89_SCANOFLD_MAX_SSID;
	hw->wiphy->max_scan_ie_len = RTW89_SCANOFLD_MAX_IE_LEN;

#ifdef CONFIG_PM
	hw->wiphy->wowlan = rtwdev->chip->wowlan_stub;
	hw->wiphy->max_sched_scan_ssids = RTW89_SCANOFLD_MAX_SSID;
#endif

	hw->wiphy->tid_config_support.vif |= BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL);
	hw->wiphy->tid_config_support.peer |= BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL);
	hw->wiphy->tid_config_support.vif |= BIT(NL80211_TID_CONFIG_ATTR_AMSDU_CTRL);
	hw->wiphy->tid_config_support.peer |= BIT(NL80211_TID_CONFIG_ATTR_AMSDU_CTRL);
	hw->wiphy->max_remain_on_channel_duration = 1000;

	wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
	wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SCAN_RANDOM_SN);
	wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL);

	ret = rtw89_core_set_supported_band(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to set supported band\n");
		return ret;
	}

	ret = rtw89_regd_setup(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to set up regd\n");
		return ret;
	}

	hw->wiphy->sar_capa = &rtw89_sar_capa;

	ret = ieee80211_register_hw(hw);
	if (ret) {
		rtw89_err(rtwdev, "failed to register hw\n");
		return ret;
	}

	ret = rtw89_regd_init_hint(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to init regd\n");
		goto err_unregister_hw;
	}

	rtw89_rfkill_polling_init(rtwdev);

	return 0;

err_unregister_hw:
	ieee80211_unregister_hw(hw);

	return ret;
}

static void rtw89_core_unregister_hw(struct rtw89_dev *rtwdev)
{
	struct ieee80211_hw *hw = rtwdev->hw;

	rtw89_rfkill_polling_deinit(rtwdev);
	ieee80211_unregister_hw(hw);
}

int rtw89_core_register(struct rtw89_dev *rtwdev)
{
	int ret;

	ret = rtw89_core_register_hw(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to register core hw\n");
		return ret;
	}

	rtw89_phy_dm_init_data(rtwdev);
	rtw89_debugfs_init(rtwdev);

	return 0;
}
EXPORT_SYMBOL(rtw89_core_register);

void rtw89_core_unregister(struct rtw89_dev *rtwdev)
{
	rtw89_core_unregister_hw(rtwdev);

	rtw89_debugfs_deinit(rtwdev);
}
EXPORT_SYMBOL(rtw89_core_unregister);
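
/*
 * Illustrative probe-time ordering, not code from this file: bus glue (PCI,
 * USB, ...) is expected to pair the exported entry points roughly as below,
 * with bus-specific HCI setup interleaved and teardown running in reverse
 * order.  Treat this as a sketch of the intended call sequence rather than a
 * definitive reference.
 *
 *	rtwdev = rtw89_alloc_ieee80211_hw(dev, bus_data_size, chip, variant);
 *	ret = rtw89_core_init(rtwdev);
 *	// ... bus/HCI resource setup ...
 *	ret = rtw89_chip_info_setup(rtwdev);
 *	ret = rtw89_core_register(rtwdev);
 *
 *	rtw89_core_unregister(rtwdev);
 *	rtw89_core_deinit(rtwdev);
 *	rtw89_free_ieee80211_hw(rtwdev);
 */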
struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
					   u32 bus_data_size,
					   const struct rtw89_chip_info *chip,
					   const struct rtw89_chip_variant *variant)
{
	struct rtw89_fw_info early_fw = {};
	const struct firmware *firmware;
	struct ieee80211_hw *hw;
	struct rtw89_dev *rtwdev;
	struct ieee80211_ops *ops;
	u32 driver_data_size;
	int fw_format = -1;
	bool support_mlo;
	bool no_chanctx;

	firmware = rtw89_early_fw_feature_recognize(device, chip, &early_fw, &fw_format);

	ops = kmemdup(&rtw89_ops, sizeof(rtw89_ops), GFP_KERNEL);
	if (!ops)
		goto err;

	no_chanctx = chip->support_chanctx_num == 0 ||
		     !RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &early_fw) ||
		     !RTW89_CHK_FW_FEATURE(BEACON_FILTER, &early_fw);

	if (no_chanctx) {
		ops->add_chanctx = ieee80211_emulate_add_chanctx;
		ops->remove_chanctx = ieee80211_emulate_remove_chanctx;
		ops->change_chanctx = ieee80211_emulate_change_chanctx;
		ops->switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx;
		ops->assign_vif_chanctx = NULL;
		ops->unassign_vif_chanctx = NULL;
		ops->remain_on_channel = NULL;
		ops->cancel_remain_on_channel = NULL;
	}

	if (!chip->support_noise)
		ops->get_survey = NULL;

	driver_data_size = sizeof(struct rtw89_dev) + bus_data_size;
	hw = ieee80211_alloc_hw(driver_data_size, ops);
	if (!hw)
		goto err;

	/* Currently, our AP_LINK_PS handling only works for non-MLD softap
	 * or MLD-single-link softap. If RTW89_MLD_NON_STA_LINK_NUM enlarges,
	 * please tweak entire AP_LINKS_PS handling before supporting MLO.
	 */
	support_mlo = !no_chanctx && chip->support_link_num &&
		      RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &early_fw) &&
		      RTW89_MLD_NON_STA_LINK_NUM == 1;

	hw->wiphy->iface_combinations = rtw89_iface_combs;

	if (no_chanctx || chip->support_chanctx_num == 1)
		hw->wiphy->n_iface_combinations = 1;
	else
		hw->wiphy->n_iface_combinations = ARRAY_SIZE(rtw89_iface_combs);

	rtwdev = hw->priv;
	rtwdev->hw = hw;
	rtwdev->dev = device;
	rtwdev->ops = ops;
	rtwdev->chip = chip;
	rtwdev->variant = variant;
	rtwdev->fw.req.firmware = firmware;
	rtwdev->fw.fw_format = fw_format;
	rtwdev->support_mlo = support_mlo;

	rtw89_debug(rtwdev, RTW89_DBG_CHAN, "probe driver %s chanctx\n",
		    no_chanctx ? "without" : "with");
	rtw89_debug(rtwdev, RTW89_DBG_CHAN, "probe driver %s MLO cap\n",
		    support_mlo ? "with" : "without");

	return rtwdev;

err:
	kfree(ops);
	release_firmware(firmware);
	return NULL;
}
EXPORT_SYMBOL(rtw89_alloc_ieee80211_hw);

void rtw89_free_ieee80211_hw(struct rtw89_dev *rtwdev)
{
	kfree(rtwdev->ops);
	kfree(rtwdev->rfe_data);
	release_firmware(rtwdev->fw.req.firmware);
	ieee80211_free_hw(rtwdev->hw);
}
EXPORT_SYMBOL(rtw89_free_ieee80211_hw);

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ax wireless core module");
MODULE_LICENSE("Dual BSD/GPL");