1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2019-2020 Realtek Corporation 3 */ 4 5 #if defined(__FreeBSD__) 6 #define LINUXKPI_PARAM_PREFIX rtw89_ 7 #endif 8 9 #include <linux/ip.h> 10 #include <linux/udp.h> 11 12 #include "cam.h" 13 #include "chan.h" 14 #include "coex.h" 15 #include "core.h" 16 #include "efuse.h" 17 #include "fw.h" 18 #include "mac.h" 19 #include "phy.h" 20 #include "ps.h" 21 #include "reg.h" 22 #include "sar.h" 23 #include "ser.h" 24 #include "txrx.h" 25 #include "util.h" 26 27 static bool rtw89_disable_ps_mode; 28 module_param_named(disable_ps_mode, rtw89_disable_ps_mode, bool, 0644); 29 MODULE_PARM_DESC(disable_ps_mode, "Set Y to disable low power mode"); 30 31 #define RTW89_DEF_CHAN(_freq, _hw_val, _flags, _band) \ 32 { .center_freq = _freq, .hw_value = _hw_val, .flags = _flags, .band = _band, } 33 #define RTW89_DEF_CHAN_2G(_freq, _hw_val) \ 34 RTW89_DEF_CHAN(_freq, _hw_val, 0, NL80211_BAND_2GHZ) 35 #define RTW89_DEF_CHAN_5G(_freq, _hw_val) \ 36 RTW89_DEF_CHAN(_freq, _hw_val, 0, NL80211_BAND_5GHZ) 37 #define RTW89_DEF_CHAN_5G_NO_HT40MINUS(_freq, _hw_val) \ 38 RTW89_DEF_CHAN(_freq, _hw_val, IEEE80211_CHAN_NO_HT40MINUS, NL80211_BAND_5GHZ) 39 #define RTW89_DEF_CHAN_6G(_freq, _hw_val) \ 40 RTW89_DEF_CHAN(_freq, _hw_val, 0, NL80211_BAND_6GHZ) 41 42 static struct ieee80211_channel rtw89_channels_2ghz[] = { 43 RTW89_DEF_CHAN_2G(2412, 1), 44 RTW89_DEF_CHAN_2G(2417, 2), 45 RTW89_DEF_CHAN_2G(2422, 3), 46 RTW89_DEF_CHAN_2G(2427, 4), 47 RTW89_DEF_CHAN_2G(2432, 5), 48 RTW89_DEF_CHAN_2G(2437, 6), 49 RTW89_DEF_CHAN_2G(2442, 7), 50 RTW89_DEF_CHAN_2G(2447, 8), 51 RTW89_DEF_CHAN_2G(2452, 9), 52 RTW89_DEF_CHAN_2G(2457, 10), 53 RTW89_DEF_CHAN_2G(2462, 11), 54 RTW89_DEF_CHAN_2G(2467, 12), 55 RTW89_DEF_CHAN_2G(2472, 13), 56 RTW89_DEF_CHAN_2G(2484, 14), 57 }; 58 59 static struct ieee80211_channel rtw89_channels_5ghz[] = { 60 RTW89_DEF_CHAN_5G(5180, 36), 61 RTW89_DEF_CHAN_5G(5200, 40), 62 RTW89_DEF_CHAN_5G(5220, 44), 63 RTW89_DEF_CHAN_5G(5240, 48), 64 RTW89_DEF_CHAN_5G(5260, 52), 65 RTW89_DEF_CHAN_5G(5280, 56), 66 RTW89_DEF_CHAN_5G(5300, 60), 67 RTW89_DEF_CHAN_5G(5320, 64), 68 RTW89_DEF_CHAN_5G(5500, 100), 69 RTW89_DEF_CHAN_5G(5520, 104), 70 RTW89_DEF_CHAN_5G(5540, 108), 71 RTW89_DEF_CHAN_5G(5560, 112), 72 RTW89_DEF_CHAN_5G(5580, 116), 73 RTW89_DEF_CHAN_5G(5600, 120), 74 RTW89_DEF_CHAN_5G(5620, 124), 75 RTW89_DEF_CHAN_5G(5640, 128), 76 RTW89_DEF_CHAN_5G(5660, 132), 77 RTW89_DEF_CHAN_5G(5680, 136), 78 RTW89_DEF_CHAN_5G(5700, 140), 79 RTW89_DEF_CHAN_5G(5720, 144), 80 RTW89_DEF_CHAN_5G(5745, 149), 81 RTW89_DEF_CHAN_5G(5765, 153), 82 RTW89_DEF_CHAN_5G(5785, 157), 83 RTW89_DEF_CHAN_5G(5805, 161), 84 RTW89_DEF_CHAN_5G_NO_HT40MINUS(5825, 165), 85 }; 86 87 static struct ieee80211_channel rtw89_channels_6ghz[] = { 88 RTW89_DEF_CHAN_6G(5955, 1), 89 RTW89_DEF_CHAN_6G(5975, 5), 90 RTW89_DEF_CHAN_6G(5995, 9), 91 RTW89_DEF_CHAN_6G(6015, 13), 92 RTW89_DEF_CHAN_6G(6035, 17), 93 RTW89_DEF_CHAN_6G(6055, 21), 94 RTW89_DEF_CHAN_6G(6075, 25), 95 RTW89_DEF_CHAN_6G(6095, 29), 96 RTW89_DEF_CHAN_6G(6115, 33), 97 RTW89_DEF_CHAN_6G(6135, 37), 98 RTW89_DEF_CHAN_6G(6155, 41), 99 RTW89_DEF_CHAN_6G(6175, 45), 100 RTW89_DEF_CHAN_6G(6195, 49), 101 RTW89_DEF_CHAN_6G(6215, 53), 102 RTW89_DEF_CHAN_6G(6235, 57), 103 RTW89_DEF_CHAN_6G(6255, 61), 104 RTW89_DEF_CHAN_6G(6275, 65), 105 RTW89_DEF_CHAN_6G(6295, 69), 106 RTW89_DEF_CHAN_6G(6315, 73), 107 RTW89_DEF_CHAN_6G(6335, 77), 108 RTW89_DEF_CHAN_6G(6355, 81), 109 RTW89_DEF_CHAN_6G(6375, 85), 110 RTW89_DEF_CHAN_6G(6395, 89), 111 RTW89_DEF_CHAN_6G(6415, 93), 112 RTW89_DEF_CHAN_6G(6435, 97), 113 RTW89_DEF_CHAN_6G(6455, 101), 114 RTW89_DEF_CHAN_6G(6475, 105), 115 RTW89_DEF_CHAN_6G(6495, 109), 116 RTW89_DEF_CHAN_6G(6515, 113), 117 RTW89_DEF_CHAN_6G(6535, 117), 118 RTW89_DEF_CHAN_6G(6555, 121), 119 RTW89_DEF_CHAN_6G(6575, 125), 120 RTW89_DEF_CHAN_6G(6595, 129), 121 RTW89_DEF_CHAN_6G(6615, 133), 122 RTW89_DEF_CHAN_6G(6635, 137), 123 RTW89_DEF_CHAN_6G(6655, 141), 124 RTW89_DEF_CHAN_6G(6675, 145), 125 RTW89_DEF_CHAN_6G(6695, 149), 126 RTW89_DEF_CHAN_6G(6715, 153), 127 RTW89_DEF_CHAN_6G(6735, 157), 128 RTW89_DEF_CHAN_6G(6755, 161), 129 RTW89_DEF_CHAN_6G(6775, 165), 130 RTW89_DEF_CHAN_6G(6795, 169), 131 RTW89_DEF_CHAN_6G(6815, 173), 132 RTW89_DEF_CHAN_6G(6835, 177), 133 RTW89_DEF_CHAN_6G(6855, 181), 134 RTW89_DEF_CHAN_6G(6875, 185), 135 RTW89_DEF_CHAN_6G(6895, 189), 136 RTW89_DEF_CHAN_6G(6915, 193), 137 RTW89_DEF_CHAN_6G(6935, 197), 138 RTW89_DEF_CHAN_6G(6955, 201), 139 RTW89_DEF_CHAN_6G(6975, 205), 140 RTW89_DEF_CHAN_6G(6995, 209), 141 RTW89_DEF_CHAN_6G(7015, 213), 142 RTW89_DEF_CHAN_6G(7035, 217), 143 RTW89_DEF_CHAN_6G(7055, 221), 144 RTW89_DEF_CHAN_6G(7075, 225), 145 RTW89_DEF_CHAN_6G(7095, 229), 146 RTW89_DEF_CHAN_6G(7115, 233), 147 }; 148 149 static struct ieee80211_rate rtw89_bitrates[] = { 150 { .bitrate = 10, .hw_value = 0x00, }, 151 { .bitrate = 20, .hw_value = 0x01, }, 152 { .bitrate = 55, .hw_value = 0x02, }, 153 { .bitrate = 110, .hw_value = 0x03, }, 154 { .bitrate = 60, .hw_value = 0x04, }, 155 { .bitrate = 90, .hw_value = 0x05, }, 156 { .bitrate = 120, .hw_value = 0x06, }, 157 { .bitrate = 180, .hw_value = 0x07, }, 158 { .bitrate = 240, .hw_value = 0x08, }, 159 { .bitrate = 360, .hw_value = 0x09, }, 160 { .bitrate = 480, .hw_value = 0x0a, }, 161 { .bitrate = 540, .hw_value = 0x0b, }, 162 }; 163 164 bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate) 165 { 166 struct ieee80211_rate rate; 167 168 if (unlikely(rpt_rate >= ARRAY_SIZE(rtw89_bitrates))) { 169 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "invalid rpt rate %d\n", rpt_rate); 170 return false; 171 } 172 173 rate = rtw89_bitrates[rpt_rate]; 174 *bitrate = rate.bitrate; 175 176 return true; 177 } 178 179 static struct ieee80211_supported_band rtw89_sband_2ghz = { 180 .band = NL80211_BAND_2GHZ, 181 .channels = rtw89_channels_2ghz, 182 .n_channels = ARRAY_SIZE(rtw89_channels_2ghz), 183 .bitrates = rtw89_bitrates, 184 .n_bitrates = ARRAY_SIZE(rtw89_bitrates), 185 .ht_cap = {0}, 186 .vht_cap = {0}, 187 }; 188 189 static struct ieee80211_supported_band rtw89_sband_5ghz = { 190 .band = NL80211_BAND_5GHZ, 191 .channels = rtw89_channels_5ghz, 192 .n_channels = ARRAY_SIZE(rtw89_channels_5ghz), 193 194 /* 5G has no CCK rates, 1M/2M/5.5M/11M */ 195 .bitrates = rtw89_bitrates + 4, 196 .n_bitrates = ARRAY_SIZE(rtw89_bitrates) - 4, 197 .ht_cap = {0}, 198 .vht_cap = {0}, 199 }; 200 201 static struct ieee80211_supported_band rtw89_sband_6ghz = { 202 .band = NL80211_BAND_6GHZ, 203 .channels = rtw89_channels_6ghz, 204 .n_channels = ARRAY_SIZE(rtw89_channels_6ghz), 205 206 /* 6G has no CCK rates, 1M/2M/5.5M/11M */ 207 .bitrates = rtw89_bitrates + 4, 208 .n_bitrates = ARRAY_SIZE(rtw89_bitrates) - 4, 209 }; 210 211 static void rtw89_traffic_stats_accu(struct rtw89_dev *rtwdev, 212 struct rtw89_traffic_stats *stats, 213 struct sk_buff *skb, bool tx) 214 { 215 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 216 217 if (!ieee80211_is_data(hdr->frame_control)) 218 return; 219 220 if (is_broadcast_ether_addr(hdr->addr1) || 221 is_multicast_ether_addr(hdr->addr1)) 222 return; 223 224 if (tx) { 225 stats->tx_cnt++; 226 stats->tx_unicast += skb->len; 227 } else { 228 stats->rx_cnt++; 229 stats->rx_unicast += skb->len; 230 } 231 } 232 233 void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef) 234 { 235 cfg80211_chandef_create(chandef, &rtw89_channels_2ghz[0], 236 NL80211_CHAN_NO_HT); 237 } 238 239 static void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef, 240 struct rtw89_chan *chan) 241 { 242 struct ieee80211_channel *channel = chandef->chan; 243 enum nl80211_chan_width width = chandef->width; 244 u32 primary_freq, center_freq; 245 u8 center_chan; 246 u8 bandwidth = RTW89_CHANNEL_WIDTH_20; 247 u32 offset; 248 u8 band; 249 250 center_chan = channel->hw_value; 251 primary_freq = channel->center_freq; 252 center_freq = chandef->center_freq1; 253 254 switch (width) { 255 case NL80211_CHAN_WIDTH_20_NOHT: 256 case NL80211_CHAN_WIDTH_20: 257 bandwidth = RTW89_CHANNEL_WIDTH_20; 258 break; 259 case NL80211_CHAN_WIDTH_40: 260 bandwidth = RTW89_CHANNEL_WIDTH_40; 261 if (primary_freq > center_freq) { 262 center_chan -= 2; 263 } else { 264 center_chan += 2; 265 } 266 break; 267 case NL80211_CHAN_WIDTH_80: 268 case NL80211_CHAN_WIDTH_160: 269 bandwidth = nl_to_rtw89_bandwidth(width); 270 if (primary_freq > center_freq) { 271 offset = (primary_freq - center_freq - 10) / 20; 272 center_chan -= 2 + offset * 4; 273 } else { 274 offset = (center_freq - primary_freq - 10) / 20; 275 center_chan += 2 + offset * 4; 276 } 277 break; 278 default: 279 center_chan = 0; 280 break; 281 } 282 283 switch (channel->band) { 284 default: 285 case NL80211_BAND_2GHZ: 286 band = RTW89_BAND_2G; 287 break; 288 case NL80211_BAND_5GHZ: 289 band = RTW89_BAND_5G; 290 break; 291 case NL80211_BAND_6GHZ: 292 band = RTW89_BAND_6G; 293 break; 294 } 295 296 rtw89_chan_create(chan, center_chan, channel->hw_value, band, bandwidth); 297 } 298 299 void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev) 300 { 301 const struct rtw89_chip_info *chip = rtwdev->chip; 302 const struct rtw89_chan *chan; 303 enum rtw89_sub_entity_idx sub_entity_idx; 304 enum rtw89_phy_idx phy_idx; 305 enum rtw89_entity_mode mode; 306 bool entity_active; 307 308 entity_active = rtw89_get_entity_state(rtwdev); 309 if (!entity_active) 310 return; 311 312 mode = rtw89_get_entity_mode(rtwdev); 313 if (WARN(mode != RTW89_ENTITY_MODE_SCC, "Invalid ent mode: %d\n", mode)) 314 return; 315 316 sub_entity_idx = RTW89_SUB_ENTITY_0; 317 phy_idx = RTW89_PHY_0; 318 chan = rtw89_chan_get(rtwdev, sub_entity_idx); 319 if (chip->ops->set_txpwr) 320 chip->ops->set_txpwr(rtwdev, chan, phy_idx); 321 } 322 323 void rtw89_set_channel(struct rtw89_dev *rtwdev) 324 { 325 const struct rtw89_chip_info *chip = rtwdev->chip; 326 const struct cfg80211_chan_def *chandef; 327 enum rtw89_sub_entity_idx sub_entity_idx; 328 enum rtw89_mac_idx mac_idx; 329 enum rtw89_phy_idx phy_idx; 330 struct rtw89_chan chan; 331 struct rtw89_channel_help_params bak; 332 enum rtw89_entity_mode mode; 333 bool band_changed; 334 bool entity_active; 335 336 entity_active = rtw89_get_entity_state(rtwdev); 337 338 mode = rtw89_entity_recalc(rtwdev); 339 if (WARN(mode != RTW89_ENTITY_MODE_SCC, "Invalid ent mode: %d\n", mode)) 340 return; 341 342 sub_entity_idx = RTW89_SUB_ENTITY_0; 343 mac_idx = RTW89_MAC_0; 344 phy_idx = RTW89_PHY_0; 345 chandef = rtw89_chandef_get(rtwdev, sub_entity_idx); 346 rtw89_get_channel_params(chandef, &chan); 347 if (WARN(chan.channel == 0, "Invalid channel\n")) 348 return; 349 350 band_changed = rtw89_assign_entity_chan(rtwdev, sub_entity_idx, &chan); 351 352 rtw89_chip_set_channel_prepare(rtwdev, &bak, &chan, mac_idx, phy_idx); 353 354 chip->ops->set_channel(rtwdev, &chan, mac_idx, phy_idx); 355 356 rtw89_core_set_chip_txpwr(rtwdev); 357 358 rtw89_chip_set_channel_done(rtwdev, &bak, &chan, mac_idx, phy_idx); 359 360 if (!entity_active || band_changed) { 361 rtw89_btc_ntfy_switch_band(rtwdev, phy_idx, chan.band_type); 362 rtw89_chip_rfk_band_changed(rtwdev, phy_idx); 363 } 364 365 rtw89_set_entity_state(rtwdev, true); 366 } 367 368 static enum rtw89_core_tx_type 369 rtw89_core_get_tx_type(struct rtw89_dev *rtwdev, 370 struct sk_buff *skb) 371 { 372 struct ieee80211_hdr *hdr = (void *)skb->data; 373 __le16 fc = hdr->frame_control; 374 375 if (ieee80211_is_mgmt(fc) || ieee80211_is_nullfunc(fc)) 376 return RTW89_CORE_TX_TYPE_MGMT; 377 378 return RTW89_CORE_TX_TYPE_DATA; 379 } 380 381 static void 382 rtw89_core_tx_update_ampdu_info(struct rtw89_dev *rtwdev, 383 struct rtw89_core_tx_request *tx_req, 384 enum btc_pkt_type pkt_type) 385 { 386 struct ieee80211_sta *sta = tx_req->sta; 387 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 388 struct sk_buff *skb = tx_req->skb; 389 struct rtw89_sta *rtwsta; 390 u8 ampdu_num; 391 u8 tid; 392 393 if (pkt_type == PACKET_EAPOL) { 394 desc_info->bk = true; 395 return; 396 } 397 398 if (!(IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU)) 399 return; 400 401 if (!sta) { 402 rtw89_warn(rtwdev, "cannot set ampdu info without sta\n"); 403 return; 404 } 405 406 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; 407 rtwsta = (struct rtw89_sta *)sta->drv_priv; 408 409 ampdu_num = (u8)((rtwsta->ampdu_params[tid].agg_num ? 410 rtwsta->ampdu_params[tid].agg_num : 411 4 << sta->deflink.ht_cap.ampdu_factor) - 1); 412 413 desc_info->agg_en = true; 414 desc_info->ampdu_density = sta->deflink.ht_cap.ampdu_density; 415 desc_info->ampdu_num = ampdu_num; 416 } 417 418 static void 419 rtw89_core_tx_update_sec_key(struct rtw89_dev *rtwdev, 420 struct rtw89_core_tx_request *tx_req) 421 { 422 const struct rtw89_chip_info *chip = rtwdev->chip; 423 struct ieee80211_vif *vif = tx_req->vif; 424 struct ieee80211_sta *sta = tx_req->sta; 425 struct ieee80211_tx_info *info; 426 struct ieee80211_key_conf *key; 427 struct rtw89_vif *rtwvif; 428 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); 429 struct rtw89_addr_cam_entry *addr_cam; 430 struct rtw89_sec_cam_entry *sec_cam; 431 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 432 struct sk_buff *skb = tx_req->skb; 433 u8 sec_type = RTW89_SEC_KEY_TYPE_NONE; 434 u64 pn64; 435 436 if (!vif) { 437 rtw89_warn(rtwdev, "cannot set sec key without vif\n"); 438 return; 439 } 440 441 rtwvif = (struct rtw89_vif *)vif->drv_priv; 442 addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta); 443 444 info = IEEE80211_SKB_CB(skb); 445 key = info->control.hw_key; 446 sec_cam = addr_cam->sec_entries[key->hw_key_idx]; 447 if (!sec_cam) { 448 rtw89_warn(rtwdev, "sec cam entry is empty\n"); 449 return; 450 } 451 452 switch (key->cipher) { 453 case WLAN_CIPHER_SUITE_WEP40: 454 sec_type = RTW89_SEC_KEY_TYPE_WEP40; 455 break; 456 case WLAN_CIPHER_SUITE_WEP104: 457 sec_type = RTW89_SEC_KEY_TYPE_WEP104; 458 break; 459 case WLAN_CIPHER_SUITE_TKIP: 460 sec_type = RTW89_SEC_KEY_TYPE_TKIP; 461 break; 462 case WLAN_CIPHER_SUITE_CCMP: 463 sec_type = RTW89_SEC_KEY_TYPE_CCMP128; 464 break; 465 case WLAN_CIPHER_SUITE_CCMP_256: 466 sec_type = RTW89_SEC_KEY_TYPE_CCMP256; 467 break; 468 case WLAN_CIPHER_SUITE_GCMP: 469 sec_type = RTW89_SEC_KEY_TYPE_GCMP128; 470 break; 471 case WLAN_CIPHER_SUITE_GCMP_256: 472 sec_type = RTW89_SEC_KEY_TYPE_GCMP256; 473 break; 474 default: 475 rtw89_warn(rtwdev, "key cipher not supported %d\n", key->cipher); 476 return; 477 } 478 479 desc_info->sec_en = true; 480 desc_info->sec_keyid = key->keyidx; 481 desc_info->sec_type = sec_type; 482 desc_info->sec_cam_idx = sec_cam->sec_cam_idx; 483 484 if (!chip->hw_sec_hdr) 485 return; 486 487 pn64 = atomic64_inc_return(&key->tx_pn); 488 desc_info->sec_seq[0] = pn64; 489 desc_info->sec_seq[1] = pn64 >> 8; 490 desc_info->sec_seq[2] = pn64 >> 16; 491 desc_info->sec_seq[3] = pn64 >> 24; 492 desc_info->sec_seq[4] = pn64 >> 32; 493 desc_info->sec_seq[5] = pn64 >> 40; 494 desc_info->wp_offset = 1; /* in unit of 8 bytes for security header */ 495 } 496 497 static u16 rtw89_core_get_mgmt_rate(struct rtw89_dev *rtwdev, 498 struct rtw89_core_tx_request *tx_req) 499 { 500 struct sk_buff *skb = tx_req->skb; 501 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 502 struct ieee80211_vif *vif = tx_info->control.vif; 503 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 504 u16 lowest_rate = chan->band_type == RTW89_BAND_2G ? 505 RTW89_HW_RATE_CCK1 : RTW89_HW_RATE_OFDM6; 506 507 if (!vif || !vif->bss_conf.basic_rates || !tx_req->sta) 508 return lowest_rate; 509 510 return __ffs(vif->bss_conf.basic_rates) + lowest_rate; 511 } 512 513 static void 514 rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev, 515 struct rtw89_core_tx_request *tx_req) 516 { 517 struct ieee80211_vif *vif = tx_req->vif; 518 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 519 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 520 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 521 u8 qsel, ch_dma; 522 523 qsel = desc_info->hiq ? RTW89_TX_QSEL_B0_HI : RTW89_TX_QSEL_B0_MGMT; 524 ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel); 525 526 desc_info->qsel = qsel; 527 desc_info->ch_dma = ch_dma; 528 desc_info->port = desc_info->hiq ? rtwvif->port : 0; 529 desc_info->hw_ssn_sel = RTW89_MGMT_HW_SSN_SEL; 530 desc_info->hw_seq_mode = RTW89_MGMT_HW_SEQ_MODE; 531 532 /* fixed data rate for mgmt frames */ 533 desc_info->en_wd_info = true; 534 desc_info->use_rate = true; 535 desc_info->dis_data_fb = true; 536 desc_info->data_rate = rtw89_core_get_mgmt_rate(rtwdev, tx_req); 537 538 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 539 "tx mgmt frame with rate 0x%x on channel %d (band %d, bw %d)\n", 540 desc_info->data_rate, chan->channel, chan->band_type, 541 chan->band_width); 542 } 543 544 static void 545 rtw89_core_tx_update_h2c_info(struct rtw89_dev *rtwdev, 546 struct rtw89_core_tx_request *tx_req) 547 { 548 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 549 550 desc_info->is_bmc = false; 551 desc_info->wd_page = false; 552 desc_info->ch_dma = RTW89_DMA_H2C; 553 } 554 555 static void rtw89_core_get_no_ul_ofdma_htc(struct rtw89_dev *rtwdev, __le32 *htc) 556 { 557 static const u8 rtw89_bandwidth_to_om[] = { 558 [RTW89_CHANNEL_WIDTH_20] = HTC_OM_CHANNEL_WIDTH_20, 559 [RTW89_CHANNEL_WIDTH_40] = HTC_OM_CHANNEL_WIDTH_40, 560 [RTW89_CHANNEL_WIDTH_80] = HTC_OM_CHANNEL_WIDTH_80, 561 [RTW89_CHANNEL_WIDTH_160] = HTC_OM_CHANNEL_WIDTH_160_OR_80_80, 562 [RTW89_CHANNEL_WIDTH_80_80] = HTC_OM_CHANNEL_WIDTH_160_OR_80_80, 563 }; 564 const struct rtw89_chip_info *chip = rtwdev->chip; 565 struct rtw89_hal *hal = &rtwdev->hal; 566 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 567 u8 om_bandwidth; 568 569 if (!chip->dis_2g_40m_ul_ofdma || 570 chan->band_type != RTW89_BAND_2G || 571 chan->band_width != RTW89_CHANNEL_WIDTH_40) 572 return; 573 574 om_bandwidth = chan->band_width < ARRAY_SIZE(rtw89_bandwidth_to_om) ? 575 rtw89_bandwidth_to_om[chan->band_width] : 0; 576 *htc = le32_encode_bits(RTW89_HTC_VARIANT_HE, RTW89_HTC_MASK_VARIANT) | 577 le32_encode_bits(RTW89_HTC_VARIANT_HE_CID_OM, RTW89_HTC_MASK_CTL_ID) | 578 le32_encode_bits(hal->rx_nss - 1, RTW89_HTC_MASK_HTC_OM_RX_NSS) | 579 le32_encode_bits(om_bandwidth, RTW89_HTC_MASK_HTC_OM_CH_WIDTH) | 580 le32_encode_bits(1, RTW89_HTC_MASK_HTC_OM_UL_MU_DIS) | 581 le32_encode_bits(hal->tx_nss - 1, RTW89_HTC_MASK_HTC_OM_TX_NSTS) | 582 le32_encode_bits(0, RTW89_HTC_MASK_HTC_OM_ER_SU_DIS) | 583 le32_encode_bits(0, RTW89_HTC_MASK_HTC_OM_DL_MU_MIMO_RR) | 584 le32_encode_bits(0, RTW89_HTC_MASK_HTC_OM_UL_MU_DATA_DIS); 585 } 586 587 static bool 588 __rtw89_core_tx_check_he_qos_htc(struct rtw89_dev *rtwdev, 589 struct rtw89_core_tx_request *tx_req, 590 enum btc_pkt_type pkt_type) 591 { 592 struct ieee80211_sta *sta = tx_req->sta; 593 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); 594 struct sk_buff *skb = tx_req->skb; 595 struct ieee80211_hdr *hdr = (void *)skb->data; 596 __le16 fc = hdr->frame_control; 597 598 /* AP IOT issue with EAPoL, ARP and DHCP */ 599 if (pkt_type < PACKET_MAX) 600 return false; 601 602 if (!sta || !sta->deflink.he_cap.has_he) 603 return false; 604 605 if (!ieee80211_is_data_qos(fc)) 606 return false; 607 608 if (skb_headroom(skb) < IEEE80211_HT_CTL_LEN) 609 return false; 610 611 if (rtwsta && rtwsta->ra_report.might_fallback_legacy) 612 return false; 613 614 return true; 615 } 616 617 static void 618 __rtw89_core_tx_adjust_he_qos_htc(struct rtw89_dev *rtwdev, 619 struct rtw89_core_tx_request *tx_req) 620 { 621 struct ieee80211_sta *sta = tx_req->sta; 622 struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; 623 struct sk_buff *skb = tx_req->skb; 624 struct ieee80211_hdr *hdr = (void *)skb->data; 625 __le16 fc = hdr->frame_control; 626 void *data; 627 __le32 *htc; 628 u8 *qc; 629 int hdr_len; 630 631 hdr_len = ieee80211_has_a4(fc) ? 32 : 26; 632 data = skb_push(skb, IEEE80211_HT_CTL_LEN); 633 #if defined(__linux__) 634 memmove(data, data + IEEE80211_HT_CTL_LEN, hdr_len); 635 #elif defined(__FreeBSD__) 636 memmove(data, (u8 *)data + IEEE80211_HT_CTL_LEN, hdr_len); 637 #endif 638 639 hdr = data; 640 #if defined(__linux__) 641 htc = data + hdr_len; 642 #elif defined(__FreeBSD__) 643 htc = (__le32 *)((u8 *)data + hdr_len); 644 #endif 645 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_ORDER); 646 *htc = rtwsta->htc_template ? rtwsta->htc_template : 647 le32_encode_bits(RTW89_HTC_VARIANT_HE, RTW89_HTC_MASK_VARIANT) | 648 le32_encode_bits(RTW89_HTC_VARIANT_HE_CID_CAS, RTW89_HTC_MASK_CTL_ID); 649 650 #if defined(__linux__) 651 qc = data + hdr_len - IEEE80211_QOS_CTL_LEN; 652 #elif defined(__FreeBSD__) 653 qc = (u8 *)data + hdr_len - IEEE80211_QOS_CTL_LEN; 654 #endif 655 qc[0] |= IEEE80211_QOS_CTL_EOSP; 656 } 657 658 static void 659 rtw89_core_tx_update_he_qos_htc(struct rtw89_dev *rtwdev, 660 struct rtw89_core_tx_request *tx_req, 661 enum btc_pkt_type pkt_type) 662 { 663 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 664 struct ieee80211_vif *vif = tx_req->vif; 665 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 666 667 if (!__rtw89_core_tx_check_he_qos_htc(rtwdev, tx_req, pkt_type)) 668 goto desc_bk; 669 670 __rtw89_core_tx_adjust_he_qos_htc(rtwdev, tx_req); 671 672 desc_info->pkt_size += IEEE80211_HT_CTL_LEN; 673 desc_info->a_ctrl_bsr = true; 674 675 desc_bk: 676 if (!rtwvif || rtwvif->last_a_ctrl == desc_info->a_ctrl_bsr) 677 return; 678 679 rtwvif->last_a_ctrl = desc_info->a_ctrl_bsr; 680 desc_info->bk = true; 681 } 682 683 static u8 rtw89_core_tx_get_mac_id(struct rtw89_dev *rtwdev, 684 struct rtw89_core_tx_request *tx_req) 685 { 686 struct ieee80211_vif *vif = tx_req->vif; 687 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 688 struct ieee80211_sta *sta = tx_req->sta; 689 struct rtw89_sta *rtwsta; 690 691 if (!sta) 692 return rtwvif->mac_id; 693 694 rtwsta = (struct rtw89_sta *)sta->drv_priv; 695 return rtwsta->mac_id; 696 } 697 698 static void 699 rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev, 700 struct rtw89_core_tx_request *tx_req) 701 { 702 struct ieee80211_vif *vif = tx_req->vif; 703 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 704 struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern; 705 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 706 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 707 struct sk_buff *skb = tx_req->skb; 708 u8 tid, tid_indicate; 709 u8 qsel, ch_dma; 710 711 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; 712 tid_indicate = rtw89_core_get_tid_indicate(rtwdev, tid); 713 qsel = desc_info->hiq ? RTW89_TX_QSEL_B0_HI : rtw89_core_get_qsel(rtwdev, tid); 714 ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel); 715 716 desc_info->ch_dma = ch_dma; 717 desc_info->tid_indicate = tid_indicate; 718 desc_info->qsel = qsel; 719 desc_info->mac_id = rtw89_core_tx_get_mac_id(rtwdev, tx_req); 720 desc_info->port = desc_info->hiq ? rtwvif->port : 0; 721 722 /* enable wd_info for AMPDU */ 723 desc_info->en_wd_info = true; 724 725 if (IEEE80211_SKB_CB(skb)->control.hw_key) 726 rtw89_core_tx_update_sec_key(rtwdev, tx_req); 727 728 if (rate_pattern->enable) 729 desc_info->data_retry_lowest_rate = rate_pattern->rate; 730 else if (chan->band_type == RTW89_BAND_2G) 731 desc_info->data_retry_lowest_rate = RTW89_HW_RATE_CCK1; 732 else 733 desc_info->data_retry_lowest_rate = RTW89_HW_RATE_OFDM6; 734 } 735 736 static enum btc_pkt_type 737 rtw89_core_tx_btc_spec_pkt_notify(struct rtw89_dev *rtwdev, 738 struct rtw89_core_tx_request *tx_req) 739 { 740 struct sk_buff *skb = tx_req->skb; 741 struct udphdr *udphdr; 742 743 if (IEEE80211_SKB_CB(skb)->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) { 744 ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.eapol_notify_work); 745 return PACKET_EAPOL; 746 } 747 748 if (skb->protocol == htons(ETH_P_ARP)) { 749 ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.arp_notify_work); 750 return PACKET_ARP; 751 } 752 753 if (skb->protocol == htons(ETH_P_IP) && 754 ip_hdr(skb)->protocol == IPPROTO_UDP) { 755 udphdr = udp_hdr(skb); 756 if (((udphdr->source == htons(67) && udphdr->dest == htons(68)) || 757 (udphdr->source == htons(68) && udphdr->dest == htons(67))) && 758 skb->len > 282) { 759 ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.dhcp_notify_work); 760 return PACKET_DHCP; 761 } 762 } 763 764 if (skb->protocol == htons(ETH_P_IP) && 765 ip_hdr(skb)->protocol == IPPROTO_ICMP) { 766 ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.icmp_notify_work); 767 return PACKET_ICMP; 768 } 769 770 return PACKET_MAX; 771 } 772 773 static void rtw89_core_tx_update_llc_hdr(struct rtw89_dev *rtwdev, 774 struct rtw89_tx_desc_info *desc_info, 775 struct sk_buff *skb) 776 { 777 struct ieee80211_hdr *hdr = (void *)skb->data; 778 __le16 fc = hdr->frame_control; 779 780 desc_info->hdr_llc_len = ieee80211_hdrlen(fc); 781 desc_info->hdr_llc_len >>= 1; /* in unit of 2 bytes */ 782 } 783 784 static void 785 rtw89_core_tx_wake(struct rtw89_dev *rtwdev, 786 struct rtw89_core_tx_request *tx_req) 787 { 788 if (!RTW89_CHK_FW_FEATURE(TX_WAKE, &rtwdev->fw)) 789 return; 790 791 if (!test_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags)) 792 return; 793 794 if (tx_req->tx_type != RTW89_CORE_TX_TYPE_MGMT) 795 return; 796 797 rtw89_mac_notify_wake(rtwdev); 798 } 799 800 static void 801 rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev, 802 struct rtw89_core_tx_request *tx_req) 803 { 804 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 805 struct sk_buff *skb = tx_req->skb; 806 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 807 struct ieee80211_hdr *hdr = (void *)skb->data; 808 enum rtw89_core_tx_type tx_type; 809 enum btc_pkt_type pkt_type; 810 bool is_bmc; 811 u16 seq; 812 813 seq = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; 814 if (tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD) { 815 tx_type = rtw89_core_get_tx_type(rtwdev, skb); 816 tx_req->tx_type = tx_type; 817 } 818 is_bmc = (is_broadcast_ether_addr(hdr->addr1) || 819 is_multicast_ether_addr(hdr->addr1)); 820 821 desc_info->seq = seq; 822 desc_info->pkt_size = skb->len; 823 desc_info->is_bmc = is_bmc; 824 desc_info->wd_page = true; 825 desc_info->hiq = info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM; 826 827 switch (tx_req->tx_type) { 828 case RTW89_CORE_TX_TYPE_MGMT: 829 rtw89_core_tx_update_mgmt_info(rtwdev, tx_req); 830 break; 831 case RTW89_CORE_TX_TYPE_DATA: 832 rtw89_core_tx_update_data_info(rtwdev, tx_req); 833 pkt_type = rtw89_core_tx_btc_spec_pkt_notify(rtwdev, tx_req); 834 rtw89_core_tx_update_he_qos_htc(rtwdev, tx_req, pkt_type); 835 rtw89_core_tx_update_ampdu_info(rtwdev, tx_req, pkt_type); 836 rtw89_core_tx_update_llc_hdr(rtwdev, desc_info, skb); 837 break; 838 case RTW89_CORE_TX_TYPE_FWCMD: 839 rtw89_core_tx_update_h2c_info(rtwdev, tx_req); 840 break; 841 } 842 } 843 844 void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel) 845 { 846 u8 ch_dma; 847 848 ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel); 849 850 rtw89_hci_tx_kick_off(rtwdev, ch_dma); 851 } 852 853 int rtw89_h2c_tx(struct rtw89_dev *rtwdev, 854 struct sk_buff *skb, bool fwdl) 855 { 856 struct rtw89_core_tx_request tx_req = {0}; 857 u32 cnt; 858 int ret; 859 860 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { 861 rtw89_debug(rtwdev, RTW89_DBG_FW, 862 "ignore h2c due to power is off with firmware state=%d\n", 863 test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags)); 864 return 0; 865 } 866 867 tx_req.skb = skb; 868 tx_req.tx_type = RTW89_CORE_TX_TYPE_FWCMD; 869 if (fwdl) 870 tx_req.desc_info.fw_dl = true; 871 872 rtw89_core_tx_update_desc_info(rtwdev, &tx_req); 873 874 if (!fwdl) 875 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "H2C: ", skb->data, skb->len); 876 877 cnt = rtw89_hci_check_and_reclaim_tx_resource(rtwdev, RTW89_TXCH_CH12); 878 if (cnt == 0) { 879 rtw89_err(rtwdev, "no tx fwcmd resource\n"); 880 return -ENOSPC; 881 } 882 883 ret = rtw89_hci_tx_write(rtwdev, &tx_req); 884 if (ret) { 885 rtw89_err(rtwdev, "failed to transmit skb to HCI\n"); 886 return ret; 887 } 888 rtw89_hci_tx_kick_off(rtwdev, RTW89_TXCH_CH12); 889 890 return 0; 891 } 892 893 int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 894 struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel) 895 { 896 struct rtw89_core_tx_request tx_req = {0}; 897 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 898 int ret; 899 900 tx_req.skb = skb; 901 tx_req.sta = sta; 902 tx_req.vif = vif; 903 904 rtw89_traffic_stats_accu(rtwdev, &rtwdev->stats, skb, true); 905 rtw89_traffic_stats_accu(rtwdev, &rtwvif->stats, skb, true); 906 rtw89_core_tx_update_desc_info(rtwdev, &tx_req); 907 rtw89_core_tx_wake(rtwdev, &tx_req); 908 909 ret = rtw89_hci_tx_write(rtwdev, &tx_req); 910 if (ret) { 911 rtw89_err(rtwdev, "failed to transmit skb to HCI\n"); 912 return ret; 913 } 914 915 if (qsel) 916 *qsel = tx_req.desc_info.qsel; 917 918 return 0; 919 } 920 921 static __le32 rtw89_build_txwd_body0(struct rtw89_tx_desc_info *desc_info) 922 { 923 u32 dword = FIELD_PREP(RTW89_TXWD_BODY0_WP_OFFSET, desc_info->wp_offset) | 924 FIELD_PREP(RTW89_TXWD_BODY0_WD_INFO_EN, desc_info->en_wd_info) | 925 FIELD_PREP(RTW89_TXWD_BODY0_CHANNEL_DMA, desc_info->ch_dma) | 926 FIELD_PREP(RTW89_TXWD_BODY0_HDR_LLC_LEN, desc_info->hdr_llc_len) | 927 FIELD_PREP(RTW89_TXWD_BODY0_WD_PAGE, desc_info->wd_page) | 928 FIELD_PREP(RTW89_TXWD_BODY0_FW_DL, desc_info->fw_dl) | 929 FIELD_PREP(RTW89_TXWD_BODY0_HW_SSN_SEL, desc_info->hw_ssn_sel) | 930 FIELD_PREP(RTW89_TXWD_BODY0_HW_SSN_MODE, desc_info->hw_seq_mode); 931 932 return cpu_to_le32(dword); 933 } 934 935 static __le32 rtw89_build_txwd_body0_v1(struct rtw89_tx_desc_info *desc_info) 936 { 937 u32 dword = FIELD_PREP(RTW89_TXWD_BODY0_WP_OFFSET_V1, desc_info->wp_offset) | 938 FIELD_PREP(RTW89_TXWD_BODY0_WD_INFO_EN, desc_info->en_wd_info) | 939 FIELD_PREP(RTW89_TXWD_BODY0_CHANNEL_DMA, desc_info->ch_dma) | 940 FIELD_PREP(RTW89_TXWD_BODY0_HDR_LLC_LEN, desc_info->hdr_llc_len) | 941 FIELD_PREP(RTW89_TXWD_BODY0_WD_PAGE, desc_info->wd_page) | 942 FIELD_PREP(RTW89_TXWD_BODY0_FW_DL, desc_info->fw_dl); 943 944 return cpu_to_le32(dword); 945 } 946 947 static __le32 rtw89_build_txwd_body1_v1(struct rtw89_tx_desc_info *desc_info) 948 { 949 u32 dword = FIELD_PREP(RTW89_TXWD_BODY1_ADDR_INFO_NUM, desc_info->addr_info_nr) | 950 FIELD_PREP(RTW89_TXWD_BODY1_SEC_KEYID, desc_info->sec_keyid) | 951 FIELD_PREP(RTW89_TXWD_BODY1_SEC_TYPE, desc_info->sec_type); 952 953 return cpu_to_le32(dword); 954 } 955 956 static __le32 rtw89_build_txwd_body2(struct rtw89_tx_desc_info *desc_info) 957 { 958 u32 dword = FIELD_PREP(RTW89_TXWD_BODY2_TID_INDICATE, desc_info->tid_indicate) | 959 FIELD_PREP(RTW89_TXWD_BODY2_QSEL, desc_info->qsel) | 960 FIELD_PREP(RTW89_TXWD_BODY2_TXPKT_SIZE, desc_info->pkt_size) | 961 FIELD_PREP(RTW89_TXWD_BODY2_MACID, desc_info->mac_id); 962 963 return cpu_to_le32(dword); 964 } 965 966 static __le32 rtw89_build_txwd_body3(struct rtw89_tx_desc_info *desc_info) 967 { 968 u32 dword = FIELD_PREP(RTW89_TXWD_BODY3_SW_SEQ, desc_info->seq) | 969 FIELD_PREP(RTW89_TXWD_BODY3_AGG_EN, desc_info->agg_en) | 970 FIELD_PREP(RTW89_TXWD_BODY3_BK, desc_info->bk); 971 972 return cpu_to_le32(dword); 973 } 974 975 static __le32 rtw89_build_txwd_body4(struct rtw89_tx_desc_info *desc_info) 976 { 977 u32 dword = FIELD_PREP(RTW89_TXWD_BODY4_SEC_IV_L0, desc_info->sec_seq[0]) | 978 FIELD_PREP(RTW89_TXWD_BODY4_SEC_IV_L1, desc_info->sec_seq[1]); 979 980 return cpu_to_le32(dword); 981 } 982 983 static __le32 rtw89_build_txwd_body5(struct rtw89_tx_desc_info *desc_info) 984 { 985 u32 dword = FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H2, desc_info->sec_seq[2]) | 986 FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H3, desc_info->sec_seq[3]) | 987 FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H4, desc_info->sec_seq[4]) | 988 FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H5, desc_info->sec_seq[5]); 989 990 return cpu_to_le32(dword); 991 } 992 993 static __le32 rtw89_build_txwd_body7_v1(struct rtw89_tx_desc_info *desc_info) 994 { 995 u32 dword = FIELD_PREP(RTW89_TXWD_BODY7_USE_RATE_V1, desc_info->use_rate) | 996 FIELD_PREP(RTW89_TXWD_BODY7_DATA_RATE, desc_info->data_rate); 997 998 return cpu_to_le32(dword); 999 } 1000 1001 static __le32 rtw89_build_txwd_info0(struct rtw89_tx_desc_info *desc_info) 1002 { 1003 u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_USE_RATE, desc_info->use_rate) | 1004 FIELD_PREP(RTW89_TXWD_INFO0_DATA_RATE, desc_info->data_rate) | 1005 FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb) | 1006 FIELD_PREP(RTW89_TXWD_INFO0_MULTIPORT_ID, desc_info->port); 1007 1008 return cpu_to_le32(dword); 1009 } 1010 1011 static __le32 rtw89_build_txwd_info0_v1(struct rtw89_tx_desc_info *desc_info) 1012 { 1013 u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb); 1014 1015 return cpu_to_le32(dword); 1016 } 1017 1018 static __le32 rtw89_build_txwd_info1(struct rtw89_tx_desc_info *desc_info) 1019 { 1020 u32 dword = FIELD_PREP(RTW89_TXWD_INFO1_MAX_AGGNUM, desc_info->ampdu_num) | 1021 FIELD_PREP(RTW89_TXWD_INFO1_A_CTRL_BSR, desc_info->a_ctrl_bsr) | 1022 FIELD_PREP(RTW89_TXWD_INFO1_DATA_RTY_LOWEST_RATE, 1023 desc_info->data_retry_lowest_rate); 1024 1025 return cpu_to_le32(dword); 1026 } 1027 1028 static __le32 rtw89_build_txwd_info2(struct rtw89_tx_desc_info *desc_info) 1029 { 1030 u32 dword = FIELD_PREP(RTW89_TXWD_INFO2_AMPDU_DENSITY, desc_info->ampdu_density) | 1031 FIELD_PREP(RTW89_TXWD_INFO2_SEC_TYPE, desc_info->sec_type) | 1032 FIELD_PREP(RTW89_TXWD_INFO2_SEC_HW_ENC, desc_info->sec_en) | 1033 FIELD_PREP(RTW89_TXWD_INFO2_SEC_CAM_IDX, desc_info->sec_cam_idx); 1034 1035 return cpu_to_le32(dword); 1036 } 1037 1038 static __le32 rtw89_build_txwd_info2_v1(struct rtw89_tx_desc_info *desc_info) 1039 { 1040 u32 dword = FIELD_PREP(RTW89_TXWD_INFO2_AMPDU_DENSITY, desc_info->ampdu_density) | 1041 FIELD_PREP(RTW89_TXWD_INFO2_FORCE_KEY_EN, desc_info->sec_en) | 1042 FIELD_PREP(RTW89_TXWD_INFO2_SEC_CAM_IDX, desc_info->sec_cam_idx); 1043 1044 return cpu_to_le32(dword); 1045 } 1046 1047 static __le32 rtw89_build_txwd_info4(struct rtw89_tx_desc_info *desc_info) 1048 { 1049 u32 dword = FIELD_PREP(RTW89_TXWD_INFO4_RTS_EN, 1) | 1050 FIELD_PREP(RTW89_TXWD_INFO4_HW_RTS_EN, 1); 1051 1052 return cpu_to_le32(dword); 1053 } 1054 1055 void rtw89_core_fill_txdesc(struct rtw89_dev *rtwdev, 1056 struct rtw89_tx_desc_info *desc_info, 1057 void *txdesc) 1058 { 1059 struct rtw89_txwd_body *txwd_body = (struct rtw89_txwd_body *)txdesc; 1060 struct rtw89_txwd_info *txwd_info; 1061 1062 txwd_body->dword0 = rtw89_build_txwd_body0(desc_info); 1063 txwd_body->dword2 = rtw89_build_txwd_body2(desc_info); 1064 txwd_body->dword3 = rtw89_build_txwd_body3(desc_info); 1065 1066 if (!desc_info->en_wd_info) 1067 return; 1068 1069 txwd_info = (struct rtw89_txwd_info *)(txwd_body + 1); 1070 txwd_info->dword0 = rtw89_build_txwd_info0(desc_info); 1071 txwd_info->dword1 = rtw89_build_txwd_info1(desc_info); 1072 txwd_info->dword2 = rtw89_build_txwd_info2(desc_info); 1073 txwd_info->dword4 = rtw89_build_txwd_info4(desc_info); 1074 1075 } 1076 EXPORT_SYMBOL(rtw89_core_fill_txdesc); 1077 1078 void rtw89_core_fill_txdesc_v1(struct rtw89_dev *rtwdev, 1079 struct rtw89_tx_desc_info *desc_info, 1080 void *txdesc) 1081 { 1082 struct rtw89_txwd_body_v1 *txwd_body = (struct rtw89_txwd_body_v1 *)txdesc; 1083 struct rtw89_txwd_info *txwd_info; 1084 1085 txwd_body->dword0 = rtw89_build_txwd_body0_v1(desc_info); 1086 txwd_body->dword1 = rtw89_build_txwd_body1_v1(desc_info); 1087 txwd_body->dword2 = rtw89_build_txwd_body2(desc_info); 1088 txwd_body->dword3 = rtw89_build_txwd_body3(desc_info); 1089 if (desc_info->sec_en) { 1090 txwd_body->dword4 = rtw89_build_txwd_body4(desc_info); 1091 txwd_body->dword5 = rtw89_build_txwd_body5(desc_info); 1092 } 1093 txwd_body->dword7 = rtw89_build_txwd_body7_v1(desc_info); 1094 1095 if (!desc_info->en_wd_info) 1096 return; 1097 1098 txwd_info = (struct rtw89_txwd_info *)(txwd_body + 1); 1099 txwd_info->dword0 = rtw89_build_txwd_info0_v1(desc_info); 1100 txwd_info->dword1 = rtw89_build_txwd_info1(desc_info); 1101 txwd_info->dword2 = rtw89_build_txwd_info2_v1(desc_info); 1102 txwd_info->dword4 = rtw89_build_txwd_info4(desc_info); 1103 } 1104 EXPORT_SYMBOL(rtw89_core_fill_txdesc_v1); 1105 1106 static __le32 rtw89_build_txwd_fwcmd0_v1(struct rtw89_tx_desc_info *desc_info) 1107 { 1108 u32 dword = FIELD_PREP(AX_RXD_RPKT_LEN_MASK, desc_info->pkt_size) | 1109 FIELD_PREP(AX_RXD_RPKT_TYPE_MASK, desc_info->fw_dl ? 1110 RTW89_CORE_RX_TYPE_FWDL : 1111 RTW89_CORE_RX_TYPE_H2C); 1112 1113 return cpu_to_le32(dword); 1114 } 1115 1116 void rtw89_core_fill_txdesc_fwcmd_v1(struct rtw89_dev *rtwdev, 1117 struct rtw89_tx_desc_info *desc_info, 1118 void *txdesc) 1119 { 1120 struct rtw89_rxdesc_short *txwd_v1 = (struct rtw89_rxdesc_short *)txdesc; 1121 1122 txwd_v1->dword0 = rtw89_build_txwd_fwcmd0_v1(desc_info); 1123 } 1124 EXPORT_SYMBOL(rtw89_core_fill_txdesc_fwcmd_v1); 1125 1126 static int rtw89_core_rx_process_mac_ppdu(struct rtw89_dev *rtwdev, 1127 struct sk_buff *skb, 1128 struct rtw89_rx_phy_ppdu *phy_ppdu) 1129 { 1130 bool rx_cnt_valid = false; 1131 u8 plcp_size = 0; 1132 u8 usr_num = 0; 1133 u8 *phy_sts; 1134 1135 rx_cnt_valid = RTW89_GET_RXINFO_RX_CNT_VLD(skb->data); 1136 plcp_size = RTW89_GET_RXINFO_PLCP_LEN(skb->data) << 3; 1137 usr_num = RTW89_GET_RXINFO_USR_NUM(skb->data); 1138 if (usr_num > RTW89_PPDU_MAX_USR) { 1139 rtw89_warn(rtwdev, "Invalid user number in mac info\n"); 1140 return -EINVAL; 1141 } 1142 1143 phy_sts = skb->data + RTW89_PPDU_MAC_INFO_SIZE; 1144 phy_sts += usr_num * RTW89_PPDU_MAC_INFO_USR_SIZE; 1145 /* 8-byte alignment */ 1146 if (usr_num & BIT(0)) 1147 phy_sts += RTW89_PPDU_MAC_INFO_USR_SIZE; 1148 if (rx_cnt_valid) 1149 phy_sts += RTW89_PPDU_MAC_RX_CNT_SIZE; 1150 phy_sts += plcp_size; 1151 1152 phy_ppdu->buf = phy_sts; 1153 phy_ppdu->len = skb->data + skb->len - phy_sts; 1154 1155 return 0; 1156 } 1157 1158 static void rtw89_core_rx_process_phy_ppdu_iter(void *data, 1159 struct ieee80211_sta *sta) 1160 { 1161 struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; 1162 struct rtw89_rx_phy_ppdu *phy_ppdu = (struct rtw89_rx_phy_ppdu *)data; 1163 1164 if (rtwsta->mac_id == phy_ppdu->mac_id && phy_ppdu->to_self) 1165 ewma_rssi_add(&rtwsta->avg_rssi, phy_ppdu->rssi_avg); 1166 } 1167 1168 #define VAR_LEN 0xff 1169 #define VAR_LEN_UNIT 8 1170 static u16 rtw89_core_get_phy_status_ie_len(struct rtw89_dev *rtwdev, u8 *addr) 1171 { 1172 static const u8 physts_ie_len_tab[32] = { 1173 16, 32, 24, 24, 8, 8, 8, 8, VAR_LEN, 8, VAR_LEN, 176, VAR_LEN, 1174 VAR_LEN, VAR_LEN, VAR_LEN, VAR_LEN, VAR_LEN, 16, 24, VAR_LEN, 1175 VAR_LEN, VAR_LEN, 0, 24, 24, 24, 24, 32, 32, 32, 32 1176 }; 1177 u16 ie_len; 1178 u8 ie; 1179 1180 ie = RTW89_GET_PHY_STS_IE_TYPE(addr); 1181 if (physts_ie_len_tab[ie] != VAR_LEN) 1182 ie_len = physts_ie_len_tab[ie]; 1183 else 1184 ie_len = RTW89_GET_PHY_STS_IE_LEN(addr) * VAR_LEN_UNIT; 1185 1186 return ie_len; 1187 } 1188 1189 static void rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev, u8 *addr, 1190 struct rtw89_rx_phy_ppdu *phy_ppdu) 1191 { 1192 s16 cfo; 1193 1194 phy_ppdu->chan_idx = RTW89_GET_PHY_STS_IE01_CH_IDX(addr); 1195 if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6) 1196 return; 1197 /* sign conversion for S(12,2) */ 1198 cfo = sign_extend32(RTW89_GET_PHY_STS_IE01_CFO(addr), 11); 1199 rtw89_phy_cfo_parse(rtwdev, cfo, phy_ppdu); 1200 } 1201 1202 static int rtw89_core_process_phy_status_ie(struct rtw89_dev *rtwdev, u8 *addr, 1203 struct rtw89_rx_phy_ppdu *phy_ppdu) 1204 { 1205 u8 ie; 1206 1207 ie = RTW89_GET_PHY_STS_IE_TYPE(addr); 1208 switch (ie) { 1209 case RTW89_PHYSTS_IE01_CMN_OFDM: 1210 rtw89_core_parse_phy_status_ie01(rtwdev, addr, phy_ppdu); 1211 break; 1212 default: 1213 break; 1214 } 1215 1216 return 0; 1217 } 1218 1219 static void rtw89_core_update_phy_ppdu(struct rtw89_rx_phy_ppdu *phy_ppdu) 1220 { 1221 s8 *rssi = phy_ppdu->rssi; 1222 u8 *buf = phy_ppdu->buf; 1223 1224 phy_ppdu->ie = RTW89_GET_PHY_STS_IE_MAP(buf); 1225 phy_ppdu->rssi_avg = RTW89_GET_PHY_STS_RSSI_AVG(buf); 1226 rssi[RF_PATH_A] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_A(buf)); 1227 rssi[RF_PATH_B] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_B(buf)); 1228 rssi[RF_PATH_C] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_C(buf)); 1229 rssi[RF_PATH_D] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_D(buf)); 1230 } 1231 1232 static int rtw89_core_rx_process_phy_ppdu(struct rtw89_dev *rtwdev, 1233 struct rtw89_rx_phy_ppdu *phy_ppdu) 1234 { 1235 if (RTW89_GET_PHY_STS_LEN(phy_ppdu->buf) << 3 != phy_ppdu->len) { 1236 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "phy ppdu len mismatch\n"); 1237 return -EINVAL; 1238 } 1239 rtw89_core_update_phy_ppdu(phy_ppdu); 1240 ieee80211_iterate_stations_atomic(rtwdev->hw, 1241 rtw89_core_rx_process_phy_ppdu_iter, 1242 phy_ppdu); 1243 1244 return 0; 1245 } 1246 1247 static int rtw89_core_rx_parse_phy_sts(struct rtw89_dev *rtwdev, 1248 struct rtw89_rx_phy_ppdu *phy_ppdu) 1249 { 1250 u16 ie_len; 1251 u8 *pos, *end; 1252 1253 /* mark invalid reports and bypass them */ 1254 if (phy_ppdu->ie < RTW89_CCK_PKT) 1255 return -EINVAL; 1256 1257 pos = (u8 *)phy_ppdu->buf + PHY_STS_HDR_LEN; 1258 end = (u8 *)phy_ppdu->buf + phy_ppdu->len; 1259 while (pos < end) { 1260 ie_len = rtw89_core_get_phy_status_ie_len(rtwdev, pos); 1261 rtw89_core_process_phy_status_ie(rtwdev, pos, phy_ppdu); 1262 pos += ie_len; 1263 if (pos > end || ie_len == 0) { 1264 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 1265 "phy status parse failed\n"); 1266 return -EINVAL; 1267 } 1268 } 1269 1270 return 0; 1271 } 1272 1273 static void rtw89_core_rx_process_phy_sts(struct rtw89_dev *rtwdev, 1274 struct rtw89_rx_phy_ppdu *phy_ppdu) 1275 { 1276 int ret; 1277 1278 ret = rtw89_core_rx_parse_phy_sts(rtwdev, phy_ppdu); 1279 if (ret) 1280 rtw89_debug(rtwdev, RTW89_DBG_TXRX, "parse phy sts failed\n"); 1281 else 1282 phy_ppdu->valid = true; 1283 } 1284 1285 static u8 rtw89_rxdesc_to_nl_he_gi(struct rtw89_dev *rtwdev, 1286 const struct rtw89_rx_desc_info *desc_info, 1287 bool rx_status) 1288 { 1289 switch (desc_info->gi_ltf) { 1290 case RTW89_GILTF_SGI_4XHE08: 1291 case RTW89_GILTF_2XHE08: 1292 case RTW89_GILTF_1XHE08: 1293 return NL80211_RATE_INFO_HE_GI_0_8; 1294 case RTW89_GILTF_2XHE16: 1295 case RTW89_GILTF_1XHE16: 1296 return NL80211_RATE_INFO_HE_GI_1_6; 1297 case RTW89_GILTF_LGI_4XHE32: 1298 return NL80211_RATE_INFO_HE_GI_3_2; 1299 default: 1300 rtw89_warn(rtwdev, "invalid gi_ltf=%d", desc_info->gi_ltf); 1301 return rx_status ? NL80211_RATE_INFO_HE_GI_3_2 : U8_MAX; 1302 } 1303 } 1304 1305 static bool rtw89_core_rx_ppdu_match(struct rtw89_dev *rtwdev, 1306 struct rtw89_rx_desc_info *desc_info, 1307 struct ieee80211_rx_status *status) 1308 { 1309 u8 band = desc_info->bb_sel ? RTW89_PHY_1 : RTW89_PHY_0; 1310 u8 data_rate_mode, bw, rate_idx = MASKBYTE0, gi_ltf; 1311 u16 data_rate; 1312 bool ret; 1313 1314 data_rate = desc_info->data_rate; 1315 data_rate_mode = GET_DATA_RATE_MODE(data_rate); 1316 if (data_rate_mode == DATA_RATE_MODE_NON_HT) { 1317 rate_idx = GET_DATA_RATE_NOT_HT_IDX(data_rate); 1318 /* rate_idx is still hardware value here */ 1319 } else if (data_rate_mode == DATA_RATE_MODE_HT) { 1320 rate_idx = GET_DATA_RATE_HT_IDX(data_rate); 1321 } else if (data_rate_mode == DATA_RATE_MODE_VHT) { 1322 rate_idx = GET_DATA_RATE_VHT_HE_IDX(data_rate); 1323 } else if (data_rate_mode == DATA_RATE_MODE_HE) { 1324 rate_idx = GET_DATA_RATE_VHT_HE_IDX(data_rate); 1325 } else { 1326 rtw89_warn(rtwdev, "invalid RX rate mode %d\n", data_rate_mode); 1327 } 1328 1329 bw = rtw89_hw_to_rate_info_bw(desc_info->bw); 1330 gi_ltf = rtw89_rxdesc_to_nl_he_gi(rtwdev, desc_info, false); 1331 ret = rtwdev->ppdu_sts.curr_rx_ppdu_cnt[band] == desc_info->ppdu_cnt && 1332 status->rate_idx == rate_idx && 1333 status->he_gi == gi_ltf && 1334 status->bw == bw; 1335 1336 return ret; 1337 } 1338 1339 struct rtw89_vif_rx_stats_iter_data { 1340 struct rtw89_dev *rtwdev; 1341 struct rtw89_rx_phy_ppdu *phy_ppdu; 1342 struct rtw89_rx_desc_info *desc_info; 1343 struct sk_buff *skb; 1344 const u8 *bssid; 1345 }; 1346 1347 static void rtw89_stats_trigger_frame(struct rtw89_dev *rtwdev, 1348 struct ieee80211_vif *vif, 1349 struct sk_buff *skb) 1350 { 1351 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 1352 struct ieee80211_trigger *tf = (struct ieee80211_trigger *)skb->data; 1353 u8 *pos, *end, type; 1354 u16 aid; 1355 1356 if (!ether_addr_equal(vif->bss_conf.bssid, tf->ta) || 1357 rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION || 1358 rtwvif->net_type == RTW89_NET_TYPE_NO_LINK) 1359 return; 1360 1361 type = le64_get_bits(tf->common_info, IEEE80211_TRIGGER_TYPE_MASK); 1362 if (type != IEEE80211_TRIGGER_TYPE_BASIC) 1363 return; 1364 1365 end = (u8 *)tf + skb->len; 1366 pos = tf->variable; 1367 1368 while (end - pos >= RTW89_TF_BASIC_USER_INFO_SZ) { 1369 aid = RTW89_GET_TF_USER_INFO_AID12(pos); 1370 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 1371 "[TF] aid: %d, ul_mcs: %d, rua: %d\n", 1372 aid, RTW89_GET_TF_USER_INFO_UL_MCS(pos), 1373 RTW89_GET_TF_USER_INFO_RUA(pos)); 1374 1375 if (aid == RTW89_TF_PAD) 1376 break; 1377 1378 if (aid == vif->cfg.aid) { 1379 rtwvif->stats.rx_tf_acc++; 1380 rtwdev->stats.rx_tf_acc++; 1381 break; 1382 } 1383 1384 pos += RTW89_TF_BASIC_USER_INFO_SZ; 1385 } 1386 } 1387 1388 static void rtw89_vif_rx_stats_iter(void *data, u8 *mac, 1389 struct ieee80211_vif *vif) 1390 { 1391 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 1392 struct rtw89_vif_rx_stats_iter_data *iter_data = data; 1393 struct rtw89_dev *rtwdev = iter_data->rtwdev; 1394 struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat; 1395 struct rtw89_rx_desc_info *desc_info = iter_data->desc_info; 1396 struct sk_buff *skb = iter_data->skb; 1397 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1398 const u8 *bssid = iter_data->bssid; 1399 1400 if (ieee80211_is_trigger(hdr->frame_control)) { 1401 rtw89_stats_trigger_frame(rtwdev, vif, skb); 1402 return; 1403 } 1404 1405 if (!ether_addr_equal(vif->bss_conf.bssid, bssid)) 1406 return; 1407 1408 if (ieee80211_is_beacon(hdr->frame_control)) 1409 pkt_stat->beacon_nr++; 1410 1411 if (!ether_addr_equal(vif->addr, hdr->addr1)) 1412 return; 1413 1414 if (desc_info->data_rate < RTW89_HW_RATE_NR) 1415 pkt_stat->rx_rate_cnt[desc_info->data_rate]++; 1416 1417 rtw89_traffic_stats_accu(rtwdev, &rtwvif->stats, skb, false); 1418 } 1419 1420 static void rtw89_core_rx_stats(struct rtw89_dev *rtwdev, 1421 struct rtw89_rx_phy_ppdu *phy_ppdu, 1422 struct rtw89_rx_desc_info *desc_info, 1423 struct sk_buff *skb) 1424 { 1425 struct rtw89_vif_rx_stats_iter_data iter_data; 1426 1427 rtw89_traffic_stats_accu(rtwdev, &rtwdev->stats, skb, false); 1428 1429 iter_data.rtwdev = rtwdev; 1430 iter_data.phy_ppdu = phy_ppdu; 1431 iter_data.desc_info = desc_info; 1432 iter_data.skb = skb; 1433 iter_data.bssid = get_hdr_bssid((struct ieee80211_hdr *)skb->data); 1434 rtw89_iterate_vifs_bh(rtwdev, rtw89_vif_rx_stats_iter, &iter_data); 1435 } 1436 1437 static void rtw89_correct_cck_chan(struct rtw89_dev *rtwdev, 1438 struct ieee80211_rx_status *status) 1439 { 1440 const struct rtw89_chan_rcd *rcd = 1441 rtw89_chan_rcd_get(rtwdev, RTW89_SUB_ENTITY_0); 1442 u16 chan = rcd->prev_primary_channel; 1443 u8 band = rcd->prev_band_type == RTW89_BAND_2G ? 1444 NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; 1445 1446 if (status->band != NL80211_BAND_2GHZ && 1447 status->encoding == RX_ENC_LEGACY && 1448 status->rate_idx < RTW89_HW_RATE_OFDM6) { 1449 status->freq = ieee80211_channel_to_frequency(chan, band); 1450 status->band = band; 1451 } 1452 } 1453 1454 static void rtw89_core_hw_to_sband_rate(struct ieee80211_rx_status *rx_status) 1455 { 1456 if (rx_status->band == NL80211_BAND_2GHZ || 1457 rx_status->encoding != RX_ENC_LEGACY) 1458 return; 1459 1460 /* Some control frames' freq(ACKs in this case) are reported wrong due 1461 * to FW notify timing, set to lowest rate to prevent overflow. 1462 */ 1463 if (rx_status->rate_idx < RTW89_HW_RATE_OFDM6) { 1464 rx_status->rate_idx = 0; 1465 return; 1466 } 1467 1468 /* No 4 CCK rates for non-2G */ 1469 rx_status->rate_idx -= 4; 1470 } 1471 1472 static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev, 1473 struct rtw89_rx_phy_ppdu *phy_ppdu, 1474 struct rtw89_rx_desc_info *desc_info, 1475 struct sk_buff *skb_ppdu, 1476 struct ieee80211_rx_status *rx_status) 1477 { 1478 struct napi_struct *napi = &rtwdev->napi; 1479 1480 /* In low power mode, napi isn't scheduled. Receive it to netif. */ 1481 if (unlikely(!test_bit(NAPI_STATE_SCHED, &napi->state))) 1482 napi = NULL; 1483 1484 rtw89_core_hw_to_sband_rate(rx_status); 1485 rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu); 1486 /* In low power mode, it does RX in thread context. */ 1487 local_bh_disable(); 1488 ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, napi); 1489 local_bh_enable(); 1490 rtwdev->napi_budget_countdown--; 1491 } 1492 1493 static void rtw89_core_rx_pending_skb(struct rtw89_dev *rtwdev, 1494 struct rtw89_rx_phy_ppdu *phy_ppdu, 1495 struct rtw89_rx_desc_info *desc_info, 1496 struct sk_buff *skb) 1497 { 1498 u8 band = desc_info->bb_sel ? RTW89_PHY_1 : RTW89_PHY_0; 1499 int curr = rtwdev->ppdu_sts.curr_rx_ppdu_cnt[band]; 1500 struct sk_buff *skb_ppdu = NULL, *tmp; 1501 struct ieee80211_rx_status *rx_status; 1502 1503 if (curr > RTW89_MAX_PPDU_CNT) 1504 return; 1505 1506 skb_queue_walk_safe(&rtwdev->ppdu_sts.rx_queue[band], skb_ppdu, tmp) { 1507 skb_unlink(skb_ppdu, &rtwdev->ppdu_sts.rx_queue[band]); 1508 rx_status = IEEE80211_SKB_RXCB(skb_ppdu); 1509 if (rtw89_core_rx_ppdu_match(rtwdev, desc_info, rx_status)) 1510 rtw89_chip_query_ppdu(rtwdev, phy_ppdu, rx_status); 1511 rtw89_correct_cck_chan(rtwdev, rx_status); 1512 rtw89_core_rx_to_mac80211(rtwdev, phy_ppdu, desc_info, skb_ppdu, rx_status); 1513 } 1514 } 1515 1516 static void rtw89_core_rx_process_ppdu_sts(struct rtw89_dev *rtwdev, 1517 struct rtw89_rx_desc_info *desc_info, 1518 struct sk_buff *skb) 1519 { 1520 struct rtw89_rx_phy_ppdu phy_ppdu = {.buf = skb->data, .valid = false, 1521 .len = skb->len, 1522 .to_self = desc_info->addr1_match, 1523 .rate = desc_info->data_rate, 1524 .mac_id = desc_info->mac_id}; 1525 int ret; 1526 1527 if (desc_info->mac_info_valid) 1528 rtw89_core_rx_process_mac_ppdu(rtwdev, skb, &phy_ppdu); 1529 ret = rtw89_core_rx_process_phy_ppdu(rtwdev, &phy_ppdu); 1530 if (ret) 1531 rtw89_debug(rtwdev, RTW89_DBG_TXRX, "process ppdu failed\n"); 1532 1533 rtw89_core_rx_process_phy_sts(rtwdev, &phy_ppdu); 1534 rtw89_core_rx_pending_skb(rtwdev, &phy_ppdu, desc_info, skb); 1535 dev_kfree_skb_any(skb); 1536 } 1537 1538 static void rtw89_core_rx_process_report(struct rtw89_dev *rtwdev, 1539 struct rtw89_rx_desc_info *desc_info, 1540 struct sk_buff *skb) 1541 { 1542 switch (desc_info->pkt_type) { 1543 case RTW89_CORE_RX_TYPE_C2H: 1544 rtw89_fw_c2h_irqsafe(rtwdev, skb); 1545 break; 1546 case RTW89_CORE_RX_TYPE_PPDU_STAT: 1547 rtw89_core_rx_process_ppdu_sts(rtwdev, desc_info, skb); 1548 break; 1549 default: 1550 rtw89_debug(rtwdev, RTW89_DBG_TXRX, "unhandled pkt_type=%d\n", 1551 desc_info->pkt_type); 1552 dev_kfree_skb_any(skb); 1553 break; 1554 } 1555 } 1556 1557 void rtw89_core_query_rxdesc(struct rtw89_dev *rtwdev, 1558 struct rtw89_rx_desc_info *desc_info, 1559 u8 *data, u32 data_offset) 1560 { 1561 const struct rtw89_chip_info *chip = rtwdev->chip; 1562 struct rtw89_rxdesc_short *rxd_s; 1563 struct rtw89_rxdesc_long *rxd_l; 1564 u8 shift_len, drv_info_len; 1565 1566 rxd_s = (struct rtw89_rxdesc_short *)(data + data_offset); 1567 desc_info->pkt_size = RTW89_GET_RXWD_PKT_SIZE(rxd_s); 1568 desc_info->drv_info_size = RTW89_GET_RXWD_DRV_INFO_SIZE(rxd_s); 1569 desc_info->long_rxdesc = RTW89_GET_RXWD_LONG_RXD(rxd_s); 1570 desc_info->pkt_type = RTW89_GET_RXWD_RPKT_TYPE(rxd_s); 1571 desc_info->mac_info_valid = RTW89_GET_RXWD_MAC_INFO_VALID(rxd_s); 1572 if (chip->chip_id == RTL8852C) 1573 desc_info->bw = RTW89_GET_RXWD_BW_V1(rxd_s); 1574 else 1575 desc_info->bw = RTW89_GET_RXWD_BW(rxd_s); 1576 desc_info->data_rate = RTW89_GET_RXWD_DATA_RATE(rxd_s); 1577 desc_info->gi_ltf = RTW89_GET_RXWD_GI_LTF(rxd_s); 1578 desc_info->user_id = RTW89_GET_RXWD_USER_ID(rxd_s); 1579 desc_info->sr_en = RTW89_GET_RXWD_SR_EN(rxd_s); 1580 desc_info->ppdu_cnt = RTW89_GET_RXWD_PPDU_CNT(rxd_s); 1581 desc_info->ppdu_type = RTW89_GET_RXWD_PPDU_TYPE(rxd_s); 1582 desc_info->free_run_cnt = RTW89_GET_RXWD_FREE_RUN_CNT(rxd_s); 1583 desc_info->icv_err = RTW89_GET_RXWD_ICV_ERR(rxd_s); 1584 desc_info->crc32_err = RTW89_GET_RXWD_CRC32_ERR(rxd_s); 1585 desc_info->hw_dec = RTW89_GET_RXWD_HW_DEC(rxd_s); 1586 desc_info->sw_dec = RTW89_GET_RXWD_SW_DEC(rxd_s); 1587 desc_info->addr1_match = RTW89_GET_RXWD_A1_MATCH(rxd_s); 1588 1589 shift_len = desc_info->shift << 1; /* 2-byte unit */ 1590 drv_info_len = desc_info->drv_info_size << 3; /* 8-byte unit */ 1591 desc_info->offset = data_offset + shift_len + drv_info_len; 1592 desc_info->ready = true; 1593 1594 if (!desc_info->long_rxdesc) 1595 return; 1596 1597 rxd_l = (struct rtw89_rxdesc_long *)(data + data_offset); 1598 desc_info->frame_type = RTW89_GET_RXWD_TYPE(rxd_l); 1599 desc_info->addr_cam_valid = RTW89_GET_RXWD_ADDR_CAM_VLD(rxd_l); 1600 desc_info->addr_cam_id = RTW89_GET_RXWD_ADDR_CAM_ID(rxd_l); 1601 desc_info->sec_cam_id = RTW89_GET_RXWD_SEC_CAM_ID(rxd_l); 1602 desc_info->mac_id = RTW89_GET_RXWD_MAC_ID(rxd_l); 1603 desc_info->rx_pl_id = RTW89_GET_RXWD_RX_PL_ID(rxd_l); 1604 } 1605 EXPORT_SYMBOL(rtw89_core_query_rxdesc); 1606 1607 struct rtw89_core_iter_rx_status { 1608 struct rtw89_dev *rtwdev; 1609 struct ieee80211_rx_status *rx_status; 1610 struct rtw89_rx_desc_info *desc_info; 1611 u8 mac_id; 1612 }; 1613 1614 static 1615 void rtw89_core_stats_sta_rx_status_iter(void *data, struct ieee80211_sta *sta) 1616 { 1617 struct rtw89_core_iter_rx_status *iter_data = 1618 (struct rtw89_core_iter_rx_status *)data; 1619 struct ieee80211_rx_status *rx_status = iter_data->rx_status; 1620 struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; 1621 struct rtw89_rx_desc_info *desc_info = iter_data->desc_info; 1622 u8 mac_id = iter_data->mac_id; 1623 1624 if (mac_id != rtwsta->mac_id) 1625 return; 1626 1627 rtwsta->rx_status = *rx_status; 1628 rtwsta->rx_hw_rate = desc_info->data_rate; 1629 } 1630 1631 static void rtw89_core_stats_sta_rx_status(struct rtw89_dev *rtwdev, 1632 struct rtw89_rx_desc_info *desc_info, 1633 struct ieee80211_rx_status *rx_status) 1634 { 1635 struct rtw89_core_iter_rx_status iter_data; 1636 1637 if (!desc_info->addr1_match || !desc_info->long_rxdesc) 1638 return; 1639 1640 if (desc_info->frame_type != RTW89_RX_TYPE_DATA) 1641 return; 1642 1643 iter_data.rtwdev = rtwdev; 1644 iter_data.rx_status = rx_status; 1645 iter_data.desc_info = desc_info; 1646 iter_data.mac_id = desc_info->mac_id; 1647 ieee80211_iterate_stations_atomic(rtwdev->hw, 1648 rtw89_core_stats_sta_rx_status_iter, 1649 &iter_data); 1650 } 1651 1652 static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev, 1653 struct rtw89_rx_desc_info *desc_info, 1654 struct ieee80211_rx_status *rx_status) 1655 { 1656 const struct cfg80211_chan_def *chandef = 1657 rtw89_chandef_get(rtwdev, RTW89_SUB_ENTITY_0); 1658 const struct rtw89_chan *cur = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 1659 u16 data_rate; 1660 u8 data_rate_mode; 1661 1662 /* currently using single PHY */ 1663 rx_status->freq = chandef->chan->center_freq; 1664 rx_status->band = chandef->chan->band; 1665 1666 if (rtwdev->scanning && 1667 RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) { 1668 u8 chan = cur->primary_channel; 1669 u8 band = cur->band_type; 1670 enum nl80211_band nl_band; 1671 1672 nl_band = rtw89_hw_to_nl80211_band(band); 1673 rx_status->freq = ieee80211_channel_to_frequency(chan, nl_band); 1674 rx_status->band = nl_band; 1675 } 1676 1677 if (desc_info->icv_err || desc_info->crc32_err) 1678 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 1679 1680 if (desc_info->hw_dec && 1681 !(desc_info->sw_dec || desc_info->icv_err)) 1682 rx_status->flag |= RX_FLAG_DECRYPTED; 1683 1684 rx_status->bw = rtw89_hw_to_rate_info_bw(desc_info->bw); 1685 1686 data_rate = desc_info->data_rate; 1687 data_rate_mode = GET_DATA_RATE_MODE(data_rate); 1688 if (data_rate_mode == DATA_RATE_MODE_NON_HT) { 1689 rx_status->encoding = RX_ENC_LEGACY; 1690 rx_status->rate_idx = GET_DATA_RATE_NOT_HT_IDX(data_rate); 1691 /* convert rate_idx after we get the correct band */ 1692 } else if (data_rate_mode == DATA_RATE_MODE_HT) { 1693 rx_status->encoding = RX_ENC_HT; 1694 rx_status->rate_idx = GET_DATA_RATE_HT_IDX(data_rate); 1695 if (desc_info->gi_ltf) 1696 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 1697 } else if (data_rate_mode == DATA_RATE_MODE_VHT) { 1698 rx_status->encoding = RX_ENC_VHT; 1699 rx_status->rate_idx = GET_DATA_RATE_VHT_HE_IDX(data_rate); 1700 rx_status->nss = GET_DATA_RATE_NSS(data_rate) + 1; 1701 if (desc_info->gi_ltf) 1702 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 1703 } else if (data_rate_mode == DATA_RATE_MODE_HE) { 1704 rx_status->encoding = RX_ENC_HE; 1705 rx_status->rate_idx = GET_DATA_RATE_VHT_HE_IDX(data_rate); 1706 rx_status->nss = GET_DATA_RATE_NSS(data_rate) + 1; 1707 } else { 1708 rtw89_warn(rtwdev, "invalid RX rate mode %d\n", data_rate_mode); 1709 } 1710 1711 /* he_gi is used to match ppdu, so we always fill it. */ 1712 rx_status->he_gi = rtw89_rxdesc_to_nl_he_gi(rtwdev, desc_info, true); 1713 rx_status->flag |= RX_FLAG_MACTIME_START; 1714 rx_status->mactime = desc_info->free_run_cnt; 1715 1716 rtw89_core_stats_sta_rx_status(rtwdev, desc_info, rx_status); 1717 } 1718 1719 static enum rtw89_ps_mode rtw89_update_ps_mode(struct rtw89_dev *rtwdev) 1720 { 1721 const struct rtw89_chip_info *chip = rtwdev->chip; 1722 1723 if (rtw89_disable_ps_mode || !chip->ps_mode_supported) 1724 return RTW89_PS_MODE_NONE; 1725 1726 if (chip->ps_mode_supported & BIT(RTW89_PS_MODE_PWR_GATED)) 1727 return RTW89_PS_MODE_PWR_GATED; 1728 1729 if (chip->ps_mode_supported & BIT(RTW89_PS_MODE_CLK_GATED)) 1730 return RTW89_PS_MODE_CLK_GATED; 1731 1732 if (chip->ps_mode_supported & BIT(RTW89_PS_MODE_RFOFF)) 1733 return RTW89_PS_MODE_RFOFF; 1734 1735 return RTW89_PS_MODE_NONE; 1736 } 1737 1738 static void rtw89_core_flush_ppdu_rx_queue(struct rtw89_dev *rtwdev, 1739 struct rtw89_rx_desc_info *desc_info) 1740 { 1741 struct rtw89_ppdu_sts_info *ppdu_sts = &rtwdev->ppdu_sts; 1742 u8 band = desc_info->bb_sel ? RTW89_PHY_1 : RTW89_PHY_0; 1743 struct ieee80211_rx_status *rx_status; 1744 struct sk_buff *skb_ppdu, *tmp; 1745 1746 skb_queue_walk_safe(&ppdu_sts->rx_queue[band], skb_ppdu, tmp) { 1747 skb_unlink(skb_ppdu, &ppdu_sts->rx_queue[band]); 1748 rx_status = IEEE80211_SKB_RXCB(skb_ppdu); 1749 rtw89_core_rx_to_mac80211(rtwdev, NULL, desc_info, skb_ppdu, rx_status); 1750 } 1751 } 1752 1753 void rtw89_core_rx(struct rtw89_dev *rtwdev, 1754 struct rtw89_rx_desc_info *desc_info, 1755 struct sk_buff *skb) 1756 { 1757 struct ieee80211_rx_status *rx_status; 1758 struct rtw89_ppdu_sts_info *ppdu_sts = &rtwdev->ppdu_sts; 1759 u8 ppdu_cnt = desc_info->ppdu_cnt; 1760 u8 band = desc_info->bb_sel ? RTW89_PHY_1 : RTW89_PHY_0; 1761 1762 if (desc_info->pkt_type != RTW89_CORE_RX_TYPE_WIFI) { 1763 rtw89_core_rx_process_report(rtwdev, desc_info, skb); 1764 return; 1765 } 1766 1767 if (ppdu_sts->curr_rx_ppdu_cnt[band] != ppdu_cnt) { 1768 rtw89_core_flush_ppdu_rx_queue(rtwdev, desc_info); 1769 ppdu_sts->curr_rx_ppdu_cnt[band] = ppdu_cnt; 1770 } 1771 1772 rx_status = IEEE80211_SKB_RXCB(skb); 1773 memset(rx_status, 0, sizeof(*rx_status)); 1774 rtw89_core_update_rx_status(rtwdev, desc_info, rx_status); 1775 if (desc_info->long_rxdesc && 1776 BIT(desc_info->frame_type) & PPDU_FILTER_BITMAP) 1777 skb_queue_tail(&ppdu_sts->rx_queue[band], skb); 1778 else 1779 rtw89_core_rx_to_mac80211(rtwdev, NULL, desc_info, skb, rx_status); 1780 } 1781 EXPORT_SYMBOL(rtw89_core_rx); 1782 1783 void rtw89_core_napi_start(struct rtw89_dev *rtwdev) 1784 { 1785 if (test_and_set_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags)) 1786 return; 1787 1788 napi_enable(&rtwdev->napi); 1789 } 1790 EXPORT_SYMBOL(rtw89_core_napi_start); 1791 1792 void rtw89_core_napi_stop(struct rtw89_dev *rtwdev) 1793 { 1794 if (!test_and_clear_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags)) 1795 return; 1796 1797 napi_synchronize(&rtwdev->napi); 1798 napi_disable(&rtwdev->napi); 1799 } 1800 EXPORT_SYMBOL(rtw89_core_napi_stop); 1801 1802 void rtw89_core_napi_init(struct rtw89_dev *rtwdev) 1803 { 1804 init_dummy_netdev(&rtwdev->netdev); 1805 netif_napi_add(&rtwdev->netdev, &rtwdev->napi, 1806 rtwdev->hci.ops->napi_poll); 1807 } 1808 EXPORT_SYMBOL(rtw89_core_napi_init); 1809 1810 void rtw89_core_napi_deinit(struct rtw89_dev *rtwdev) 1811 { 1812 rtw89_core_napi_stop(rtwdev); 1813 netif_napi_del(&rtwdev->napi); 1814 } 1815 EXPORT_SYMBOL(rtw89_core_napi_deinit); 1816 1817 static void rtw89_core_ba_work(struct work_struct *work) 1818 { 1819 struct rtw89_dev *rtwdev = 1820 container_of(work, struct rtw89_dev, ba_work); 1821 struct rtw89_txq *rtwtxq, *tmp; 1822 int ret; 1823 1824 spin_lock_bh(&rtwdev->ba_lock); 1825 list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->ba_list, list) { 1826 struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq); 1827 struct ieee80211_sta *sta = txq->sta; 1828 struct rtw89_sta *rtwsta = sta ? (struct rtw89_sta *)sta->drv_priv : NULL; 1829 u8 tid = txq->tid; 1830 1831 if (!sta) { 1832 rtw89_warn(rtwdev, "cannot start BA without sta\n"); 1833 goto skip_ba_work; 1834 } 1835 1836 if (rtwsta->disassoc) { 1837 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 1838 "cannot start BA with disassoc sta\n"); 1839 goto skip_ba_work; 1840 } 1841 1842 ret = ieee80211_start_tx_ba_session(sta, tid, 0); 1843 if (ret) { 1844 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 1845 "failed to setup BA session for %pM:%2d: %d\n", 1846 sta->addr, tid, ret); 1847 if (ret == -EINVAL) 1848 set_bit(RTW89_TXQ_F_BLOCK_BA, &rtwtxq->flags); 1849 } 1850 skip_ba_work: 1851 list_del_init(&rtwtxq->list); 1852 } 1853 spin_unlock_bh(&rtwdev->ba_lock); 1854 } 1855 1856 static void rtw89_core_free_sta_pending_ba(struct rtw89_dev *rtwdev, 1857 struct ieee80211_sta *sta) 1858 { 1859 struct rtw89_txq *rtwtxq, *tmp; 1860 1861 spin_lock_bh(&rtwdev->ba_lock); 1862 list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->ba_list, list) { 1863 struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq); 1864 1865 if (sta == txq->sta) 1866 list_del_init(&rtwtxq->list); 1867 } 1868 spin_unlock_bh(&rtwdev->ba_lock); 1869 } 1870 1871 static void rtw89_core_free_sta_pending_forbid_ba(struct rtw89_dev *rtwdev, 1872 struct ieee80211_sta *sta) 1873 { 1874 struct rtw89_txq *rtwtxq, *tmp; 1875 1876 spin_lock_bh(&rtwdev->ba_lock); 1877 list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->forbid_ba_list, list) { 1878 struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq); 1879 1880 if (sta == txq->sta) { 1881 clear_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags); 1882 list_del_init(&rtwtxq->list); 1883 } 1884 } 1885 spin_unlock_bh(&rtwdev->ba_lock); 1886 } 1887 1888 static void rtw89_core_stop_tx_ba_session(struct rtw89_dev *rtwdev, 1889 struct rtw89_txq *rtwtxq) 1890 { 1891 struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq); 1892 struct ieee80211_sta *sta = txq->sta; 1893 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); 1894 1895 if (unlikely(!rtwsta) || unlikely(rtwsta->disassoc)) 1896 return; 1897 1898 if (!test_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags) || 1899 test_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags)) 1900 return; 1901 1902 spin_lock_bh(&rtwdev->ba_lock); 1903 if (!list_empty(&rtwtxq->list)) { 1904 list_del_init(&rtwtxq->list); 1905 goto out; 1906 } 1907 1908 set_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags); 1909 1910 list_add_tail(&rtwtxq->list, &rtwdev->forbid_ba_list); 1911 ieee80211_stop_tx_ba_session(sta, txq->tid); 1912 cancel_delayed_work(&rtwdev->forbid_ba_work); 1913 ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->forbid_ba_work, 1914 RTW89_FORBID_BA_TIMER); 1915 1916 out: 1917 spin_unlock_bh(&rtwdev->ba_lock); 1918 } 1919 1920 static void rtw89_core_txq_check_agg(struct rtw89_dev *rtwdev, 1921 struct rtw89_txq *rtwtxq, 1922 struct sk_buff *skb) 1923 { 1924 struct ieee80211_hw *hw = rtwdev->hw; 1925 struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq); 1926 struct ieee80211_sta *sta = txq->sta; 1927 struct rtw89_sta *rtwsta = sta ? (struct rtw89_sta *)sta->drv_priv : NULL; 1928 1929 if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))) { 1930 rtw89_core_stop_tx_ba_session(rtwdev, rtwtxq); 1931 return; 1932 } 1933 1934 if (unlikely(!sta)) 1935 return; 1936 1937 if (test_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags)) 1938 return; 1939 1940 if (unlikely(test_bit(RTW89_TXQ_F_BLOCK_BA, &rtwtxq->flags))) 1941 return; 1942 1943 if (test_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags)) { 1944 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_AMPDU; 1945 return; 1946 } 1947 1948 spin_lock_bh(&rtwdev->ba_lock); 1949 if (!rtwsta->disassoc && list_empty(&rtwtxq->list)) { 1950 list_add_tail(&rtwtxq->list, &rtwdev->ba_list); 1951 ieee80211_queue_work(hw, &rtwdev->ba_work); 1952 } 1953 spin_unlock_bh(&rtwdev->ba_lock); 1954 } 1955 1956 static void rtw89_core_txq_push(struct rtw89_dev *rtwdev, 1957 struct rtw89_txq *rtwtxq, 1958 unsigned long frame_cnt, 1959 unsigned long byte_cnt) 1960 { 1961 struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq); 1962 struct ieee80211_vif *vif = txq->vif; 1963 struct ieee80211_sta *sta = txq->sta; 1964 struct sk_buff *skb; 1965 unsigned long i; 1966 int ret; 1967 1968 rcu_read_lock(); 1969 for (i = 0; i < frame_cnt; i++) { 1970 skb = ieee80211_tx_dequeue_ni(rtwdev->hw, txq); 1971 if (!skb) { 1972 rtw89_debug(rtwdev, RTW89_DBG_TXRX, "dequeue a NULL skb\n"); 1973 goto out; 1974 } 1975 rtw89_core_txq_check_agg(rtwdev, rtwtxq, skb); 1976 ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, NULL); 1977 if (ret) { 1978 rtw89_err(rtwdev, "failed to push txq: %d\n", ret); 1979 ieee80211_free_txskb(rtwdev->hw, skb); 1980 break; 1981 } 1982 } 1983 out: 1984 rcu_read_unlock(); 1985 } 1986 1987 static u32 rtw89_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, u8 tid) 1988 { 1989 u8 qsel, ch_dma; 1990 1991 qsel = rtw89_core_get_qsel(rtwdev, tid); 1992 ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel); 1993 1994 return rtw89_hci_check_and_reclaim_tx_resource(rtwdev, ch_dma); 1995 } 1996 1997 static bool rtw89_core_txq_agg_wait(struct rtw89_dev *rtwdev, 1998 struct ieee80211_txq *txq, 1999 unsigned long *frame_cnt, 2000 bool *sched_txq, bool *reinvoke) 2001 { 2002 struct rtw89_txq *rtwtxq = (struct rtw89_txq *)txq->drv_priv; 2003 struct ieee80211_sta *sta = txq->sta; 2004 struct rtw89_sta *rtwsta = sta ? (struct rtw89_sta *)sta->drv_priv : NULL; 2005 2006 if (!sta || rtwsta->max_agg_wait <= 0) 2007 return false; 2008 2009 if (rtwdev->stats.tx_tfc_lv <= RTW89_TFC_MID) 2010 return false; 2011 2012 if (*frame_cnt > 1) { 2013 *frame_cnt -= 1; 2014 *sched_txq = true; 2015 *reinvoke = true; 2016 rtwtxq->wait_cnt = 1; 2017 return false; 2018 } 2019 2020 if (*frame_cnt == 1 && rtwtxq->wait_cnt < rtwsta->max_agg_wait) { 2021 *reinvoke = true; 2022 rtwtxq->wait_cnt++; 2023 return true; 2024 } 2025 2026 rtwtxq->wait_cnt = 0; 2027 return false; 2028 } 2029 2030 static void rtw89_core_txq_schedule(struct rtw89_dev *rtwdev, u8 ac, bool *reinvoke) 2031 { 2032 struct ieee80211_hw *hw = rtwdev->hw; 2033 struct ieee80211_txq *txq; 2034 struct rtw89_txq *rtwtxq; 2035 unsigned long frame_cnt; 2036 unsigned long byte_cnt; 2037 u32 tx_resource; 2038 bool sched_txq; 2039 2040 ieee80211_txq_schedule_start(hw, ac); 2041 while ((txq = ieee80211_next_txq(hw, ac))) { 2042 rtwtxq = (struct rtw89_txq *)txq->drv_priv; 2043 tx_resource = rtw89_check_and_reclaim_tx_resource(rtwdev, txq->tid); 2044 sched_txq = false; 2045 2046 ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt); 2047 if (rtw89_core_txq_agg_wait(rtwdev, txq, &frame_cnt, &sched_txq, reinvoke)) { 2048 ieee80211_return_txq(hw, txq, true); 2049 continue; 2050 } 2051 frame_cnt = min_t(unsigned long, frame_cnt, tx_resource); 2052 rtw89_core_txq_push(rtwdev, rtwtxq, frame_cnt, byte_cnt); 2053 ieee80211_return_txq(hw, txq, sched_txq); 2054 if (frame_cnt != 0) 2055 rtw89_core_tx_kick_off(rtwdev, rtw89_core_get_qsel(rtwdev, txq->tid)); 2056 2057 /* bound of tx_resource could get stuck due to burst traffic */ 2058 if (frame_cnt == tx_resource) 2059 *reinvoke = true; 2060 } 2061 ieee80211_txq_schedule_end(hw, ac); 2062 } 2063 2064 static void rtw89_ips_work(struct work_struct *work) 2065 { 2066 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 2067 ips_work); 2068 mutex_lock(&rtwdev->mutex); 2069 if (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE) 2070 rtw89_enter_ips(rtwdev); 2071 mutex_unlock(&rtwdev->mutex); 2072 } 2073 2074 static void rtw89_core_txq_work(struct work_struct *w) 2075 { 2076 struct rtw89_dev *rtwdev = container_of(w, struct rtw89_dev, txq_work); 2077 bool reinvoke = false; 2078 u8 ac; 2079 2080 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 2081 rtw89_core_txq_schedule(rtwdev, ac, &reinvoke); 2082 2083 if (reinvoke) { 2084 /* reinvoke to process the last frame */ 2085 mod_delayed_work(rtwdev->txq_wq, &rtwdev->txq_reinvoke_work, 1); 2086 } 2087 } 2088 2089 static void rtw89_core_txq_reinvoke_work(struct work_struct *w) 2090 { 2091 struct rtw89_dev *rtwdev = container_of(w, struct rtw89_dev, 2092 txq_reinvoke_work.work); 2093 2094 queue_work(rtwdev->txq_wq, &rtwdev->txq_work); 2095 } 2096 2097 static void rtw89_forbid_ba_work(struct work_struct *w) 2098 { 2099 struct rtw89_dev *rtwdev = container_of(w, struct rtw89_dev, 2100 forbid_ba_work.work); 2101 struct rtw89_txq *rtwtxq, *tmp; 2102 2103 spin_lock_bh(&rtwdev->ba_lock); 2104 list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->forbid_ba_list, list) { 2105 clear_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags); 2106 list_del_init(&rtwtxq->list); 2107 } 2108 spin_unlock_bh(&rtwdev->ba_lock); 2109 } 2110 2111 static enum rtw89_tfc_lv rtw89_get_traffic_level(struct rtw89_dev *rtwdev, 2112 u32 throughput, u64 cnt) 2113 { 2114 if (cnt < 100) 2115 return RTW89_TFC_IDLE; 2116 if (throughput > 50) 2117 return RTW89_TFC_HIGH; 2118 if (throughput > 10) 2119 return RTW89_TFC_MID; 2120 if (throughput > 2) 2121 return RTW89_TFC_LOW; 2122 return RTW89_TFC_ULTRA_LOW; 2123 } 2124 2125 static bool rtw89_traffic_stats_calc(struct rtw89_dev *rtwdev, 2126 struct rtw89_traffic_stats *stats) 2127 { 2128 enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv; 2129 enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv; 2130 2131 stats->tx_throughput_raw = (u32)(stats->tx_unicast >> RTW89_TP_SHIFT); 2132 stats->rx_throughput_raw = (u32)(stats->rx_unicast >> RTW89_TP_SHIFT); 2133 2134 ewma_tp_add(&stats->tx_ewma_tp, stats->tx_throughput_raw); 2135 ewma_tp_add(&stats->rx_ewma_tp, stats->rx_throughput_raw); 2136 2137 stats->tx_throughput = ewma_tp_read(&stats->tx_ewma_tp); 2138 stats->rx_throughput = ewma_tp_read(&stats->rx_ewma_tp); 2139 stats->tx_tfc_lv = rtw89_get_traffic_level(rtwdev, stats->tx_throughput, 2140 stats->tx_cnt); 2141 stats->rx_tfc_lv = rtw89_get_traffic_level(rtwdev, stats->rx_throughput, 2142 stats->rx_cnt); 2143 stats->tx_avg_len = stats->tx_cnt ? 2144 DIV_ROUND_DOWN_ULL(stats->tx_unicast, stats->tx_cnt) : 0; 2145 stats->rx_avg_len = stats->rx_cnt ? 2146 DIV_ROUND_DOWN_ULL(stats->rx_unicast, stats->rx_cnt) : 0; 2147 2148 stats->tx_unicast = 0; 2149 stats->rx_unicast = 0; 2150 stats->tx_cnt = 0; 2151 stats->rx_cnt = 0; 2152 stats->rx_tf_periodic = stats->rx_tf_acc; 2153 stats->rx_tf_acc = 0; 2154 2155 if (tx_tfc_lv != stats->tx_tfc_lv || rx_tfc_lv != stats->rx_tfc_lv) 2156 return true; 2157 2158 return false; 2159 } 2160 2161 static bool rtw89_traffic_stats_track(struct rtw89_dev *rtwdev) 2162 { 2163 struct rtw89_vif *rtwvif; 2164 bool tfc_changed; 2165 2166 tfc_changed = rtw89_traffic_stats_calc(rtwdev, &rtwdev->stats); 2167 rtw89_for_each_rtwvif(rtwdev, rtwvif) 2168 rtw89_traffic_stats_calc(rtwdev, &rtwvif->stats); 2169 2170 return tfc_changed; 2171 } 2172 2173 static void rtw89_vif_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 2174 { 2175 if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION) 2176 return; 2177 2178 if (rtwvif->stats.tx_tfc_lv == RTW89_TFC_IDLE && 2179 rtwvif->stats.rx_tfc_lv == RTW89_TFC_IDLE) 2180 rtw89_enter_lps(rtwdev, rtwvif->mac_id); 2181 } 2182 2183 static void rtw89_enter_lps_track(struct rtw89_dev *rtwdev) 2184 { 2185 struct rtw89_vif *rtwvif; 2186 2187 rtw89_for_each_rtwvif(rtwdev, rtwvif) 2188 rtw89_vif_enter_lps(rtwdev, rtwvif); 2189 } 2190 2191 void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev, 2192 struct rtw89_traffic_stats *stats) 2193 { 2194 stats->tx_unicast = 0; 2195 stats->rx_unicast = 0; 2196 stats->tx_cnt = 0; 2197 stats->rx_cnt = 0; 2198 ewma_tp_init(&stats->tx_ewma_tp); 2199 ewma_tp_init(&stats->rx_ewma_tp); 2200 } 2201 2202 static void rtw89_track_work(struct work_struct *work) 2203 { 2204 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 2205 track_work.work); 2206 bool tfc_changed; 2207 2208 mutex_lock(&rtwdev->mutex); 2209 2210 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 2211 goto out; 2212 2213 ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->track_work, 2214 RTW89_TRACK_WORK_PERIOD); 2215 2216 tfc_changed = rtw89_traffic_stats_track(rtwdev); 2217 if (rtwdev->scanning) 2218 goto out; 2219 2220 rtw89_leave_lps(rtwdev); 2221 2222 if (tfc_changed) { 2223 rtw89_hci_recalc_int_mit(rtwdev); 2224 rtw89_btc_ntfy_wl_sta(rtwdev); 2225 } 2226 rtw89_mac_bf_monitor_track(rtwdev); 2227 rtw89_phy_stat_track(rtwdev); 2228 rtw89_phy_env_monitor_track(rtwdev); 2229 rtw89_phy_dig(rtwdev); 2230 rtw89_chip_rfk_track(rtwdev); 2231 rtw89_phy_ra_update(rtwdev); 2232 rtw89_phy_cfo_track(rtwdev); 2233 2234 if (rtwdev->lps_enabled && !rtwdev->btc.lps) 2235 rtw89_enter_lps_track(rtwdev); 2236 2237 out: 2238 mutex_unlock(&rtwdev->mutex); 2239 } 2240 2241 u8 rtw89_core_acquire_bit_map(unsigned long *addr, unsigned long size) 2242 { 2243 unsigned long bit; 2244 2245 bit = find_first_zero_bit(addr, size); 2246 if (bit < size) 2247 set_bit(bit, addr); 2248 2249 return bit; 2250 } 2251 2252 void rtw89_core_release_bit_map(unsigned long *addr, u8 bit) 2253 { 2254 clear_bit(bit, addr); 2255 } 2256 2257 void rtw89_core_release_all_bits_map(unsigned long *addr, unsigned int nbits) 2258 { 2259 bitmap_zero(addr, nbits); 2260 } 2261 2262 int rtw89_core_acquire_sta_ba_entry(struct rtw89_dev *rtwdev, 2263 struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx) 2264 { 2265 const struct rtw89_chip_info *chip = rtwdev->chip; 2266 struct rtw89_cam_info *cam_info = &rtwdev->cam_info; 2267 struct rtw89_ba_cam_entry *entry = NULL, *tmp; 2268 u8 idx; 2269 int i; 2270 2271 lockdep_assert_held(&rtwdev->mutex); 2272 2273 idx = rtw89_core_acquire_bit_map(cam_info->ba_cam_map, chip->bacam_num); 2274 if (idx == chip->bacam_num) { 2275 /* allocate a static BA CAM to tid=0/5, so replace the existing 2276 * one if BA CAM is full. Hardware will process the original tid 2277 * automatically. 2278 */ 2279 if (tid != 0 && tid != 5) 2280 return -ENOSPC; 2281 2282 for_each_set_bit(i, cam_info->ba_cam_map, chip->bacam_num) { 2283 tmp = &cam_info->ba_cam_entry[i]; 2284 if (tmp->tid == 0 || tmp->tid == 5) 2285 continue; 2286 2287 idx = i; 2288 entry = tmp; 2289 list_del(&entry->list); 2290 break; 2291 } 2292 2293 if (!entry) 2294 return -ENOSPC; 2295 } else { 2296 entry = &cam_info->ba_cam_entry[idx]; 2297 } 2298 2299 entry->tid = tid; 2300 list_add_tail(&entry->list, &rtwsta->ba_cam_list); 2301 2302 *cam_idx = idx; 2303 2304 return 0; 2305 } 2306 2307 int rtw89_core_release_sta_ba_entry(struct rtw89_dev *rtwdev, 2308 struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx) 2309 { 2310 struct rtw89_cam_info *cam_info = &rtwdev->cam_info; 2311 struct rtw89_ba_cam_entry *entry = NULL, *tmp; 2312 u8 idx; 2313 2314 lockdep_assert_held(&rtwdev->mutex); 2315 2316 list_for_each_entry_safe(entry, tmp, &rtwsta->ba_cam_list, list) { 2317 if (entry->tid != tid) 2318 continue; 2319 2320 idx = entry - cam_info->ba_cam_entry; 2321 list_del(&entry->list); 2322 2323 rtw89_core_release_bit_map(cam_info->ba_cam_map, idx); 2324 *cam_idx = idx; 2325 return 0; 2326 } 2327 2328 return -ENOENT; 2329 } 2330 2331 #define RTW89_TYPE_MAPPING(_type) \ 2332 case NL80211_IFTYPE_ ## _type: \ 2333 rtwvif->wifi_role = RTW89_WIFI_ROLE_ ## _type; \ 2334 break 2335 void rtw89_vif_type_mapping(struct ieee80211_vif *vif, bool assoc) 2336 { 2337 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 2338 2339 switch (vif->type) { 2340 RTW89_TYPE_MAPPING(ADHOC); 2341 RTW89_TYPE_MAPPING(STATION); 2342 RTW89_TYPE_MAPPING(AP); 2343 RTW89_TYPE_MAPPING(MONITOR); 2344 RTW89_TYPE_MAPPING(MESH_POINT); 2345 default: 2346 WARN_ON(1); 2347 break; 2348 } 2349 2350 switch (vif->type) { 2351 case NL80211_IFTYPE_AP: 2352 case NL80211_IFTYPE_MESH_POINT: 2353 rtwvif->net_type = RTW89_NET_TYPE_AP_MODE; 2354 rtwvif->self_role = RTW89_SELF_ROLE_AP; 2355 break; 2356 case NL80211_IFTYPE_ADHOC: 2357 rtwvif->net_type = RTW89_NET_TYPE_AD_HOC; 2358 rtwvif->self_role = RTW89_SELF_ROLE_CLIENT; 2359 break; 2360 case NL80211_IFTYPE_STATION: 2361 if (assoc) { 2362 rtwvif->net_type = RTW89_NET_TYPE_INFRA; 2363 rtwvif->trigger = vif->bss_conf.he_support; 2364 } else { 2365 rtwvif->net_type = RTW89_NET_TYPE_NO_LINK; 2366 rtwvif->trigger = false; 2367 } 2368 rtwvif->self_role = RTW89_SELF_ROLE_CLIENT; 2369 rtwvif->addr_cam.sec_ent_mode = RTW89_ADDR_CAM_SEC_NORMAL; 2370 break; 2371 default: 2372 WARN_ON(1); 2373 break; 2374 } 2375 } 2376 2377 int rtw89_core_sta_add(struct rtw89_dev *rtwdev, 2378 struct ieee80211_vif *vif, 2379 struct ieee80211_sta *sta) 2380 { 2381 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 2382 struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; 2383 int i; 2384 2385 rtwsta->rtwvif = rtwvif; 2386 rtwsta->prev_rssi = 0; 2387 INIT_LIST_HEAD(&rtwsta->ba_cam_list); 2388 2389 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) 2390 rtw89_core_txq_init(rtwdev, sta->txq[i]); 2391 2392 ewma_rssi_init(&rtwsta->avg_rssi); 2393 2394 if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) { 2395 /* for station mode, assign the mac_id from itself */ 2396 rtwsta->mac_id = rtwvif->mac_id; 2397 rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta, 2398 BTC_ROLE_MSTS_STA_CONN_START); 2399 rtw89_chip_rfk_channel(rtwdev); 2400 } else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) { 2401 rtwsta->mac_id = rtw89_core_acquire_bit_map(rtwdev->mac_id_map, 2402 RTW89_MAX_MAC_ID_NUM); 2403 } 2404 2405 return 0; 2406 } 2407 2408 int rtw89_core_sta_disassoc(struct rtw89_dev *rtwdev, 2409 struct ieee80211_vif *vif, 2410 struct ieee80211_sta *sta) 2411 { 2412 struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; 2413 2414 rtwdev->total_sta_assoc--; 2415 rtwsta->disassoc = true; 2416 2417 return 0; 2418 } 2419 2420 int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev, 2421 struct ieee80211_vif *vif, 2422 struct ieee80211_sta *sta) 2423 { 2424 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 2425 struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; 2426 int ret; 2427 2428 rtw89_mac_bf_monitor_calc(rtwdev, sta, true); 2429 rtw89_mac_bf_disassoc(rtwdev, vif, sta); 2430 rtw89_core_free_sta_pending_ba(rtwdev, sta); 2431 rtw89_core_free_sta_pending_forbid_ba(rtwdev, sta); 2432 if (vif->type == NL80211_IFTYPE_AP || sta->tdls) 2433 rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam); 2434 if (sta->tdls) 2435 rtw89_cam_deinit_bssid_cam(rtwdev, &rtwsta->bssid_cam); 2436 2437 if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) 2438 rtw89_vif_type_mapping(vif, false); 2439 2440 ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, sta); 2441 if (ret) { 2442 rtw89_warn(rtwdev, "failed to send h2c cmac table\n"); 2443 return ret; 2444 } 2445 2446 ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, rtwsta, true); 2447 if (ret) { 2448 rtw89_warn(rtwdev, "failed to send h2c join info\n"); 2449 return ret; 2450 } 2451 2452 if (vif->type == NL80211_IFTYPE_AP || sta->tdls) { 2453 ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta, RTW89_ROLE_REMOVE); 2454 if (ret) { 2455 rtw89_warn(rtwdev, "failed to send h2c role info\n"); 2456 return ret; 2457 } 2458 } 2459 2460 /* update cam aid mac_id net_type */ 2461 ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL); 2462 if (ret) { 2463 rtw89_warn(rtwdev, "failed to send h2c cam\n"); 2464 return ret; 2465 } 2466 2467 return ret; 2468 } 2469 2470 int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev, 2471 struct ieee80211_vif *vif, 2472 struct ieee80211_sta *sta) 2473 { 2474 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 2475 struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; 2476 struct rtw89_bssid_cam_entry *bssid_cam = rtw89_get_bssid_cam_of(rtwvif, rtwsta); 2477 int ret; 2478 2479 if (vif->type == NL80211_IFTYPE_AP || sta->tdls) { 2480 ret = rtw89_mac_set_macid_pause(rtwdev, rtwsta->mac_id, false); 2481 if (ret) { 2482 rtw89_warn(rtwdev, "failed to send h2c macid pause\n"); 2483 return ret; 2484 } 2485 2486 ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta, RTW89_ROLE_CREATE); 2487 if (ret) { 2488 rtw89_warn(rtwdev, "failed to send h2c role info\n"); 2489 return ret; 2490 } 2491 2492 if (sta->tdls) { 2493 ret = rtw89_cam_init_bssid_cam(rtwdev, rtwvif, bssid_cam, sta->addr); 2494 if (ret) { 2495 rtw89_warn(rtwdev, "failed to send h2c init bssid cam for TDLS\n"); 2496 return ret; 2497 } 2498 } 2499 2500 ret = rtw89_cam_init_addr_cam(rtwdev, &rtwsta->addr_cam, bssid_cam); 2501 if (ret) { 2502 rtw89_warn(rtwdev, "failed to send h2c init addr cam\n"); 2503 return ret; 2504 } 2505 } 2506 2507 ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, sta); 2508 if (ret) { 2509 rtw89_warn(rtwdev, "failed to send h2c cmac table\n"); 2510 return ret; 2511 } 2512 2513 ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, rtwsta, false); 2514 if (ret) { 2515 rtw89_warn(rtwdev, "failed to send h2c join info\n"); 2516 return ret; 2517 } 2518 2519 /* update cam aid mac_id net_type */ 2520 rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL); 2521 if (ret) { 2522 rtw89_warn(rtwdev, "failed to send h2c cam\n"); 2523 return ret; 2524 } 2525 2526 ret = rtw89_fw_h2c_general_pkt(rtwdev, rtwsta->mac_id); 2527 if (ret) { 2528 rtw89_warn(rtwdev, "failed to send h2c general packet\n"); 2529 return ret; 2530 } 2531 2532 rtwdev->total_sta_assoc++; 2533 rtw89_phy_ra_assoc(rtwdev, sta); 2534 rtw89_mac_bf_assoc(rtwdev, vif, sta); 2535 rtw89_mac_bf_monitor_calc(rtwdev, sta, false); 2536 2537 if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) { 2538 rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta, 2539 BTC_ROLE_MSTS_STA_CONN_END); 2540 rtw89_core_get_no_ul_ofdma_htc(rtwdev, &rtwsta->htc_template); 2541 } 2542 2543 return ret; 2544 } 2545 2546 int rtw89_core_sta_remove(struct rtw89_dev *rtwdev, 2547 struct ieee80211_vif *vif, 2548 struct ieee80211_sta *sta) 2549 { 2550 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 2551 struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; 2552 2553 if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) 2554 rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta, 2555 BTC_ROLE_MSTS_STA_DIS_CONN); 2556 else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) 2557 rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwsta->mac_id); 2558 2559 return 0; 2560 } 2561 2562 static void rtw89_init_ht_cap(struct rtw89_dev *rtwdev, 2563 struct ieee80211_sta_ht_cap *ht_cap) 2564 { 2565 static const __le16 highest[RF_PATH_MAX] = { 2566 cpu_to_le16(150), cpu_to_le16(300), cpu_to_le16(450), cpu_to_le16(600), 2567 }; 2568 struct rtw89_hal *hal = &rtwdev->hal; 2569 u8 nss = hal->rx_nss; 2570 int i; 2571 2572 ht_cap->ht_supported = true; 2573 ht_cap->cap = 0; 2574 ht_cap->cap |= IEEE80211_HT_CAP_SGI_20 | 2575 IEEE80211_HT_CAP_MAX_AMSDU | 2576 IEEE80211_HT_CAP_TX_STBC | 2577 (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); 2578 ht_cap->cap |= IEEE80211_HT_CAP_LDPC_CODING; 2579 ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 | 2580 IEEE80211_HT_CAP_DSSSCCK40 | 2581 IEEE80211_HT_CAP_SGI_40; 2582 ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; 2583 ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE; 2584 ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; 2585 for (i = 0; i < nss; i++) 2586 ht_cap->mcs.rx_mask[i] = 0xFF; 2587 ht_cap->mcs.rx_mask[4] = 0x01; 2588 ht_cap->mcs.rx_highest = highest[nss - 1]; 2589 } 2590 2591 static void rtw89_init_vht_cap(struct rtw89_dev *rtwdev, 2592 struct ieee80211_sta_vht_cap *vht_cap) 2593 { 2594 static const __le16 highest_bw80[RF_PATH_MAX] = { 2595 cpu_to_le16(433), cpu_to_le16(867), cpu_to_le16(1300), cpu_to_le16(1733), 2596 }; 2597 static const __le16 highest_bw160[RF_PATH_MAX] = { 2598 cpu_to_le16(867), cpu_to_le16(1733), cpu_to_le16(2600), cpu_to_le16(3467), 2599 }; 2600 const struct rtw89_chip_info *chip = rtwdev->chip; 2601 const __le16 *highest = chip->support_bw160 ? highest_bw160 : highest_bw80; 2602 struct rtw89_hal *hal = &rtwdev->hal; 2603 u16 tx_mcs_map = 0, rx_mcs_map = 0; 2604 u8 sts_cap = 3; 2605 int i; 2606 2607 for (i = 0; i < 8; i++) { 2608 if (i < hal->tx_nss) 2609 tx_mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); 2610 else 2611 tx_mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); 2612 if (i < hal->rx_nss) 2613 rx_mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); 2614 else 2615 rx_mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); 2616 } 2617 2618 vht_cap->vht_supported = true; 2619 vht_cap->cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | 2620 IEEE80211_VHT_CAP_SHORT_GI_80 | 2621 IEEE80211_VHT_CAP_RXSTBC_1 | 2622 IEEE80211_VHT_CAP_HTC_VHT | 2623 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK | 2624 0; 2625 vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC; 2626 vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; 2627 vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | 2628 IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE; 2629 vht_cap->cap |= sts_cap << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; 2630 if (chip->support_bw160) 2631 vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ | 2632 IEEE80211_VHT_CAP_SHORT_GI_160; 2633 vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(rx_mcs_map); 2634 vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(tx_mcs_map); 2635 vht_cap->vht_mcs.rx_highest = highest[hal->rx_nss - 1]; 2636 vht_cap->vht_mcs.tx_highest = highest[hal->tx_nss - 1]; 2637 } 2638 2639 #define RTW89_SBAND_IFTYPES_NR 2 2640 2641 static void rtw89_init_he_cap(struct rtw89_dev *rtwdev, 2642 enum nl80211_band band, 2643 struct ieee80211_supported_band *sband) 2644 { 2645 const struct rtw89_chip_info *chip = rtwdev->chip; 2646 struct rtw89_hal *hal = &rtwdev->hal; 2647 struct ieee80211_sband_iftype_data *iftype_data; 2648 bool no_ng16 = (chip->chip_id == RTL8852A && hal->cv == CHIP_CBV) || 2649 (chip->chip_id == RTL8852B && hal->cv == CHIP_CAV); 2650 u16 mcs_map = 0; 2651 int i; 2652 int nss = hal->rx_nss; 2653 int idx = 0; 2654 2655 iftype_data = kcalloc(RTW89_SBAND_IFTYPES_NR, sizeof(*iftype_data), GFP_KERNEL); 2656 if (!iftype_data) 2657 return; 2658 2659 for (i = 0; i < 8; i++) { 2660 if (i < nss) 2661 mcs_map |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2); 2662 else 2663 mcs_map |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2); 2664 } 2665 2666 for (i = 0; i < NUM_NL80211_IFTYPES; i++) { 2667 struct ieee80211_sta_he_cap *he_cap; 2668 u8 *mac_cap_info; 2669 u8 *phy_cap_info; 2670 2671 switch (i) { 2672 case NL80211_IFTYPE_STATION: 2673 case NL80211_IFTYPE_AP: 2674 break; 2675 default: 2676 continue; 2677 } 2678 2679 if (idx >= RTW89_SBAND_IFTYPES_NR) { 2680 rtw89_warn(rtwdev, "run out of iftype_data\n"); 2681 break; 2682 } 2683 2684 iftype_data[idx].types_mask = BIT(i); 2685 he_cap = &iftype_data[idx].he_cap; 2686 mac_cap_info = he_cap->he_cap_elem.mac_cap_info; 2687 phy_cap_info = he_cap->he_cap_elem.phy_cap_info; 2688 2689 he_cap->has_he = true; 2690 mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE; 2691 if (i == NL80211_IFTYPE_STATION) 2692 mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US; 2693 mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_ALL_ACK | 2694 IEEE80211_HE_MAC_CAP2_BSR; 2695 mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2; 2696 if (i == NL80211_IFTYPE_AP) 2697 mac_cap_info[3] |= IEEE80211_HE_MAC_CAP3_OMI_CONTROL; 2698 mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_OPS | 2699 IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU; 2700 if (i == NL80211_IFTYPE_STATION) 2701 mac_cap_info[5] = IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX; 2702 if (band == NL80211_BAND_2GHZ) { 2703 phy_cap_info[0] = 2704 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G; 2705 } else { 2706 phy_cap_info[0] = 2707 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G; 2708 if (chip->support_bw160) 2709 phy_cap_info[0] |= IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G; 2710 } 2711 phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | 2712 IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | 2713 IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US; 2714 phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | 2715 IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | 2716 IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | 2717 IEEE80211_HE_PHY_CAP2_DOPPLER_TX; 2718 phy_cap_info[3] = IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM; 2719 if (i == NL80211_IFTYPE_STATION) 2720 phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM | 2721 IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2; 2722 if (i == NL80211_IFTYPE_AP) 2723 phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU; 2724 phy_cap_info[4] = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE | 2725 IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4; 2726 if (chip->support_bw160) 2727 phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4; 2728 phy_cap_info[5] = no_ng16 ? 0 : 2729 IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK | 2730 IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK; 2731 phy_cap_info[6] = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU | 2732 IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU | 2733 IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB | 2734 IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE; 2735 phy_cap_info[7] = IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP | 2736 IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI | 2737 IEEE80211_HE_PHY_CAP7_MAX_NC_1; 2738 phy_cap_info[8] = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI | 2739 IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI | 2740 IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996; 2741 if (chip->support_bw160) 2742 phy_cap_info[8] |= IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU | 2743 IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU; 2744 phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM | 2745 IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU | 2746 IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | 2747 IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB | 2748 u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US, 2749 IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK); 2750 if (i == NL80211_IFTYPE_STATION) 2751 phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU; 2752 he_cap->he_mcs_nss_supp.rx_mcs_80 = cpu_to_le16(mcs_map); 2753 he_cap->he_mcs_nss_supp.tx_mcs_80 = cpu_to_le16(mcs_map); 2754 if (chip->support_bw160) { 2755 he_cap->he_mcs_nss_supp.rx_mcs_160 = cpu_to_le16(mcs_map); 2756 he_cap->he_mcs_nss_supp.tx_mcs_160 = cpu_to_le16(mcs_map); 2757 } 2758 2759 if (band == NL80211_BAND_6GHZ) { 2760 __le16 capa; 2761 2762 capa = le16_encode_bits(IEEE80211_HT_MPDU_DENSITY_NONE, 2763 IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) | 2764 le16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K, 2765 IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) | 2766 le16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454, 2767 IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN); 2768 iftype_data[idx].he_6ghz_capa.capa = capa; 2769 } 2770 2771 idx++; 2772 } 2773 2774 sband->iftype_data = iftype_data; 2775 sband->n_iftype_data = idx; 2776 } 2777 2778 static int rtw89_core_set_supported_band(struct rtw89_dev *rtwdev) 2779 { 2780 struct ieee80211_hw *hw = rtwdev->hw; 2781 struct ieee80211_supported_band *sband_2ghz = NULL, *sband_5ghz = NULL; 2782 struct ieee80211_supported_band *sband_6ghz = NULL; 2783 u32 size = sizeof(struct ieee80211_supported_band); 2784 u8 support_bands = rtwdev->chip->support_bands; 2785 2786 if (support_bands & BIT(NL80211_BAND_2GHZ)) { 2787 sband_2ghz = kmemdup(&rtw89_sband_2ghz, size, GFP_KERNEL); 2788 if (!sband_2ghz) 2789 goto err; 2790 rtw89_init_ht_cap(rtwdev, &sband_2ghz->ht_cap); 2791 rtw89_init_he_cap(rtwdev, NL80211_BAND_2GHZ, sband_2ghz); 2792 hw->wiphy->bands[NL80211_BAND_2GHZ] = sband_2ghz; 2793 } 2794 2795 if (support_bands & BIT(NL80211_BAND_5GHZ)) { 2796 sband_5ghz = kmemdup(&rtw89_sband_5ghz, size, GFP_KERNEL); 2797 if (!sband_5ghz) 2798 goto err; 2799 rtw89_init_ht_cap(rtwdev, &sband_5ghz->ht_cap); 2800 rtw89_init_vht_cap(rtwdev, &sband_5ghz->vht_cap); 2801 rtw89_init_he_cap(rtwdev, NL80211_BAND_5GHZ, sband_5ghz); 2802 hw->wiphy->bands[NL80211_BAND_5GHZ] = sband_5ghz; 2803 } 2804 2805 if (support_bands & BIT(NL80211_BAND_6GHZ)) { 2806 sband_6ghz = kmemdup(&rtw89_sband_6ghz, size, GFP_KERNEL); 2807 if (!sband_6ghz) 2808 goto err; 2809 rtw89_init_he_cap(rtwdev, NL80211_BAND_6GHZ, sband_6ghz); 2810 hw->wiphy->bands[NL80211_BAND_6GHZ] = sband_6ghz; 2811 } 2812 2813 return 0; 2814 2815 err: 2816 hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL; 2817 hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL; 2818 hw->wiphy->bands[NL80211_BAND_6GHZ] = NULL; 2819 if (sband_2ghz) 2820 kfree(sband_2ghz->iftype_data); 2821 if (sband_5ghz) 2822 kfree(sband_5ghz->iftype_data); 2823 if (sband_6ghz) 2824 kfree(sband_6ghz->iftype_data); 2825 kfree(sband_2ghz); 2826 kfree(sband_5ghz); 2827 kfree(sband_6ghz); 2828 return -ENOMEM; 2829 } 2830 2831 static void rtw89_core_clr_supported_band(struct rtw89_dev *rtwdev) 2832 { 2833 struct ieee80211_hw *hw = rtwdev->hw; 2834 2835 kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]->iftype_data); 2836 kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]->iftype_data); 2837 if (hw->wiphy->bands[NL80211_BAND_6GHZ]) 2838 kfree(hw->wiphy->bands[NL80211_BAND_6GHZ]->iftype_data); 2839 kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]); 2840 kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]); 2841 kfree(hw->wiphy->bands[NL80211_BAND_6GHZ]); 2842 hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL; 2843 hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL; 2844 hw->wiphy->bands[NL80211_BAND_6GHZ] = NULL; 2845 } 2846 2847 static void rtw89_core_ppdu_sts_init(struct rtw89_dev *rtwdev) 2848 { 2849 int i; 2850 2851 for (i = 0; i < RTW89_PHY_MAX; i++) 2852 skb_queue_head_init(&rtwdev->ppdu_sts.rx_queue[i]); 2853 for (i = 0; i < RTW89_PHY_MAX; i++) 2854 rtwdev->ppdu_sts.curr_rx_ppdu_cnt[i] = U8_MAX; 2855 } 2856 2857 void rtw89_core_update_beacon_work(struct work_struct *work) 2858 { 2859 struct rtw89_dev *rtwdev; 2860 struct rtw89_vif *rtwvif = container_of(work, struct rtw89_vif, 2861 update_beacon_work); 2862 2863 if (rtwvif->net_type != RTW89_NET_TYPE_AP_MODE) 2864 return; 2865 2866 rtwdev = rtwvif->rtwdev; 2867 mutex_lock(&rtwdev->mutex); 2868 rtw89_fw_h2c_update_beacon(rtwdev, rtwvif); 2869 mutex_unlock(&rtwdev->mutex); 2870 } 2871 2872 int rtw89_core_start(struct rtw89_dev *rtwdev) 2873 { 2874 int ret; 2875 2876 rtwdev->mac.qta_mode = RTW89_QTA_SCC; 2877 ret = rtw89_mac_init(rtwdev); 2878 if (ret) { 2879 rtw89_err(rtwdev, "mac init fail, ret:%d\n", ret); 2880 return ret; 2881 } 2882 2883 rtw89_btc_ntfy_poweron(rtwdev); 2884 2885 /* efuse process */ 2886 2887 /* pre-config BB/RF, BB reset/RFC reset */ 2888 rtw89_chip_disable_bb_rf(rtwdev); 2889 ret = rtw89_chip_enable_bb_rf(rtwdev); 2890 if (ret) 2891 return ret; 2892 2893 rtw89_phy_init_bb_reg(rtwdev); 2894 rtw89_phy_init_rf_reg(rtwdev); 2895 2896 rtw89_btc_ntfy_init(rtwdev, BTC_MODE_NORMAL); 2897 2898 rtw89_phy_dm_init(rtwdev); 2899 2900 rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true); 2901 rtw89_mac_update_rts_threshold(rtwdev, RTW89_MAC_0); 2902 2903 ret = rtw89_hci_start(rtwdev); 2904 if (ret) { 2905 rtw89_err(rtwdev, "failed to start hci\n"); 2906 return ret; 2907 } 2908 2909 ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->track_work, 2910 RTW89_TRACK_WORK_PERIOD); 2911 2912 set_bit(RTW89_FLAG_RUNNING, rtwdev->flags); 2913 2914 rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON); 2915 rtw89_fw_h2c_fw_log(rtwdev, rtwdev->fw.fw_log_enable); 2916 rtw89_fw_h2c_init_ba_cam(rtwdev); 2917 2918 return 0; 2919 } 2920 2921 void rtw89_core_stop(struct rtw89_dev *rtwdev) 2922 { 2923 struct rtw89_btc *btc = &rtwdev->btc; 2924 2925 /* Prvent to stop twice; enter_ips and ops_stop */ 2926 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 2927 return; 2928 2929 rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_OFF); 2930 2931 clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags); 2932 2933 mutex_unlock(&rtwdev->mutex); 2934 2935 cancel_work_sync(&rtwdev->c2h_work); 2936 cancel_work_sync(&btc->eapol_notify_work); 2937 cancel_work_sync(&btc->arp_notify_work); 2938 cancel_work_sync(&btc->dhcp_notify_work); 2939 cancel_work_sync(&btc->icmp_notify_work); 2940 cancel_delayed_work_sync(&rtwdev->txq_reinvoke_work); 2941 cancel_delayed_work_sync(&rtwdev->track_work); 2942 cancel_delayed_work_sync(&rtwdev->coex_act1_work); 2943 cancel_delayed_work_sync(&rtwdev->coex_bt_devinfo_work); 2944 cancel_delayed_work_sync(&rtwdev->coex_rfk_chk_work); 2945 cancel_delayed_work_sync(&rtwdev->cfo_track_work); 2946 cancel_delayed_work_sync(&rtwdev->forbid_ba_work); 2947 2948 mutex_lock(&rtwdev->mutex); 2949 2950 rtw89_btc_ntfy_poweroff(rtwdev); 2951 rtw89_hci_flush_queues(rtwdev, BIT(rtwdev->hw->queues) - 1, true); 2952 rtw89_mac_flush_txq(rtwdev, BIT(rtwdev->hw->queues) - 1, true); 2953 rtw89_hci_stop(rtwdev); 2954 rtw89_hci_deinit(rtwdev); 2955 rtw89_mac_pwr_off(rtwdev); 2956 rtw89_hci_reset(rtwdev); 2957 } 2958 2959 int rtw89_core_init(struct rtw89_dev *rtwdev) 2960 { 2961 struct rtw89_btc *btc = &rtwdev->btc; 2962 int ret; 2963 u8 band; 2964 2965 INIT_LIST_HEAD(&rtwdev->ba_list); 2966 INIT_LIST_HEAD(&rtwdev->forbid_ba_list); 2967 INIT_LIST_HEAD(&rtwdev->rtwvifs_list); 2968 INIT_LIST_HEAD(&rtwdev->early_h2c_list); 2969 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 2970 if (!(rtwdev->chip->support_bands & BIT(band))) 2971 continue; 2972 INIT_LIST_HEAD(&rtwdev->scan_info.pkt_list[band]); 2973 } 2974 INIT_WORK(&rtwdev->ba_work, rtw89_core_ba_work); 2975 INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work); 2976 INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work); 2977 INIT_DELAYED_WORK(&rtwdev->track_work, rtw89_track_work); 2978 INIT_DELAYED_WORK(&rtwdev->coex_act1_work, rtw89_coex_act1_work); 2979 INIT_DELAYED_WORK(&rtwdev->coex_bt_devinfo_work, rtw89_coex_bt_devinfo_work); 2980 INIT_DELAYED_WORK(&rtwdev->coex_rfk_chk_work, rtw89_coex_rfk_chk_work); 2981 INIT_DELAYED_WORK(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work); 2982 INIT_DELAYED_WORK(&rtwdev->forbid_ba_work, rtw89_forbid_ba_work); 2983 rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0); 2984 spin_lock_init(&rtwdev->ba_lock); 2985 spin_lock_init(&rtwdev->rpwm_lock); 2986 mutex_init(&rtwdev->mutex); 2987 mutex_init(&rtwdev->rf_mutex); 2988 rtwdev->total_sta_assoc = 0; 2989 2990 INIT_WORK(&rtwdev->c2h_work, rtw89_fw_c2h_work); 2991 INIT_WORK(&rtwdev->ips_work, rtw89_ips_work); 2992 skb_queue_head_init(&rtwdev->c2h_queue); 2993 rtw89_core_ppdu_sts_init(rtwdev); 2994 rtw89_traffic_stats_init(rtwdev, &rtwdev->stats); 2995 2996 rtwdev->ps_mode = rtw89_update_ps_mode(rtwdev); 2997 rtwdev->hal.rx_fltr = DEFAULT_AX_RX_FLTR; 2998 2999 INIT_WORK(&btc->eapol_notify_work, rtw89_btc_ntfy_eapol_packet_work); 3000 INIT_WORK(&btc->arp_notify_work, rtw89_btc_ntfy_arp_packet_work); 3001 INIT_WORK(&btc->dhcp_notify_work, rtw89_btc_ntfy_dhcp_packet_work); 3002 INIT_WORK(&btc->icmp_notify_work, rtw89_btc_ntfy_icmp_packet_work); 3003 3004 ret = rtw89_load_firmware(rtwdev); 3005 if (ret) { 3006 rtw89_warn(rtwdev, "no firmware loaded\n"); 3007 return ret; 3008 } 3009 rtw89_ser_init(rtwdev); 3010 rtw89_entity_init(rtwdev); 3011 3012 return 0; 3013 } 3014 EXPORT_SYMBOL(rtw89_core_init); 3015 3016 void rtw89_core_deinit(struct rtw89_dev *rtwdev) 3017 { 3018 rtw89_ser_deinit(rtwdev); 3019 rtw89_unload_firmware(rtwdev); 3020 rtw89_fw_free_all_early_h2c(rtwdev); 3021 3022 destroy_workqueue(rtwdev->txq_wq); 3023 mutex_destroy(&rtwdev->rf_mutex); 3024 mutex_destroy(&rtwdev->mutex); 3025 } 3026 EXPORT_SYMBOL(rtw89_core_deinit); 3027 3028 void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3029 const u8 *mac_addr, bool hw_scan) 3030 { 3031 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 3032 3033 rtwdev->scanning = true; 3034 rtw89_leave_lps(rtwdev); 3035 if (hw_scan && (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE)) 3036 rtw89_leave_ips(rtwdev); 3037 3038 ether_addr_copy(rtwvif->mac_addr, mac_addr); 3039 rtw89_btc_ntfy_scan_start(rtwdev, RTW89_PHY_0, chan->band_type); 3040 rtw89_chip_rfk_scan(rtwdev, true); 3041 rtw89_hci_recalc_int_mit(rtwdev); 3042 3043 rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, mac_addr); 3044 } 3045 3046 void rtw89_core_scan_complete(struct rtw89_dev *rtwdev, 3047 struct ieee80211_vif *vif, bool hw_scan) 3048 { 3049 struct rtw89_vif *rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL; 3050 3051 if (!rtwvif) 3052 return; 3053 3054 ether_addr_copy(rtwvif->mac_addr, vif->addr); 3055 rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL); 3056 3057 rtw89_chip_rfk_scan(rtwdev, false); 3058 rtw89_btc_ntfy_scan_finish(rtwdev, RTW89_PHY_0); 3059 3060 rtwdev->scanning = false; 3061 rtwdev->dig.bypass_dig = true; 3062 if (hw_scan && (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE)) 3063 ieee80211_queue_work(rtwdev->hw, &rtwdev->ips_work); 3064 } 3065 3066 static void rtw89_read_chip_ver(struct rtw89_dev *rtwdev) 3067 { 3068 const struct rtw89_chip_info *chip = rtwdev->chip; 3069 u8 cv; 3070 3071 cv = rtw89_read32_mask(rtwdev, R_AX_SYS_CFG1, B_AX_CHIP_VER_MASK); 3072 if (chip->chip_id == RTL8852A && cv <= CHIP_CBV) { 3073 if (rtw89_read32(rtwdev, R_AX_GPIO0_7_FUNC_SEL) == RTW89_R32_DEAD) 3074 cv = CHIP_CAV; 3075 else 3076 cv = CHIP_CBV; 3077 } 3078 3079 rtwdev->hal.cv = cv; 3080 } 3081 3082 static void rtw89_core_setup_phycap(struct rtw89_dev *rtwdev) 3083 { 3084 rtwdev->hal.support_cckpd = 3085 !(rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv <= CHIP_CBV) && 3086 !(rtwdev->chip->chip_id == RTL8852B && rtwdev->hal.cv <= CHIP_CAV); 3087 rtwdev->hal.support_igi = 3088 rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv <= CHIP_CBV; 3089 } 3090 3091 static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev) 3092 { 3093 int ret; 3094 3095 ret = rtw89_mac_partial_init(rtwdev); 3096 if (ret) 3097 return ret; 3098 3099 ret = rtw89_parse_efuse_map(rtwdev); 3100 if (ret) 3101 return ret; 3102 3103 ret = rtw89_parse_phycap_map(rtwdev); 3104 if (ret) 3105 return ret; 3106 3107 ret = rtw89_mac_setup_phycap(rtwdev); 3108 if (ret) 3109 return ret; 3110 3111 rtw89_core_setup_phycap(rtwdev); 3112 3113 rtw89_mac_pwr_off(rtwdev); 3114 3115 return 0; 3116 } 3117 3118 static int rtw89_chip_board_info_setup(struct rtw89_dev *rtwdev) 3119 { 3120 rtw89_chip_fem_setup(rtwdev); 3121 3122 return 0; 3123 } 3124 3125 int rtw89_chip_info_setup(struct rtw89_dev *rtwdev) 3126 { 3127 int ret; 3128 3129 rtw89_read_chip_ver(rtwdev); 3130 3131 ret = rtw89_wait_firmware_completion(rtwdev); 3132 if (ret) { 3133 rtw89_err(rtwdev, "failed to wait firmware completion\n"); 3134 return ret; 3135 } 3136 3137 ret = rtw89_fw_recognize(rtwdev); 3138 if (ret) { 3139 rtw89_err(rtwdev, "failed to recognize firmware\n"); 3140 return ret; 3141 } 3142 3143 ret = rtw89_chip_efuse_info_setup(rtwdev); 3144 if (ret) 3145 return ret; 3146 3147 ret = rtw89_chip_board_info_setup(rtwdev); 3148 if (ret) 3149 return ret; 3150 3151 return 0; 3152 } 3153 EXPORT_SYMBOL(rtw89_chip_info_setup); 3154 3155 static int rtw89_core_register_hw(struct rtw89_dev *rtwdev) 3156 { 3157 struct ieee80211_hw *hw = rtwdev->hw; 3158 struct rtw89_efuse *efuse = &rtwdev->efuse; 3159 int ret; 3160 int tx_headroom = IEEE80211_HT_CTL_LEN; 3161 3162 hw->vif_data_size = sizeof(struct rtw89_vif); 3163 hw->sta_data_size = sizeof(struct rtw89_sta); 3164 hw->txq_data_size = sizeof(struct rtw89_txq); 3165 hw->chanctx_data_size = sizeof(struct rtw89_chanctx_cfg); 3166 3167 SET_IEEE80211_PERM_ADDR(hw, efuse->addr); 3168 3169 hw->extra_tx_headroom = tx_headroom; 3170 hw->queues = IEEE80211_NUM_ACS; 3171 hw->max_rx_aggregation_subframes = RTW89_MAX_RX_AGG_NUM; 3172 hw->max_tx_aggregation_subframes = RTW89_MAX_TX_AGG_NUM; 3173 3174 ieee80211_hw_set(hw, SIGNAL_DBM); 3175 ieee80211_hw_set(hw, HAS_RATE_CONTROL); 3176 ieee80211_hw_set(hw, MFP_CAPABLE); 3177 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); 3178 ieee80211_hw_set(hw, AMPDU_AGGREGATION); 3179 ieee80211_hw_set(hw, RX_INCLUDES_FCS); 3180 ieee80211_hw_set(hw, TX_AMSDU); 3181 ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); 3182 ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); 3183 ieee80211_hw_set(hw, SUPPORTS_PS); 3184 ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); 3185 ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); 3186 ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); 3187 3188 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 3189 BIT(NL80211_IFTYPE_AP); 3190 hw->wiphy->available_antennas_tx = BIT(rtwdev->chip->rf_path_num) - 1; 3191 hw->wiphy->available_antennas_rx = BIT(rtwdev->chip->rf_path_num) - 1; 3192 3193 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS | 3194 WIPHY_FLAG_TDLS_EXTERNAL_SETUP; 3195 hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; 3196 3197 hw->wiphy->max_scan_ssids = RTW89_SCANOFLD_MAX_SSID; 3198 hw->wiphy->max_scan_ie_len = RTW89_SCANOFLD_MAX_IE_LEN; 3199 3200 wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0); 3201 3202 ret = rtw89_core_set_supported_band(rtwdev); 3203 if (ret) { 3204 rtw89_err(rtwdev, "failed to set supported band\n"); 3205 return ret; 3206 } 3207 3208 hw->wiphy->reg_notifier = rtw89_regd_notifier; 3209 hw->wiphy->sar_capa = &rtw89_sar_capa; 3210 3211 ret = ieee80211_register_hw(hw); 3212 if (ret) { 3213 rtw89_err(rtwdev, "failed to register hw\n"); 3214 goto err; 3215 } 3216 3217 ret = rtw89_regd_init(rtwdev, rtw89_regd_notifier); 3218 if (ret) { 3219 rtw89_err(rtwdev, "failed to init regd\n"); 3220 goto err; 3221 } 3222 3223 return 0; 3224 3225 err: 3226 return ret; 3227 } 3228 3229 static void rtw89_core_unregister_hw(struct rtw89_dev *rtwdev) 3230 { 3231 struct ieee80211_hw *hw = rtwdev->hw; 3232 3233 ieee80211_unregister_hw(hw); 3234 rtw89_core_clr_supported_band(rtwdev); 3235 } 3236 3237 int rtw89_core_register(struct rtw89_dev *rtwdev) 3238 { 3239 int ret; 3240 3241 ret = rtw89_core_register_hw(rtwdev); 3242 if (ret) { 3243 rtw89_err(rtwdev, "failed to register core hw\n"); 3244 return ret; 3245 } 3246 3247 rtw89_debugfs_init(rtwdev); 3248 3249 return 0; 3250 } 3251 EXPORT_SYMBOL(rtw89_core_register); 3252 3253 void rtw89_core_unregister(struct rtw89_dev *rtwdev) 3254 { 3255 rtw89_core_unregister_hw(rtwdev); 3256 } 3257 EXPORT_SYMBOL(rtw89_core_unregister); 3258 3259 struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device, 3260 u32 bus_data_size, 3261 const struct rtw89_chip_info *chip) 3262 { 3263 struct ieee80211_hw *hw; 3264 struct rtw89_dev *rtwdev; 3265 struct ieee80211_ops *ops; 3266 u32 driver_data_size; 3267 u32 early_feat_map = 0; 3268 bool no_chanctx; 3269 3270 rtw89_early_fw_feature_recognize(device, chip, &early_feat_map); 3271 3272 ops = kmemdup(&rtw89_ops, sizeof(rtw89_ops), GFP_KERNEL); 3273 if (!ops) 3274 goto err; 3275 3276 no_chanctx = chip->support_chanctx_num == 0 || 3277 !(early_feat_map & BIT(RTW89_FW_FEATURE_SCAN_OFFLOAD)); 3278 3279 if (no_chanctx) { 3280 ops->add_chanctx = NULL; 3281 ops->remove_chanctx = NULL; 3282 ops->change_chanctx = NULL; 3283 ops->assign_vif_chanctx = NULL; 3284 ops->unassign_vif_chanctx = NULL; 3285 } 3286 3287 driver_data_size = sizeof(struct rtw89_dev) + bus_data_size; 3288 hw = ieee80211_alloc_hw(driver_data_size, ops); 3289 if (!hw) 3290 goto err; 3291 3292 rtwdev = hw->priv; 3293 rtwdev->hw = hw; 3294 rtwdev->dev = device; 3295 rtwdev->ops = ops; 3296 rtwdev->chip = chip; 3297 3298 rtw89_debug(rtwdev, RTW89_DBG_FW, "probe driver %s chanctx\n", 3299 no_chanctx ? "without" : "with"); 3300 3301 return rtwdev; 3302 3303 err: 3304 kfree(ops); 3305 return NULL; 3306 } 3307 EXPORT_SYMBOL(rtw89_alloc_ieee80211_hw); 3308 3309 void rtw89_free_ieee80211_hw(struct rtw89_dev *rtwdev) 3310 { 3311 kfree(rtwdev->ops); 3312 ieee80211_free_hw(rtwdev->hw); 3313 } 3314 EXPORT_SYMBOL(rtw89_free_ieee80211_hw); 3315 3316 MODULE_AUTHOR("Realtek Corporation"); 3317 MODULE_DESCRIPTION("Realtek 802.11ax wireless core module"); 3318 MODULE_LICENSE("Dual BSD/GPL"); 3319