// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2022 MediaTek Inc.
 */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "coredump.h"
#include "mt7996.h"
#include "../dma.h"
#include "mac.h"
#include "mcu.h"

#define to_rssi(field, rcpi)	((FIELD_GET(field, rcpi) - 220) / 2)

static const struct mt7996_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0,  6, 32, 28, 0,  990, 5010, 17, 1,  1 },
		[6] =  { 1, 0,  9, 32, 28, 0,  615, 5010, 27, 1,  1 },
		[7] =  { 1, 0, 15, 32, 28, 0,  240,  445, 27, 1,  1 },
		[8] =  { 1, 0, 12, 32, 28, 0,  240,  510, 42, 1,  1 },
		[9] =  { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0,  0, 12, 32, 28, { }, 126 },
		[10] = { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0,  0, 15, 32, 24, { }, 126 },
		[11] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0,  0, 18, 32, 28, { }, 54 },
		[12] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0,  0, 27, 32, 24, { }, 54 },
	},
};

static const struct mt7996_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0,  8,  32, 28, 0, 508, 3076, 13, 1,  1 },
		[1] = { 1, 0, 12,  32, 28, 0, 140,  240, 17, 1,  1 },
		[2] = { 1, 0,  8,  32, 28, 0, 190,  510, 22, 1,  1 },
		[3] = { 1, 0,  6,  32, 28, 0, 190,  510, 32, 1,  1 },
		[4] = { 1, 0,  9, 255, 28, 0, 323,  343, 13, 1, 32 },
	},
};

static const struct mt7996_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] =  { 1, 0,  8,  32, 28, 0,  508, 3076,  13, 1,  1 },
		[1] =  { 1, 0, 12,  32, 28, 0,  140,  240,  17, 1,  1 },
		[2] =  { 1, 0,  8,  32, 28, 0,  190,  510,  22, 1,  1 },
		[3] =  { 1, 0,  6,  32, 28, 0,  190,  510,  32, 1,  1 },
		[4] =  { 1, 0,  9, 255, 28, 0,  323,  343,  13, 1, 32 },
		[13] = { 1, 0,  7,  32, 28, 0, 3836, 3856,  14, 1,  1 },
		[14] = { 1, 0,  6,  32, 28, 0,  615, 5010, 110, 1,  1 },
		[15] = { 1, 1,  0,   0,  0, 0,   15, 5010, 110, 0,  0, 12, 32, 28 },
	},
};

static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev,
					    u16 idx, u8 band_idx)
{
	struct mt7996_sta_link *msta_link;
	struct mt7996_sta *msta;
	struct mt7996_vif *mvif;
	struct mt76_wcid *wcid;
	int i;

	wcid = mt76_wcid_ptr(dev, idx);
	if (!wcid || !wcid->sta)
		return NULL;

	if (!mt7996_band_valid(dev, band_idx))
		return NULL;

	if (wcid->phy_idx == band_idx)
		return wcid;

	msta_link = container_of(wcid, struct mt7996_sta_link, wcid);
	msta = msta_link->sta;
	if (!msta || !msta->vif)
		return NULL;

	mvif = msta->vif;
	for (i = 0; i < ARRAY_SIZE(mvif->mt76.link); i++) {
		struct mt76_vif_link *mlink;

		mlink = rcu_dereference(mvif->mt76.link[i]);
		if (!mlink)
			continue;

		if (mlink->band_idx != band_idx)
			continue;

		msta_link = rcu_dereference(msta->link[i]);
		break;
	}

	return &msta_link->wcid;
}

bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}

u32 mt7996_mac_wtbl_lmac_addr(struct mt7996_dev *dev, u16 wcid, u8 dw)
{
	mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
		FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));

	return MT_WTBL_LMAC_OFFS(wcid, dw);
}
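
/* Poll per-station airtime from the WTBL (the TX/RX counters start at DW20,
 * one pair per AC) and report it to mac80211; the ACK/CTS/BA RSSI bytes live
 * in DW34.
 */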
static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
{
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct mt7996_sta_link *msta_link;
	struct mt76_vif_link *mlink;
	struct ieee80211_sta *sta;
	struct mt7996_sta *msta;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	struct mt76_wcid *wcid;
	int i;

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->mt76.sta_poll_lock);

	rcu_read_lock();

	while (true) {
		bool clear = false;
		u32 addr, val;
		u16 idx;
		s8 rssi[4];

		spin_lock_bh(&dev->mt76.sta_poll_lock);
		if (list_empty(&sta_poll_list)) {
			spin_unlock_bh(&dev->mt76.sta_poll_lock);
			break;
		}
		msta_link = list_first_entry(&sta_poll_list,
					     struct mt7996_sta_link,
					     wcid.poll_list);
		msta = msta_link->sta;
		wcid = &msta_link->wcid;
		list_del_init(&wcid->poll_list);
		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		idx = wcid->idx;

		/* refresh peer's airtime reporting */
		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 20);

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = msta_link->airtime_ac[i];
			u32 rx_last = msta_link->airtime_ac[i + 4];

			msta_link->airtime_ac[i] = mt76_rr(dev, addr);
			msta_link->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			tx_time[i] = msta_link->airtime_ac[i] - tx_last;
			rx_time[i] = msta_link->airtime_ac[i + 4] - rx_last;

			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

		if (clear) {
			mt7996_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta_link->airtime_ac, 0,
			       sizeof(msta_link->airtime_ac));
		}

		if (!wcid->sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u8 q = mt76_connac_lmac_mapping(i);
			u32 tx_cur = tx_time[q];
			u32 rx_cur = rx_time[q];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur, rx_cur);
		}

		/* get signal strength of resp frames (CTS/BA/ACK) */
		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 34);
		val = mt76_rr(dev, addr);

		rssi[0] = to_rssi(GENMASK(7, 0), val);
		rssi[1] = to_rssi(GENMASK(15, 8), val);
		rssi[2] = to_rssi(GENMASK(23, 16), val);
		rssi[3] = to_rssi(GENMASK(31, 24), val);

		mlink = rcu_dereference(msta->vif->mt76.link[wcid->link_id]);
		if (mlink) {
			struct mt76_phy *mphy = mt76_vif_link_phy(mlink);

			if (mphy)
				msta_link->ack_signal =
					mt76_rx_signal(mphy->antenna_mask,
						       rssi);
		}

		ewma_avg_signal_add(&msta_link->avg_ack_signal,
				    -msta_link->ack_signal);
	}

	rcu_read_unlock();
}
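
/* mac80211 reassembles fragments in software and expects a full 802.11
 * header, so the hardware 802.3 header translation has to be undone for the
 * first fragment below.
 */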
/* The HW does not translate the mac header to 802.3 for mesh point */
static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
	struct mt7996_sta *msta = (struct mt7996_sta *)status->wcid;
	__le32 *rxd = (__le32 *)skb->data;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct ieee80211_hdr hdr;
	u16 frame_control;

	if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
	    MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
		return -EINVAL;

	if (!msta || !msta->vif)
		return -EINVAL;

	sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
	vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

	/* store the info from RXD and ethhdr to avoid being overridden */
	frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL);
	hdr.frame_control = cpu_to_le16(frame_control);
	hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_SEQ_CTRL));
	hdr.duration_id = 0;

	ether_addr_copy(hdr.addr1, vif->addr);
	ether_addr_copy(hdr.addr2, sta->addr);
	switch (frame_control & (IEEE80211_FCTL_TODS |
				 IEEE80211_FCTL_FROMDS)) {
	case 0:
		ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
		break;
	case IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_source);
		break;
	case IEEE80211_FCTL_TODS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		break;
	case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		ether_addr_copy(hdr.addr4, eth_hdr->h_source);
		break;
	default:
		return -EINVAL;
	}

	skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
	if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
	    eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
		ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
	else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
		ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
	else
		skb_pull(skb, 2);

	if (ieee80211_has_order(hdr.frame_control))
		memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[11],
		       IEEE80211_HT_CTL_LEN);
	if (ieee80211_is_data_qos(hdr.frame_control)) {
		__le16 qos_ctrl;

		qos_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_QOS_CTL));
		memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
		       IEEE80211_QOS_CTL_LEN);
	}

	if (ieee80211_has_a4(hdr.frame_control))
		memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
	else
		memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);

	return 0;
}
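
/* Decode the PHY RX vector into mac80211 rate info: rxv[0] carries the rate
 * index and NSTS, rxv[2] the TX mode, GI, STBC, DCM and frame mode (BW).
 */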
static int
mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
			struct mt76_rx_status *status,
			struct ieee80211_supported_band *sband,
			__le32 *rxv, u8 *mode)
{
	u32 v0, v2;
	u8 stbc, gi, bw, dcm, nss;
	int i, idx;
	bool cck = false;

	v0 = le32_to_cpu(rxv[0]);
	v2 = le32_to_cpu(rxv[2]);

	idx = FIELD_GET(MT_PRXV_TX_RATE, v0);
	i = idx;
	nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;

	stbc = FIELD_GET(MT_PRXV_HT_STBC, v2);
	gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v2);
	*mode = FIELD_GET(MT_PRXV_TX_MODE, v2);
	dcm = FIELD_GET(MT_PRXV_DCM, v2);
	bw = FIELD_GET(MT_PRXV_FRAME_MODE, v2);

	switch (*mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		i = mt76_get_rate(&dev->mt76, sband, i, cck);
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 31)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_VHT:
		status->nss = nss;
		status->encoding = RX_ENC_VHT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 11)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_HE_MU:
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
		status->nss = nss;
		status->encoding = RX_ENC_HE;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
			status->he_gi = gi;

		status->he_dcm = dcm;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		status->nss = nss;
		status->encoding = RX_ENC_EHT;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_EHT_GI_3_2)
			status->eht.gi = gi;
		break;
	default:
		return -EINVAL;
	}
	status->rate_idx = i;

	switch (bw) {
	case IEEE80211_STA_RX_BW_20:
		break;
	case IEEE80211_STA_RX_BW_40:
		if (*mode == MT_PHY_TYPE_HE_EXT_SU &&
		    (idx & MT_PRXV_TX_ER_SU_106T)) {
			status->bw = RATE_INFO_BW_HE_RU;
			status->he_ru =
				NL80211_RATE_INFO_HE_RU_ALLOC_106;
		} else {
			status->bw = RATE_INFO_BW_40;
		}
		break;
	case IEEE80211_STA_RX_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	case IEEE80211_STA_RX_BW_160:
		status->bw = RATE_INFO_BW_160;
		break;
	/* rxv reports bw 320-1 and 320-2 separately */
	case IEEE80211_STA_RX_BW_320:
	case IEEE80211_STA_RX_BW_320 + 1:
		status->bw = RATE_INFO_BW_320;
		break;
	default:
		return -EINVAL;
	}

	status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
	if (*mode < MT_PHY_TYPE_HE_SU && gi)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	return 0;
}

static void
mt7996_wed_check_ppe(struct mt7996_dev *dev, struct mt76_queue *q,
		     struct mt7996_sta *msta, struct sk_buff *skb,
		     u32 info)
{
	struct ieee80211_vif *vif;
	struct wireless_dev *wdev;

	if (!msta || !msta->vif)
		return;

	if (!mt76_queue_is_wed_rx(q))
		return;

	if (!(info & MT_DMA_INFO_PPE_VLD))
		return;

	vif = container_of((void *)msta->vif, struct ieee80211_vif,
			   drv_priv);
	wdev = ieee80211_vif_to_wdev(vif);
	skb->dev = wdev->netdev;

	mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb,
				 FIELD_GET(MT_DMA_PPE_CPU_REASON, info),
				 FIELD_GET(MT_DMA_PPE_ENTRY, info));
}
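
/* RX descriptors start with a fixed 8-DW header (rxd[0..7]); optional groups
 * 4/1/2/3/5 follow, each advertised by a MT_RXD1_NORMAL_GROUP_* bit. The
 * parser below walks them in order and bails out once a group would extend
 * past the end of the skb.
 */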
static int
mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
		   struct sk_buff *skb, u32 *info)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7996_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv = NULL;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 rxd3 = le32_to_cpu(rxd[3]);
	u32 rxd4 = le32_to_cpu(rxd[4]);
	u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM;
	u32 csum_status = *(u32 *)skb->cb;
	u32 mesh_mask = MT_RXD0_MESH | MT_RXD0_MHCP;
	bool is_mesh = (rxd0 & mesh_mask) == mesh_mask;
	bool unicast, insert_ccmp_hdr = false;
	u8 remove_pad, amsdu_info, band_idx;
	u8 mode = 0, qos_ctl = 0;
	bool hdr_trans;
	u16 hdr_gap;
	u16 seq_ctrl = 0;
	__le16 fc = 0;
	int idx;
	u8 hw_aggr = false;
	struct mt7996_sta *msta = NULL;

	hw_aggr = status->aggr;
	memset(status, 0, sizeof(*status));

	band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1);
	mphy = dev->mt76.phys[band_idx];
	phy = mphy->priv;
	status->phy_idx = mphy->band_idx;

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
		return -EINVAL;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	status->wcid = mt7996_rx_get_wcid(dev, idx, band_idx);

	if (status->wcid) {
		struct mt7996_sta_link *msta_link;

		msta_link = container_of(status->wcid, struct mt7996_sta_link,
					 wcid);
		msta = msta_link->sta;
		mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);
	}

	status->freq = mphy->chandef.chan->center_freq;
	status->band = mphy->chandef.chan->band;
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else if (status->band == NL80211_BAND_6GHZ)
		sband = &mphy->sband_6g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!sband->channels)
		return -EINVAL;

	if ((rxd3 & csum_mask) == csum_mask &&
	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd3 & MT_RXD3_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	rxd += 8;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		fc = cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL, v0));
		qos_ctl = FIELD_GET(MT_RXD10_QOS_CTL, v2);
		seq_ctrl = FIELD_GET(MT_RXD10_SEQ_CTRL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* RXD Group 3 - P-RXV */
	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		u32 v3;
		int ret;

		rxv = rxd;
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
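
		/* rxv[3] packs one RCPI byte per receive chain */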
		v3 = le32_to_cpu(rxv[3]);

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v3);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v3);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v3);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v3);

		/* RXD Group 5 - C-RXV */
		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			rxd += 24;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;
		}

		ret = mt7996_mac_fill_rx_rate(dev, status, sband, rxv, &mode);
		if (ret < 0)
			return ret;
	}

	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
	}

	/* IEEE 802.11 fragmentation can only be applied to unicast frames.
	 * Hence, drop fragments with multicast/broadcast RA.
	 * This check fixes vulnerabilities, like CVE-2020-26145.
	 */
	if ((ieee80211_has_morefrags(fc) || seq_ctrl & IEEE80211_SCTL_FRAG) &&
	    FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) != MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
	if (hdr_trans && ieee80211_has_morefrags(fc)) {
		if (mt7996_reverse_frag0_hdr_trans(skb, hdr_gap))
			return -EINVAL;
		hdr_trans = false;
	} else {
		int pad_start = 0;

		skb_pull(skb, hdr_gap);
		if (!hdr_trans && status->amsdu && !(ieee80211_has_a4(fc) && is_mesh)) {
			pad_start = ieee80211_get_hdrlen_from_skb(skb);
		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
			/* When header translation failure is indicated,
			 * the hardware will insert an extra 2-byte field
			 * containing the data length after the protocol
			 * type field. This happens either when the LLC-SNAP
			 * pattern did not match, or if a VLAN header was
			 * detected.
			 */
			pad_start = 12;
			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
				pad_start += 4;
			else
				pad_start = 0;
		}

		if (pad_start) {
			memmove(skb->data + 2, skb->data, pad_start);
			skb_pull(skb, 2);
		}
	}
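
	/* Hand mac80211 either a rebuilt 802.11 frame or, when hardware header
	 * translation succeeded, an 802.3 frame tagged with RX_FLAG_8023.
	 */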
	if (!hdr_trans) {
		struct ieee80211_hdr *hdr;

		if (insert_ccmp_hdr) {
			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

			mt76_insert_ccmp_hdr(skb, key_id);
		}

		hdr = mt76_skb_get_hdr(skb);
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			u8 *qos = ieee80211_get_qos_ctl(hdr);

			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *qos;

			/* Mesh DA/SA/Length will be stripped after hardware
			 * de-amsdu, so clear the A-MSDU-present bit here to
			 * mark it as a normal mesh frame.
			 */
			if (ieee80211_has_a4(fc) && is_mesh && status->amsdu)
				*qos &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}
		skb_set_mac_header(skb, (unsigned char *)hdr - skb->data);
	} else {
		status->flag |= RX_FLAG_8023;
		mt7996_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
				     *info);
	}

	if (rxv && !(status->flag & RX_FLAG_8023)) {
		switch (status->encoding) {
		case RX_ENC_EHT:
			mt76_connac3_mac_decode_eht_radiotap(skb, rxv, mode);
			break;
		case RX_ENC_HE:
			mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);
			break;
		default:
			break;
		}
	}

	if (!status->wcid || !ieee80211_is_data_qos(fc) || hw_aggr)
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(fc);
	status->qos_ctl = qos_ctl;
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);

	return 0;
}

static void
mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid)
{
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	u8 fc_type, fc_stype;
	u16 ethertype;
	bool wmm = false;
	u32 val;

	if (wcid->sta) {
		struct ieee80211_sta *sta = wcid_to_sta(wcid);

		wmm = sta->wme;
	}

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	ethertype = get_unaligned_be16(&skb->data[12]);
	if (ethertype >= ETH_P_802_3_MIN)
		val |= MT_TXD1_ETH_802_3;

	txwi[1] |= cpu_to_le32(val);

	fc_type = IEEE80211_FTYPE_DATA >> 2;
	fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	txwi[2] |= cpu_to_le32(val);

	if (wcid->amsdu)
		txwi[3] |= cpu_to_le32(MT_TXD3_HW_AMSDU);
}

static void
mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
			    struct sk_buff *skb,
			    struct ieee80211_key_conf *key,
			    struct mt76_wcid *wcid)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	__le16 fc = hdr->frame_control, sc = hdr->seq_ctrl;
	u16 seqno = le16_to_cpu(sc);
	u8 fc_type, fc_stype;
	u32 val;

	if (ieee80211_is_action(fc) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
		if (is_mt7990(&dev->mt76))
			txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TID_ADDBA, tid));
		tid = MT_TX_ADDBA;
	} else if (ieee80211_is_mgmt(hdr->frame_control)) {
		tid = MT_TX_NORMAL;
	}

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	if (!ieee80211_is_data(fc) || multicast ||
	    info->flags & IEEE80211_TX_CTL_USE_MINRATE)
		val |= MT_TXD1_FIXED_RATE;

	if (key && multicast && ieee80211_is_robust_mgmt_frame(skb)) {
		val |= MT_TXD1_BIP;
		txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
	}

	txwi[1] |= cpu_to_le32(val);

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
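
	/* Derive the TXD2 fragment field from the more-fragments flag and the
	 * fragment number: first/middle/last fragment, or an unfragmented MPDU.
	 */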
	if (ieee80211_has_morefrags(fc) && ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_FIRST);
	else if (ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_MID);
	else if (!ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_LAST);
	else
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_NONE);

	txwi[2] |= cpu_to_le32(val);

	txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast));
	if (ieee80211_is_beacon(fc)) {
		txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
		txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
	}

	if (multicast && ieee80211_vif_is_mld(info->control.vif)) {
		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
	}

	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
		txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
	}

	if (ieee80211_vif_is_mld(info->control.vif) &&
	    (multicast || unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))))
		txwi[5] |= cpu_to_le32(MT_TXD5_FL);

	if (ieee80211_is_nullfunc(fc) && ieee80211_has_a4(fc) &&
	    ieee80211_vif_is_mld(info->control.vif)) {
		txwi[5] |= cpu_to_le32(MT_TXD5_FL);
		txwi[6] |= cpu_to_le32(MT_TXD6_DIS_MAT);
	}

	if (!wcid->sta && ieee80211_is_mgmt(fc))
		txwi[6] |= cpu_to_le32(MT_TXD6_DIS_MAT);
}

void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid,
			   struct ieee80211_key_conf *key, int pid,
			   enum mt76_txq_id qid, u32 changed)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	struct mt76_vif_link *mlink = NULL;
	struct mt7996_vif *mvif;
	unsigned int link_id;
	u16 tx_count = 15;
	u32 val;
	bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
					 BSS_CHANGED_FILS_DISCOVERY));
	bool beacon = !!(changed & (BSS_CHANGED_BEACON |
				    BSS_CHANGED_BEACON_ENABLED)) && (!inband_disc);

	if (wcid != &dev->mt76.global_wcid)
		link_id = wcid->link_id;
	else
		link_id = u32_get_bits(info->control.flags,
				       IEEE80211_TX_CTRL_MLO_LINK);
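
	/* Resolve the per-link context: prefer the offchannel link when the
	 * wcid is marked offchannel, otherwise take the link the wcid is on.
	 */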
	mvif = vif ? (struct mt7996_vif *)vif->drv_priv : NULL;
	if (mvif) {
		if (wcid->offchannel)
			mlink = rcu_dereference(mvif->mt76.offchannel_link);
		if (!mlink)
			mlink = rcu_dereference(mvif->mt76.link[link_id]);
	}

	if (mlink) {
		omac_idx = mlink->omac_idx;
		wmm_idx = mlink->wmm_idx;
		band_idx = mlink->band_idx;
	}

	if (inband_disc) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_ALTX0;
	} else if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (qid >= MT_TXQ_PSD) {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = wmm_idx * MT7996_MAX_WMM_SETS +
			mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	if (band_idx)
		val |= FIELD_PREP(MT_TXD1_TGID, band_idx);

	txwi[1] = cpu_to_le32(val);
	txwi[2] = 0;

	val = MT_TXD3_SW_POWER_MGMT |
	      FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (key)
		val |= MT_TXD3_PROTECT_FRAME;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		val |= MT_TXD3_NO_ACK;

	txwi[3] = cpu_to_le32(val);
	txwi[4] = 0;

	val = FIELD_PREP(MT_TXD5_PID, pid);
	if (pid >= MT_PACKET_ID_FIRST)
		val |= MT_TXD5_TX_STATUS_HOST;
	txwi[5] = cpu_to_le32(val);

	val = MT_TXD6_DAS;
	if (q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0)
		val |= MT_TXD6_DIS_MAT;

	if (is_mt7996(&dev->mt76))
		val |= FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
	else if (is_8023 || !ieee80211_is_mgmt(hdr->frame_control))
		val |= FIELD_PREP(MT_TXD6_MSDU_CNT_V2, 1);

	txwi[6] = cpu_to_le32(val);
	txwi[7] = 0;

	if (is_8023)
		mt7996_mac_write_txwi_8023(dev, txwi, skb, wcid);
	else
		mt7996_mac_write_txwi_80211(dev, txwi, skb, key, wcid);

	if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
		bool mcast = ieee80211_is_data(hdr->frame_control) &&
			     is_multicast_ether_addr(hdr->addr1);
		u8 idx = MT7996_BASIC_RATES_TBL;

		if (mlink) {
			if (mcast && mlink->mcast_rates_idx)
				idx = mlink->mcast_rates_idx;
			else if (beacon && mlink->beacon_rates_idx)
				idx = mlink->beacon_rates_idx;
			else
				idx = mlink->basic_rates_idx;
		}

		val = FIELD_PREP(MT_TXD6_TX_RATE, idx) | MT_TXD6_FIXED_BW;
		if (mcast)
			val |= MT_TXD6_DIS_MAT;
		txwi[6] |= cpu_to_le32(val);
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}
}

static bool
mt7996_tx_use_mgmt(struct mt7996_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (ieee80211_is_mgmt(hdr->frame_control))
		return true;

	/* for SDO to bypass specific data frame */
	if (!mt7996_has_wa(dev)) {
		if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
			return true;

		if (ieee80211_has_a4(hdr->frame_control) &&
		    !ieee80211_is_data_present(hdr->frame_control))
			return true;
	}

	return false;
}
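
/* Build the TXD and cut-through TXP for one frame: a token ties the frame to
 * its txwi cache entry so it can be reclaimed from the TX-free event, and all
 * but the first DMA buffer are described in the mt76_connac_txp_common.
 */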
int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_connac_txp_common *txp;
	struct mt76_txwi_cache *t;
	int id, i, pid, nbuf = tx_info->nbuf - 1;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u8 *txwi = (u8 *)txwi_ptr;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
	memset(txwi_ptr, 0, MT_TXD_SIZE);
	/* Non-QoS data is transmitted with an 802.11 header, so the host
	 * needs to fill in the TXD.
	 */
	if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
		mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
				      pid, qid, 0);

	txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
	for (i = 0; i < nbuf; i++) {
		u16 len;

		len = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[i + 1].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		len |= FIELD_PREP(MT_TXP_DMA_ADDR_H,
				  tx_info->buf[i + 1].addr >> 32);
#endif

		txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
		txp->fw.len[i] = cpu_to_le16(len);
	}
	txp->fw.nbuf = nbuf;

	txp->fw.flags = cpu_to_le16(MT_CT_INFO_FROM_HOST);

	if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_APPLY_TXD);

	if (!key)
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (!is_8023 && mt7996_tx_use_mgmt(dev, tx_info->skb))
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
		struct mt76_vif_link *mlink = NULL;

		if (wcid->offchannel)
			mlink = rcu_dereference(mvif->mt76.offchannel_link);
		if (!mlink)
			mlink = rcu_dereference(mvif->mt76.link[wcid->link_id]);

		txp->fw.bss_idx = mlink ? mlink->idx : mvif->deflink.mt76.idx;
	}

	txp->fw.token = cpu_to_le16(id);
	txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff);

	tx_info->skb = NULL;

	/* pass partial skb header to fw */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	return 0;
}

u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
{
	struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
	__le32 *txwi = ptr;
	u32 val;

	memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));

	val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
	txwi[0] = cpu_to_le32(val);

	val = BIT(31) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
	txwi[1] = cpu_to_le32(val);

	txp->token = cpu_to_le16(token_id);
	txp->nbuf = 1;
	txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));

	return MT_TXD_SIZE + sizeof(*txp);
}

static void
mt7996_tx_check_aggr(struct ieee80211_link_sta *link_sta,
		     struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u16 fc, tid;

	if (!(link_sta->ht_cap.ht_supported || link_sta->he_cap.has_he))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	if (tid >= 6) /* skip VO queue */
		return;

	if (is_8023) {
		fc = IEEE80211_FTYPE_DATA |
		     (link_sta->sta->wme ? IEEE80211_STYPE_QOS_DATA
					 : IEEE80211_STYPE_DATA);
	} else {
		/* No need to get precise TID for Action/Management Frame,
		 * since it will not meet the following Frame Control
		 * condition anyway.
		 */

		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

		fc = le16_to_cpu(hdr->frame_control) &
		     (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
	}

	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
		return;

	if (!test_and_set_bit(tid, &wcid->ampdu_state))
		ieee80211_start_tx_ba_session(link_sta->sta, tid, 0);
}

static void
mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
		 struct ieee80211_link_sta *link_sta,
		 struct mt76_wcid *wcid, struct list_head *free_list)
{
	struct mt76_dev *mdev = &dev->mt76;
	__le32 *txwi;
	u16 wcid_idx;

	mt76_connac_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
	if (link_sta) {
		wcid_idx = wcid->idx;
		if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
			mt7996_tx_check_aggr(link_sta, wcid, t->skb);
	} else {
		wcid_idx = le32_get_bits(txwi[9], MT_TXD9_WLAN_IDX);
	}

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);

out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}
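
/* Parse a TXRX_NOTIFY (TX-free) event: version 5+ encodes wcid pairs, per-wcid
 * TX status words and packed MSDU token ids, which are released back to the
 * token allocator here.
 */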
static void
mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
{
	__le32 *tx_free = (__le32 *)data, *cur_info;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *phy2 = mdev->phys[MT_BAND1];
	struct mt76_phy *phy3 = mdev->phys[MT_BAND2];
	struct ieee80211_link_sta *link_sta = NULL;
	struct mt76_txwi_cache *txwi;
	struct mt76_wcid *wcid = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *skb, *tmp;
	void *end = data + len;
	bool wake = false;
	u16 total, count = 0;
	u8 ver;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
	if (phy2) {
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_BE], false);
	}
	if (phy3) {
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_BE], false);
	}

	ver = le32_get_bits(tx_free[1], MT_TXFREE1_VER);
	if (WARN_ON_ONCE(ver < 5))
		return;

	total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT);
	for (cur_info = &tx_free[2]; count < total; cur_info++) {
		u32 msdu, info;
		u8 i;

		if (WARN_ON_ONCE((void *)cur_info >= end))
			return;
		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		info = le32_to_cpu(*cur_info);
		if (info & MT_TXFREE_INFO_PAIR) {
			struct ieee80211_sta *sta;
			u16 idx;

			idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
			wcid = mt76_wcid_ptr(dev, idx);
			sta = wcid_to_sta(wcid);
			if (!sta) {
				link_sta = NULL;
				goto next;
			}

			link_sta = rcu_dereference(sta->link[wcid->link_id]);
			if (!link_sta)
				goto next;

			mt76_wcid_add_poll(&dev->mt76, wcid);
next:
			/* ver 7 has a new DW with pair = 1, skip it */
			if (ver == 7 && ((void *)(cur_info + 1) < end) &&
			    (le32_to_cpu(*(cur_info + 1)) & MT_TXFREE_INFO_PAIR))
				cur_info++;
			continue;
		} else if (info & MT_TXFREE_INFO_HEADER) {
			u32 tx_retries = 0, tx_failed = 0;

			if (!wcid)
				continue;

			tx_retries =
				FIELD_GET(MT_TXFREE_INFO_COUNT, info) - 1;
			tx_failed = tx_retries +
				!!FIELD_GET(MT_TXFREE_INFO_STAT, info);

			wcid->stats.tx_retries += tx_retries;
			wcid->stats.tx_failed += tx_failed;
			continue;
		}

		for (i = 0; i < 2; i++) {
			msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID;
			if (msdu == MT_TXFREE_INFO_MSDU_ID)
				continue;

			count++;
			txwi = mt76_token_release(mdev, msdu, &wake);
			if (!txwi)
				continue;

			mt7996_txwi_free(dev, txwi, link_sta, wcid,
					 &free_list);
		}
	}

	mt7996_mac_sta_poll(dev);

	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}
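
/* Consume one TXS record: complete the matching status skb (if any) and
 * refresh the per-station rate statistics from the reported TX rate.
 */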
static bool
mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
		       int pid, __le32 *txs_data)
{
	struct mt76_sta_stats *stats = &wcid->stats;
	struct ieee80211_supported_band *sband;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy;
	struct ieee80211_tx_info *info;
	struct sk_buff_head list;
	struct rate_info rate = {};
	struct sk_buff *skb = NULL;
	bool cck = false;
	u32 txrate, txs, mode, stbc;

	txs = le32_to_cpu(txs_data[0]);

	mt76_tx_status_lock(mdev, &list);

	/* only report MPDU TXS */
	if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) == 0) {
		skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
		if (skb) {
			info = IEEE80211_SKB_CB(skb);
			if (!(txs & MT_TXS0_ACK_ERROR_MASK))
				info->flags |= IEEE80211_TX_STAT_ACK;

			info->status.ampdu_len = 1;
			info->status.ampdu_ack_len =
				!!(info->flags & IEEE80211_TX_STAT_ACK);

			info->status.rates[0].idx = -1;
		}
	}

	if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wcid->sta) {
		struct ieee80211_sta *sta;
		u8 tid;

		sta = wcid_to_sta(wcid);
		tid = FIELD_GET(MT_TXS0_TID, txs);
		ieee80211_refresh_tx_agg_session_timer(sta, tid);
	}

	txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);

	rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
	rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
	stbc = le32_get_bits(txs_data[3], MT_TXS3_RATE_STBC);

	if (stbc && rate.nss > 1)
		rate.nss >>= 1;

	if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
		stats->tx_nss[rate.nss - 1]++;
	if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
		stats->tx_mcs[rate.mcs]++;

	mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
	switch (mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		mphy = mt76_dev_phy(mdev, wcid->phy_idx);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
			sband = &mphy->sband_6g.sband;
		else
			sband = &mphy->sband_2g.sband;

		rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
		rate.legacy = sband->bitrates[rate.mcs].bitrate;
		break;
	case MT_PHY_TYPE_HT:
	case MT_PHY_TYPE_HT_GF:
		if (rate.mcs > 31)
			goto out;

		rate.flags = RATE_INFO_FLAGS_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_VHT:
		if (rate.mcs > 9)
			goto out;

		rate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
	case MT_PHY_TYPE_HE_MU:
		if (rate.mcs > 11)
			goto out;

		rate.he_gi = wcid->rate.he_gi;
		rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
		rate.flags = RATE_INFO_FLAGS_HE_MCS;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		if (rate.mcs > 13)
			goto out;

		rate.eht_gi = wcid->rate.eht_gi;
		rate.flags = RATE_INFO_FLAGS_EHT_MCS;
		break;
	default:
		goto out;
	}

	stats->tx_mode[mode]++;

	switch (FIELD_GET(MT_TXS0_BW, txs)) {
	case IEEE80211_STA_RX_BW_320:
		rate.bw = RATE_INFO_BW_320;
		stats->tx_bw[4]++;
		break;
	case IEEE80211_STA_RX_BW_160:
		rate.bw = RATE_INFO_BW_160;
		stats->tx_bw[3]++;
		break;
	case IEEE80211_STA_RX_BW_80:
		rate.bw = RATE_INFO_BW_80;
		stats->tx_bw[2]++;
		break;
	case IEEE80211_STA_RX_BW_40:
		rate.bw = RATE_INFO_BW_40;
		stats->tx_bw[1]++;
		break;
	default:
		rate.bw = RATE_INFO_BW_20;
		stats->tx_bw[0]++;
		break;
	}
	wcid->rate = rate;

out:
	if (skb)
		mt76_tx_status_skb_done(mdev, skb, &list);
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}

static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
{
	struct mt7996_sta_link *msta_link;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u16 wcidx;
	u8 pid;

	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);

	if (pid < MT_PACKET_ID_NO_SKB)
		return;

	rcu_read_lock();

	wcid = mt76_wcid_ptr(dev, wcidx);
	if (!wcid)
		goto out;

	mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data);

	if (!wcid->sta)
		goto out;

	msta_link = container_of(wcid, struct mt7996_sta_link, wcid);
	mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);

out:
	rcu_read_unlock();
}

bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)data;
	__le32 *end = (__le32 *)&rxd[len / 4];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			return true;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7996_mac_tx_free(dev, data, len);
		return false;
	case PKT_TYPE_TXS:
		for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
			mt7996_mac_add_txs(dev, rxd);
		return false;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, data, len);
		return false;
	default:
		return true;
	}
}

void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb, u32 *info)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			type = PKT_TYPE_NORMAL;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2) &&
		    q == MT_RXQ_TXFREE_BAND2) {
			dev_kfree_skb(skb);
			break;
		}

		mt7996_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7996_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXS:
		for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
			mt7996_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL:
		if (!mt7996_mac_fill_rx(dev, q, skb, info)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}

void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u32 reg = MT_WF_PHYRX_BAND_RX_CTRL1(phy->mt76->band_idx);

	mt76_clear(dev, reg, MT_WF_PHYRX_BAND_RX_CTRL1_STSCNT_EN);
	mt76_set(dev, reg, BIT(11) | BIT(9));
}
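
/* Clear the hardware MIB and airtime counters so the next survey interval
 * starts from zero.
 */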
void mt7996_mac_reset_counters(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	int i;

	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));

	phy->mt76->survey_time = ktime_get_boottime();

	memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));

	/* reset airtime counters */
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band_idx),
		 MT_WF_RMAC_MIB_RXTIME_CLR);

	mt7996_mcu_get_chan_mib_info(phy, true);
}

void mt7996_mac_set_coverage_class(struct mt7996_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7996_dev *dev = phy->dev;
	struct mt7996_phy *phy2 = mt7996_phy2(dev);
	struct mt7996_phy *phy3 = mt7996_phy3(dev);
	u32 reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	u8 band_idx = phy->mt76->band_idx;
	int offset;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	if (phy2)
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       phy2->coverage_class);

	if (phy3)
		coverage_class = max_t(s16, coverage_class,
				       phy3->coverage_class);

	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(band_idx), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(band_idx), ofdm + reg_offset);
}

void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band)
{
	mt76_set(dev, MT_WF_PHYRX_CSD_BAND_RXTD12(band),
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR_ONLY |
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR);

	mt76_set(dev, MT_WF_PHYRX_BAND_RX_CTRL1(band),
		 FIELD_PREP(MT_WF_PHYRX_BAND_RX_CTRL1_IPI_EN, 0x5));
}
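
/* Average the per-antenna IPI histograms into a noise-floor magnitude: each
 * bucket counts samples at the level given by nf_power[] (read as -dBm), so
 * the count-weighted mean of those levels approximates the noise power.
 */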
static u8
mt7996_phy_get_nf(struct mt7996_phy *phy, u8 band_idx)
{
	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
	struct mt7996_dev *dev = phy->dev;
	u32 val, sum = 0, n = 0;
	int ant, i;

	for (ant = 0; ant < hweight8(phy->mt76->antenna_mask); ant++) {
		u32 reg = MT_WF_PHYRX_CSD_IRPI(band_idx, ant);

		for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
			val = mt76_rr(dev, reg);
			sum += val * nf_power[i];
			n += val;
		}
	}

	return n ? sum / n : 0;
}

void mt7996_update_channel(struct mt76_phy *mphy)
{
	struct mt7996_phy *phy = mphy->priv;
	struct mt76_channel_state *state = mphy->chan_state;
	int nf;

	mt7996_mcu_get_chan_mib_info(phy, false);

	nf = mt7996_phy_get_nf(phy, mphy->band_idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state->noise = -(phy->noise >> 4);
}

static bool
mt7996_wait_reset_state(struct mt7996_dev *dev, u32 state)
{
	bool ret;

	ret = wait_event_timeout(dev->reset_wait,
				 (READ_ONCE(dev->recovery.state) & state),
				 MT7996_RESET_TIMEOUT);

	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
	return ret;
}

static void
mt7996_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct ieee80211_bss_conf *link_conf;
	struct mt7996_phy *phy = priv;
	struct mt7996_dev *dev = phy->dev;
	unsigned int link_id;

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
		break;
	default:
		return;
	}

	for_each_vif_active_link(vif, link_conf, link_id) {
		struct mt7996_vif_link *link;

		link = mt7996_vif_link(dev, vif, link_id);
		if (!link || link->phy != phy)
			continue;

		mt7996_mcu_add_beacon(dev->mt76.hw, vif, link_conf);
	}
}

void mt7996_mac_update_beacons(struct mt7996_phy *phy)
{
	ieee80211_iterate_active_interfaces(phy->mt76->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, phy);
}

static void
mt7996_update_beacons(struct mt7996_dev *dev)
{
	struct mt76_phy *phy2, *phy3;

	mt7996_mac_update_beacons(&dev->phy);

	phy2 = dev->mt76.phys[MT_BAND1];
	if (phy2)
		mt7996_mac_update_beacons(phy2->priv);

	phy3 = dev->mt76.phys[MT_BAND2];
	if (phy3)
		mt7996_mac_update_beacons(phy3->priv);
}

void mt7996_tx_token_put(struct mt7996_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id) {
		mt7996_txwi_free(dev, txwi, NULL, NULL, NULL);
		dev->mt76.token_count--;
	}
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}
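
/* Full restart path used by chip-level recovery: quiesce TX/RX and napi,
 * rebuild the token table, reload firmware and bring every running band
 * back up.
 */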
static int
mt7996_mac_restart(struct mt7996_dev *dev)
{
	struct mt7996_phy *phy2, *phy3;
	struct mt76_dev *mdev = &dev->mt76;
	int i, ret;

	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);

	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}

	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
	}

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	if (phy2)
		set_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		set_bit(MT76_RESET, &phy3->mt76->state);

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);
	if (phy2)
		mt76_txq_schedule_all(phy2->mt76);
	if (phy3)
		mt76_txq_schedule_all(phy3->mt76);

	/* disable all tx/rx napi */
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(mdev, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&mdev->q_rx[i]))
			continue;

		if (mdev->q_rx[i].ndesc)
			napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	/* token reinit */
	mt7996_tx_token_put(dev);
	idr_init(&dev->mt76.token);

	mt7996_dma_reset(dev, true);

	mt76_for_each_q_rx(mdev, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&mdev->q_rx[i]))
			continue;

		if (mdev->q_rx[i].ndesc) {
			napi_enable(&dev->mt76.napi[i]);
			local_bh_disable();
			napi_schedule(&dev->mt76.napi[i]);
			local_bh_enable();
		}
	}
	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);

	mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}
	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
	}

	/* load firmware */
	ret = mt7996_mcu_init_firmware(dev);
	if (ret)
		goto out;

	/* set the necessary init items */
	ret = mt7996_mcu_set_eeprom(dev);
	if (ret)
		goto out;

	mt7996_mac_init(dev);
	mt7996_init_txpower(&dev->phy);
	mt7996_init_txpower(phy2);
	mt7996_init_txpower(phy3);
	ret = mt7996_txbf_init(dev);

	if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
		ret = mt7996_run(&dev->phy);
		if (ret)
			goto out;
	}

	if (phy2 && test_bit(MT76_STATE_RUNNING, &phy2->mt76->state)) {
		ret = mt7996_run(phy2);
		if (ret)
			goto out;
	}

	if (phy3 && test_bit(MT76_STATE_RUNNING, &phy3->mt76->state)) {
		ret = mt7996_run(phy3);
		if (ret)
			goto out;
	}

out:
	/* reset done */
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		clear_bit(MT76_RESET, &phy3->mt76->state);

	napi_enable(&dev->mt76.tx_napi);
	local_bh_disable();
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	mt76_worker_enable(&dev->mt76.tx_worker);
	return ret;
}

static void
mt7996_mac_full_reset(struct mt7996_dev *dev)
{
	struct mt7996_phy *phy2, *phy3;
	int i;

	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);
	dev->recovery.hw_full_reset = true;

	wake_up(&dev->mt76.mcu.wait);
	ieee80211_stop_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_stop_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_stop_queues(phy3->mt76->hw);

	cancel_work_sync(&dev->wed_rro.work);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2)
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	if (phy3)
		cancel_delayed_work_sync(&phy3->mt76->mac_work);

	mutex_lock(&dev->mt76.mutex);
	for (i = 0; i < 10; i++) {
		if (!mt7996_mac_restart(dev))
			break;
	}
	mutex_unlock(&dev->mt76.mutex);

	if (i == 10)
		dev_err(dev->mt76.dev, "chip full reset failed\n");

	ieee80211_restart_hw(mt76_hw(dev));
	if (phy2)
		ieee80211_restart_hw(phy2->mt76->hw);
	if (phy3)
		ieee80211_restart_hw(phy3->mt76->hw);

	ieee80211_wake_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_wake_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_wake_queues(phy3->mt76->hw);

	dev->recovery.hw_full_reset = false;
	ieee80211_queue_delayed_work(mt76_hw(dev),
				     &dev->mphy.mac_work,
				     MT7996_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(phy2->mt76->hw,
					     &phy2->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	if (phy3)
		ieee80211_queue_delayed_work(phy3->mt76->hw,
					     &phy3->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
}

void mt7996_mac_reset_work(struct work_struct *work)
{
	struct mt7996_phy *phy2, *phy3;
	struct mt7996_dev *dev;
	int i;

	dev = container_of(work, struct mt7996_dev, reset_work);
	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);

	/* chip full reset */
	if (dev->recovery.restart) {
		/* disable WA/WM WDT */
		mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA,
			   MT_MCU_CMD_WDT_MASK);

		if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT)
			dev->recovery.wa_reset_count++;
		else
			dev->recovery.wm_reset_count++;

		mt7996_mac_full_reset(dev);

		/* enable mcu irq */
		mt7996_irq_enable(dev, MT_INT_MCU_CMD);
		mt7996_irq_disable(dev, 0);

		/* enable WA/WM WDT */
		mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);

		dev->recovery.state = MT_MCU_CMD_NORMAL_STATE;
		dev->recovery.restart = false;
		return;
	}

	if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
		return;

	dev_info(dev->mt76.dev, "\n%s L1 SER recovery start.",
		 wiphy_name(dev->mt76.hw->wiphy));

	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
		mtk_wed_device_stop(&dev->mt76.mmio.wed_hif2);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
		mtk_wed_device_stop(&dev->mt76.mmio.wed);

	ieee80211_stop_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_stop_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_stop_queues(phy3->mt76->hw);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);

	cancel_work_sync(&dev->wed_rro.work);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	}
	if (phy3) {
		set_bit(MT76_RESET, &phy3->mt76->state);
		cancel_delayed_work_sync(&phy3->mt76->mac_work);
	}
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
			continue;

		napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);

	if (mt7996_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7996_dma_reset(dev, false);

		mt7996_tx_token_put(dev);
		idr_init(&dev->mt76.token);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
		mt7996_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	/* enable DMA Tx/Rx and interrupt */
	mt7996_dma_start(dev, false, false);
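
	/* restore WED state: rebuild the host irq mask and restart HW RRO */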
	if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
		u32 wed_irq_mask = MT_INT_RRO_RX_DONE | MT_INT_TX_DONE_BAND2 |
				   dev->mt76.mmio.irqmask;

		if (mtk_wed_get_rx_capa(&dev->mt76.mmio.wed))
			wed_irq_mask &= ~MT_INT_RX_DONE_RRO_IND;

		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);

		mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
					    true);
		mt7996_irq_enable(dev, wed_irq_mask);
		mt7996_irq_disable(dev, 0);
	}

	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) {
		mt76_wr(dev, MT_INT_PCIE1_MASK_CSR, MT_INT_TX_RX_DONE_EXT);
		mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
				     MT_INT_TX_RX_DONE_EXT);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		clear_bit(MT76_RESET, &phy3->mt76->state);

	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
			continue;

		napi_enable(&dev->mt76.napi[i]);
		local_bh_disable();
		napi_schedule(&dev->mt76.napi[i]);
		local_bh_enable();
	}

	tasklet_schedule(&dev->mt76.irq_tasklet);

	mt76_worker_enable(&dev->mt76.tx_worker);

	napi_enable(&dev->mt76.tx_napi);
	local_bh_disable();
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	ieee80211_wake_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_wake_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_wake_queues(phy3->mt76->hw);

	mutex_unlock(&dev->mt76.mutex);

	mt7996_update_beacons(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     MT7996_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(phy2->mt76->hw,
					     &phy2->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	if (phy3)
		ieee80211_queue_delayed_work(phy3->mt76->hw,
					     &phy3->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	dev_info(dev->mt76.dev, "%s L1 SER recovery completed\n",
		 wiphy_name(dev->mt76.hw->wiphy));
}
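
/* The coredump below snapshots each firmware memory region into
 * crash_data->memdump_buf as a sequence of [struct mt7996_mem_hdr]
 * [region payload] records, hands the buffer to the coredump layer and
 * finally queues reset_work to recover the chip.
 */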
/* firmware coredump */
void mt7996_mac_dump_work(struct work_struct *work)
{
	const struct mt7996_mem_region *mem_region;
	struct mt7996_crash_data *crash_data;
	struct mt7996_dev *dev;
	struct mt7996_mem_hdr *hdr;
	size_t buf_len;
	int i;
	u32 num;
	u8 *buf;

	dev = container_of(work, struct mt7996_dev, dump_work);

	mutex_lock(&dev->dump_mutex);

	crash_data = mt7996_coredump_new(dev);
	if (!crash_data) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_coredump;
	}

	mem_region = mt7996_coredump_get_mem_layout(dev, &num);
	if (!mem_region || !crash_data->memdump_buf_len) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_memdump;
	}

	buf = crash_data->memdump_buf;
	buf_len = crash_data->memdump_buf_len;

	/* dumping memory content... */
	memset(buf, 0, buf_len);
	for (i = 0; i < num; i++) {
		if (mem_region->len > buf_len) {
			dev_warn(dev->mt76.dev, "%s len %zu is too large\n",
				 mem_region->name, mem_region->len);
			break;
		}

		/* reserve space for the header */
		hdr = (void *)buf;
		buf += sizeof(*hdr);
		buf_len -= sizeof(*hdr);

		mt7996_memcpy_fromio(dev, buf, mem_region->start,
				     mem_region->len);

		hdr->start = mem_region->start;
		hdr->len = mem_region->len;

		if (!mem_region->len)
			/* note: the header remains, just with zero length */
			break;

		buf += mem_region->len;
		buf_len -= mem_region->len;

		mem_region++;
	}

	mutex_unlock(&dev->dump_mutex);

skip_memdump:
	mt7996_coredump_submit(dev);
skip_coredump:
	queue_work(dev->mt76.wq, &dev->reset_work);
}

void mt7996_reset(struct mt7996_dev *dev)
{
	if (!dev->recovery.hw_init_done)
		return;

	if (dev->recovery.hw_full_reset)
		return;

	/* wm/wa exception: do full recovery */
	if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) {
		dev->recovery.restart = true;
		dev_info(dev->mt76.dev,
			 "%s indicated firmware crash, attempting recovery\n",
			 wiphy_name(dev->mt76.hw->wiphy));

		mt7996_irq_disable(dev, MT_INT_MCU_CMD);
		queue_work(dev->mt76.wq, &dev->dump_work);
		return;
	}

	queue_work(dev->mt76.wq, &dev->reset_work);
	wake_up(&dev->reset_wait);
}
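
/* Harvest the per-band hardware MIB counters into the software copy.
 * The "+=" accumulation below assumes the counters are clear-on-read;
 * this runs periodically from mt7996_mac_work().
 */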
void mt7996_mac_update_stats(struct mt7996_phy *phy)
{
	struct mt76_mib_stats *mib = &phy->mib;
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	u32 cnt;
	int i;

	cnt = mt76_rr(dev, MT_MIB_RSCR1(band_idx));
	mib->fcs_err_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR33(band_idx));
	mib->rx_fifo_full_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR31(band_idx));
	mib->rx_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR6(band_idx));
	mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_RVSR0(band_idx));
	mib->rx_vector_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR35(band_idx));
	mib->rx_delimiter_fail_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR36(band_idx));
	mib->rx_len_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR0(band_idx));
	mib->tx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR2(band_idx));
	mib->tx_stop_q_empty_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR3(band_idx));
	mib->tx_mpdu_attempts_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR4(band_idx));
	mib->tx_mpdu_success_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR27(band_idx));
	mib->rx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR28(band_idx));
	mib->rx_ampdu_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR29(band_idx));
	mib->rx_ampdu_valid_subframe_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR30(band_idx));
	mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR27(band_idx));
	mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR28(band_idx));
	mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT, cnt);

	cnt = mt76_rr(dev, MT_UMIB_RPDCR(band_idx));
	mib->rx_pfdrop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RVSR1(band_idx));
	mib->rx_vec_queue_overflow_drop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR1(band_idx));
	mib->rx_ba_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR0(band_idx));
	mib->tx_bf_ebf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR1(band_idx));
	mib->tx_bf_ibf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR2(band_idx));
	mib->tx_mu_bf_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR5(band_idx));
	mib->tx_mu_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR6(band_idx));
	mib->tx_mu_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR7(band_idx));
	mib->tx_su_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR3(band_idx));
	mib->tx_bf_rx_fb_ht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR4(band_idx));
	mib->tx_bf_rx_fb_vht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR5(band_idx));
	mib->tx_bf_rx_fb_he_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR6(band_idx));
	mib->tx_bf_rx_fb_eht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(band_idx));
	mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt);
	mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt);
	mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt);

	cnt = mt76_rr(dev, MT_MIB_BSCR7(band_idx));
	mib->tx_bf_fb_trig_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR17(band_idx));
	mib->tx_bf_fb_cpl_cnt += cnt;

	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
		cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
		mib->tx_amsdu[i] += cnt;
		mib->tx_amsdu_cnt += cnt;
	}

	/* rts count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR5(band_idx));
	mib->rts_cnt += cnt;

	/* rts retry count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR6(band_idx));
	mib->rts_retries_cnt += cnt;

	/* ba miss count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR0(band_idx));
	mib->ba_miss_cnt += cnt;

	/* ack fail count */
	cnt = mt76_rr(dev, MT_MIB_BFTFCR(band_idx));
	mib->ack_fail_cnt += cnt;

	for (i = 0; i < 16; i++) {
		cnt = mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));
		phy->mt76->aggr_stats[i] += cnt;
	}
}
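
/* Deferred per-station rate-control updates: entries are queued on
 * dev->sta_rc_list under sta_poll_lock, which is dropped around the MCU
 * calls below since they may sleep.
 */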
void mt7996_mac_sta_rc_work(struct work_struct *work)
{
	struct mt7996_dev *dev = container_of(work, struct mt7996_dev, rc_work);
	struct mt7996_sta_link *msta_link;
	struct ieee80211_vif *vif;
	struct mt7996_vif *mvif;
	LIST_HEAD(list);
	u32 changed;

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->sta_rc_list, &list);

	while (!list_empty(&list)) {
		msta_link = list_first_entry(&list, struct mt7996_sta_link,
					     rc_list);
		list_del_init(&msta_link->rc_list);

		changed = msta_link->changed;
		msta_link->changed = 0;
		mvif = msta_link->sta->vif;
		vif = container_of((void *)mvif, struct ieee80211_vif,
				   drv_priv);

		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
			       IEEE80211_RC_NSS_CHANGED |
			       IEEE80211_RC_BW_CHANGED))
			mt7996_mcu_add_rate_ctrl(dev, msta_link->sta, vif,
						 msta_link->wcid.link_id,
						 true);

		if (changed & IEEE80211_RC_SMPS_CHANGED)
			mt7996_mcu_set_fixed_field(dev, msta_link->sta, NULL,
						   msta_link->wcid.link_id,
						   RATE_PARAM_MMPS_UPDATE);

		spin_lock_bh(&dev->mt76.sta_poll_lock);
	}

	spin_unlock_bh(&dev->mt76.sta_poll_lock);
}

void mt7996_mac_work(struct work_struct *work)
{
	struct mt7996_phy *phy;
	struct mt76_phy *mphy;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mutex_lock(&mphy->dev->mutex);

	mt76_update_survey(mphy);
	if (++mphy->mac_work_count == 5) {
		mphy->mac_work_count = 0;

		mt7996_mac_update_stats(phy);

		mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_RATE);
		if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
			mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_ADM_STAT);
			mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_MSDU_COUNT);
		}
	}

	mutex_unlock(&mphy->dev->mutex);

	mt76_tx_status_check(mphy->dev, false);

	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
				     MT7996_WATCHDOG_TIME);
}

static void mt7996_dfs_stop_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	int rdd_idx = mt7996_get_rdd_idx(phy, false);

	if (rdd_idx < 0)
		return;

	mt7996_mcu_rdd_cmd(dev, RDD_STOP, rdd_idx, 0);
}

static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int rdd_idx)
{
	int err, region;

	switch (dev->mt76.region) {
	case NL80211_DFS_ETSI:
		region = 0;
		break;
	case NL80211_DFS_JP:
		region = 2;
		break;
	case NL80211_DFS_FCC:
	default:
		region = 1;
		break;
	}

	err = mt7996_mcu_rdd_cmd(dev, RDD_START, rdd_idx, region);
	if (err < 0)
		return err;

	return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, rdd_idx, 1);
}

static int mt7996_dfs_start_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	int err, rdd_idx;

	rdd_idx = mt7996_get_rdd_idx(phy, false);
	if (rdd_idx < 0)
		return -EINVAL;

	/* start CAC */
	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, rdd_idx, 0);
	if (err < 0)
		return err;

	err = mt7996_dfs_start_rdd(dev, rdd_idx);

	return err;
}

static int
mt7996_dfs_init_radar_specs(struct mt7996_phy *phy)
{
	const struct mt7996_dfs_radar_spec *radar_specs;
	struct mt7996_dev *dev = phy->dev;
	int err, i;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		err = mt7996_mcu_set_fcc5_lpn(dev, 8);
		if (err < 0)
			return err;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7996_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7996_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}
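
/* DFS state machine, called on channel/regulatory updates: from UNKNOWN
 * the detector is restarted from scratch; a move to DISABLED returns the
 * RDD to normal operation; enabling from DISABLED programs the
 * region-specific radar thresholds and starts CAC; once the channel
 * availability check completes (RDD_CAC_END), the state becomes ACTIVE.
 */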
int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	enum mt76_dfs_state dfs_state, prev_state;
	int err, rdd_idx = mt7996_get_rdd_idx(phy, false);

	prev_state = phy->mt76->dfs_state;
	dfs_state = mt76_phy_dfs_state(phy->mt76);

	if (prev_state == dfs_state || rdd_idx < 0)
		return 0;

	if (prev_state == MT_DFS_STATE_UNKNOWN)
		mt7996_dfs_stop_radar_detector(phy);

	if (dfs_state == MT_DFS_STATE_DISABLED)
		goto stop;

	if (prev_state <= MT_DFS_STATE_DISABLED) {
		err = mt7996_dfs_init_radar_specs(phy);
		if (err < 0)
			return err;

		err = mt7996_dfs_start_radar_detector(phy);
		if (err < 0)
			return err;

		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
	}

	if (dfs_state == MT_DFS_STATE_CAC)
		return 0;

	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END, rdd_idx, 0);
	if (err < 0) {
		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
		return err;
	}

	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
	return 0;

stop:
	err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START, rdd_idx, 0);
	if (err < 0)
		return err;

	mt7996_dfs_stop_radar_detector(phy);
	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;

	return 0;
}

static int
mt7996_mac_twt_duration_align(int duration)
{
	return duration << 8;
}

static u64
mt7996_mac_twt_sched_list_add(struct mt7996_dev *dev,
			      struct mt7996_twt_flow *flow)
{
	struct mt7996_twt_flow *iter, *iter_next;
	u32 duration = flow->duration << 8;
	u64 start_tsf;

	iter = list_first_entry_or_null(&dev->twt_list,
					struct mt7996_twt_flow, list);
	if (!iter || !iter->sched || iter->start_tsf > duration) {
		/* add flow as first entry in the list */
		list_add(&flow->list, &dev->twt_list);
		return 0;
	}

	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
		start_tsf = iter->start_tsf +
			    mt7996_mac_twt_duration_align(iter->duration);
		if (list_is_last(&iter->list, &dev->twt_list))
			break;

		if (!iter_next->sched ||
		    iter_next->start_tsf > start_tsf + duration) {
			list_add(&flow->list, &iter->list);
			goto out;
		}
	}

	/* add flow as last entry in the list */
	list_add_tail(&flow->list, &dev->twt_list);
out:
	return start_tsf;
}

static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
{
	struct ieee80211_twt_params *twt_agrt;
	u64 interval, duration;
	u16 mantissa;
	u8 exp;

	/* only individual agreement supported */
	if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
		return -EOPNOTSUPP;

	/* only 256us unit supported */
	if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
		return -EOPNOTSUPP;

	twt_agrt = (struct ieee80211_twt_params *)twt->params;

	/* explicit agreement not supported */
	if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
		return -EOPNOTSUPP;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
			le16_to_cpu(twt_agrt->req_type));
	mantissa = le16_to_cpu(twt_agrt->mantissa);
	duration = twt_agrt->min_twt_dur << 8;

	interval = (u64)mantissa << exp;
	if (interval < duration)
		return -EOPNOTSUPP;

	return 0;
}
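
/* Duplicate-agreement check: returns true when the station already has an
 * active TWT flow with identical duration, wake interval
 * (mantissa/exponent) and protection/flow-type/trigger flags.
 */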
static bool
mt7996_mac_twt_param_equal(struct mt7996_sta_link *msta_link,
			   struct ieee80211_twt_params *twt_agrt)
{
	u16 type = le16_to_cpu(twt_agrt->req_type);
	u8 exp;
	int i;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
	for (i = 0; i < MT7996_MAX_STA_TWT_AGRT; i++) {
		struct mt7996_twt_flow *f;

		if (!(msta_link->twt.flowid_mask & BIT(i)))
			continue;

		f = &msta_link->twt.flow[i];
		if (f->duration == twt_agrt->min_twt_dur &&
		    f->mantissa == twt_agrt->mantissa &&
		    f->exp == exp &&
		    f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
		    f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
		    f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
			return true;
	}

	return false;
}

void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta,
			      struct ieee80211_twt_setup *twt)
{
	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
	struct mt7996_sta_link *msta_link = &msta->deflink;
	u16 req_type = le16_to_cpu(twt_agrt->req_type);
	enum ieee80211_twt_setup_cmd sta_setup_cmd;
	struct mt7996_dev *dev = mt7996_hw_dev(hw);
	struct mt7996_twt_flow *flow;
	u8 flowid, table_id, exp;

	if (mt7996_mac_check_twt_req(twt))
		goto out;

	mutex_lock(&dev->mt76.mutex);

	if (dev->twt.n_agrt == MT7996_MAX_TWT_AGRT)
		goto unlock;

	if (hweight8(msta_link->twt.flowid_mask) ==
	    ARRAY_SIZE(msta_link->twt.flow))
		goto unlock;

	if (twt_agrt->min_twt_dur < MT7996_MIN_TWT_DUR) {
		setup_cmd = TWT_SETUP_CMD_DICTATE;
		twt_agrt->min_twt_dur = MT7996_MIN_TWT_DUR;
		goto unlock;
	}

	if (mt7996_mac_twt_param_equal(msta_link, twt_agrt))
		goto unlock;

	flowid = ffs(~msta_link->twt.flowid_mask) - 1;
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
	twt_agrt->req_type |= le16_encode_bits(flowid,
					       IEEE80211_TWT_REQTYPE_FLOWID);

	table_id = ffs(~dev->twt.table_mask) - 1;
	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);

	flow = &msta_link->twt.flow[flowid];
	memset(flow, 0, sizeof(*flow));
	INIT_LIST_HEAD(&flow->list);
	flow->wcid = msta_link->wcid.idx;
	flow->table_id = table_id;
	flow->id = flowid;
	flow->duration = twt_agrt->min_twt_dur;
	flow->mantissa = twt_agrt->mantissa;
	flow->exp = exp;
	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);

	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
		u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
		u64 flow_tsf, curr_tsf;
		u32 rem;

		flow->sched = true;
		flow->start_tsf = mt7996_mac_twt_sched_list_add(dev, flow);
		curr_tsf = __mt7996_get_tsf(hw, &msta->vif->deflink);
		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
		flow_tsf = curr_tsf + interval - rem;
		twt_agrt->twt = cpu_to_le64(flow_tsf);
	} else {
		list_add_tail(&flow->list, &dev->twt_list);
	}
	flow->tsf = le64_to_cpu(twt_agrt->twt);

	if (mt7996_mcu_twt_agrt_update(dev, &msta->vif->deflink, flow,
				       MCU_TWT_AGRT_ADD))
		goto unlock;

	setup_cmd = TWT_SETUP_CMD_ACCEPT;
	dev->twt.table_mask |= BIT(table_id);
	msta_link->twt.flowid_mask |= BIT(flowid);
	dev->twt.n_agrt++;

unlock:
	mutex_unlock(&dev->mt76.mutex);
out:
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt_agrt->req_type |=
		le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt->control = twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED;
}
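
/* Tear down a single TWT flow: ask the firmware to delete the agreement,
 * then release the flow id, its TWT table slot and the global agreement
 * count. Callers must hold dev->mt76.mutex.
 */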
void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
				  struct mt7996_vif_link *link,
				  struct mt7996_sta_link *msta_link,
				  u8 flowid)
{
	struct mt7996_twt_flow *flow;

	lockdep_assert_held(&dev->mt76.mutex);

	if (flowid >= ARRAY_SIZE(msta_link->twt.flow))
		return;

	if (!(msta_link->twt.flowid_mask & BIT(flowid)))
		return;

	flow = &msta_link->twt.flow[flowid];
	if (mt7996_mcu_twt_agrt_update(dev, link, flow, MCU_TWT_AGRT_DELETE))
		return;

	list_del_init(&flow->list);
	msta_link->twt.flowid_mask &= ~BIT(flowid);
	dev->twt.table_mask &= ~BIT(flow->table_id);
	dev->twt.n_agrt--;
}