// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2022 MediaTek Inc.
 */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "coredump.h"
#include "mt7996.h"
#include "../dma.h"
#include "mac.h"
#include "mcu.h"

#define to_rssi(field, rcpi)	((FIELD_GET(field, rcpi) - 220) / 2)
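
/*
 * Worked example (illustrative, not used at runtime): RCPI is reported
 * in half-dB steps with a +220 offset, so to_rssi() maps an RCPI field
 * value of 180 to (180 - 220) / 2 = -20 dBm, and 140 to -40 dBm.
 */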

static const struct mt7996_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0, 6, 32, 28, 0, 990, 5010, 17, 1, 1 },
		[6] =  { 1, 0, 9, 32, 28, 0, 615, 5010, 27, 1, 1 },
		[7] =  { 1, 0, 15, 32, 28, 0, 240, 445, 27, 1, 1 },
		[8] =  { 1, 0, 12, 32, 28, 0, 240, 510, 42, 1, 1 },
		[9] =  { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
		[10] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
		[11] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 },
		[12] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 },
	},
};

static const struct mt7996_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 },
		[1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 },
		[2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 },
		[3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 },
		[4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 },
	},
};

static const struct mt7996_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] =  { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 },
		[1] =  { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 },
		[2] =  { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 },
		[3] =  { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 },
		[4] =  { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 },
		[13] = { 1, 0, 7, 32, 28, 0, 3836, 3856, 14, 1, 1 },
		[14] = { 1, 0, 6, 32, 28, 0, 615, 5010, 110, 1, 1 },
		[15] = { 1, 1, 0, 0, 0, 0, 15, 5010, 110, 0, 0, 12, 32, 28 },
	},
};

static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev,
					    u16 idx, u8 band_idx)
{
	struct mt7996_sta_link *msta_link;
	struct mt7996_sta *msta;
	struct mt7996_vif *mvif;
	struct mt76_wcid *wcid;
	int i;

	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (!wcid)
		return NULL;

	if (!mt7996_band_valid(dev, band_idx))
		return NULL;

	if (wcid->phy_idx == band_idx)
		return wcid;

	msta_link = container_of(wcid, struct mt7996_sta_link, wcid);
	msta = msta_link->sta;
	if (!msta || !msta->vif)
		return NULL;

	mvif = msta->vif;
	for (i = 0; i < ARRAY_SIZE(mvif->mt76.link); i++) {
		struct mt76_vif_link *mlink;

		mlink = rcu_dereference(mvif->mt76.link[i]);
		if (!mlink)
			continue;

		if (mlink->band_idx != band_idx)
			continue;

		msta_link = rcu_dereference(msta->link[i]);
		break;
	}

	return &msta_link->wcid;
}

bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}

u32 mt7996_mac_wtbl_lmac_addr(struct mt7996_dev *dev, u16 wcid, u8 dw)
{
	mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
		FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));

	return MT_WTBL_LMAC_OFFS(wcid, dw);
}

static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
{
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct mt7996_sta_link *msta_link;
	struct mt76_vif_link *mlink;
	struct ieee80211_sta *sta;
	struct mt7996_sta *msta;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	struct mt76_wcid *wcid;
	int i;

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->mt76.sta_poll_lock);

	rcu_read_lock();

	while (true) {
		bool clear = false;
		u32 addr, val;
		u16 idx;
		s8 rssi[4];

		spin_lock_bh(&dev->mt76.sta_poll_lock);
		if (list_empty(&sta_poll_list)) {
			spin_unlock_bh(&dev->mt76.sta_poll_lock);
			break;
		}
		msta_link = list_first_entry(&sta_poll_list,
					     struct mt7996_sta_link,
					     wcid.poll_list);
		msta = msta_link->sta;
		wcid = &msta_link->wcid;
		list_del_init(&wcid->poll_list);
		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		idx = wcid->idx;

		/* refresh peer's airtime reporting */
		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 20);

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = msta_link->airtime_ac[i];
			u32 rx_last = msta_link->airtime_ac[i + 4];

			msta_link->airtime_ac[i] = mt76_rr(dev, addr);
			msta_link->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			tx_time[i] = msta_link->airtime_ac[i] - tx_last;
			rx_time[i] = msta_link->airtime_ac[i + 4] - rx_last;

			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

		if (clear) {
			mt7996_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta_link->airtime_ac, 0,
			       sizeof(msta_link->airtime_ac));
		}

		if (!wcid->sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u8 q = mt76_connac_lmac_mapping(i);
			u32 tx_cur = tx_time[q];
			u32 rx_cur = rx_time[q];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur, rx_cur);
		}

		/* get signal strength of resp frames (CTS/BA/ACK) */
		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 34);
		val = mt76_rr(dev, addr);

		rssi[0] = to_rssi(GENMASK(7, 0), val);
		rssi[1] = to_rssi(GENMASK(15, 8), val);
		rssi[2] = to_rssi(GENMASK(23, 16), val);
		rssi[3] = to_rssi(GENMASK(31, 24), val);

		mlink = rcu_dereference(msta->vif->mt76.link[wcid->link_id]);
		if (mlink) {
			struct mt76_phy *mphy = mt76_vif_link_phy(mlink);

			if (mphy)
				msta_link->ack_signal =
					mt76_rx_signal(mphy->antenna_mask,
						       rssi);
		}

		ewma_avg_signal_add(&msta_link->avg_ack_signal,
				    -msta_link->ack_signal);
	}

	rcu_read_unlock();
}
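
/*
 * Note on the airtime polling above (illustrative): the WTBL admission
 * counters are free-running 32-bit accumulators, and tx_time/rx_time are
 * computed as deltas against the previously cached values. Once a cached
 * TX or RX value for any AC reaches BIT(30), the counters are cleared via
 * MT_WTBL_UPDATE_ADM_COUNT_CLEAR so the next delta restarts from zero
 * instead of wrapping.
 */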

/* The HW does not translate the mac header to 802.3 for mesh point */
static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
	struct mt7996_sta *msta = (struct mt7996_sta *)status->wcid;
	__le32 *rxd = (__le32 *)skb->data;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct ieee80211_hdr hdr;
	u16 frame_control;

	if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
	    MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
		return -EINVAL;

	if (!msta || !msta->vif)
		return -EINVAL;

	sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
	vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

	/* store the info from RXD and ethhdr to avoid being overridden */
	frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL);
	hdr.frame_control = cpu_to_le16(frame_control);
	hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_SEQ_CTRL));
	hdr.duration_id = 0;

	ether_addr_copy(hdr.addr1, vif->addr);
	ether_addr_copy(hdr.addr2, sta->addr);
	switch (frame_control & (IEEE80211_FCTL_TODS |
				 IEEE80211_FCTL_FROMDS)) {
	case 0:
		ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
		break;
	case IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_source);
		break;
	case IEEE80211_FCTL_TODS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		break;
	case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		ether_addr_copy(hdr.addr4, eth_hdr->h_source);
		break;
	default:
		return -EINVAL;
	}

	skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
	if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
	    eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
		ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
	else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
		ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
	else
		skb_pull(skb, 2);

	if (ieee80211_has_order(hdr.frame_control))
		memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[11],
		       IEEE80211_HT_CTL_LEN);
	if (ieee80211_is_data_qos(hdr.frame_control)) {
		__le16 qos_ctrl;

		qos_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_QOS_CTL));
		memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
		       IEEE80211_QOS_CTL_LEN);
	}

	if (ieee80211_has_a4(hdr.frame_control))
		memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
	else
		memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);

	return 0;
}
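
/*
 * Address reconstruction sketch (illustrative) for the ToDS/FromDS switch
 * above: addr1/addr2 always come from the vif and station, while addr3
 * (and addr4 for 4-address frames) are taken from the 802.3 header:
 *
 *   ToDS FromDS  addr3            addr4
 *   0    0       BSSID            -
 *   0    1       Ethernet source  -
 *   1    0       Ethernet dest    -
 *   1    1       Ethernet dest    Ethernet source
 */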

static int
mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
			struct mt76_rx_status *status,
			struct ieee80211_supported_band *sband,
			__le32 *rxv, u8 *mode)
{
	u32 v0, v2;
	u8 stbc, gi, bw, dcm, nss;
	int i, idx;
	bool cck = false;

	v0 = le32_to_cpu(rxv[0]);
	v2 = le32_to_cpu(rxv[2]);

	idx = FIELD_GET(MT_PRXV_TX_RATE, v0);
	i = idx;
	nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;

	stbc = FIELD_GET(MT_PRXV_HT_STBC, v2);
	gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v2);
	*mode = FIELD_GET(MT_PRXV_TX_MODE, v2);
	dcm = FIELD_GET(MT_PRXV_DCM, v2);
	bw = FIELD_GET(MT_PRXV_FRAME_MODE, v2);

	switch (*mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		i = mt76_get_rate(&dev->mt76, sband, i, cck);
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 31)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_VHT:
		status->nss = nss;
		status->encoding = RX_ENC_VHT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 11)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_HE_MU:
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
		status->nss = nss;
		status->encoding = RX_ENC_HE;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
			status->he_gi = gi;

		status->he_dcm = dcm;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		status->nss = nss;
		status->encoding = RX_ENC_EHT;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_EHT_GI_3_2)
			status->eht.gi = gi;
		break;
	default:
		return -EINVAL;
	}
	status->rate_idx = i;

	switch (bw) {
	case IEEE80211_STA_RX_BW_20:
		break;
	case IEEE80211_STA_RX_BW_40:
		if (*mode & MT_PHY_TYPE_HE_EXT_SU &&
		    (idx & MT_PRXV_TX_ER_SU_106T)) {
			status->bw = RATE_INFO_BW_HE_RU;
			status->he_ru =
				NL80211_RATE_INFO_HE_RU_ALLOC_106;
		} else {
			status->bw = RATE_INFO_BW_40;
		}
		break;
	case IEEE80211_STA_RX_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	case IEEE80211_STA_RX_BW_160:
		status->bw = RATE_INFO_BW_160;
		break;
	/* rxv reports bw 320-1 and 320-2 separately */
	case IEEE80211_STA_RX_BW_320:
	case IEEE80211_STA_RX_BW_320 + 1:
		status->bw = RATE_INFO_BW_320;
		break;
	default:
		return -EINVAL;
	}

	status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
	if (*mode < MT_PHY_TYPE_HE_SU && gi)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	return 0;
}
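
/*
 * Decode example (illustrative): a P-RXV with TX_MODE = MT_PHY_TYPE_HE_SU,
 * NSTS = 1, FRAME_MODE = IEEE80211_STA_RX_BW_80 and a GI field of 0 parses
 * to nss = 2, status->encoding = RX_ENC_HE, status->bw = RATE_INFO_BW_80
 * and status->he_gi = NL80211_RATE_INFO_HE_GI_0_8.
 */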

static void
mt7996_wed_check_ppe(struct mt7996_dev *dev, struct mt76_queue *q,
		     struct mt7996_sta *msta, struct sk_buff *skb,
		     u32 info)
{
	struct ieee80211_vif *vif;
	struct wireless_dev *wdev;

	if (!msta || !msta->vif)
		return;

	if (!mt76_queue_is_wed_rx(q))
		return;

	if (!(info & MT_DMA_INFO_PPE_VLD))
		return;

	vif = container_of((void *)msta->vif, struct ieee80211_vif,
			   drv_priv);
	wdev = ieee80211_vif_to_wdev(vif);
	skb->dev = wdev->netdev;

	mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb,
				 FIELD_GET(MT_DMA_PPE_CPU_REASON, info),
				 FIELD_GET(MT_DMA_PPE_ENTRY, info));
}

static int
mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
		   struct sk_buff *skb, u32 *info)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7996_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv = NULL;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 rxd3 = le32_to_cpu(rxd[3]);
	u32 rxd4 = le32_to_cpu(rxd[4]);
	u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM;
	u32 csum_status = *(u32 *)skb->cb;
	u32 mesh_mask = MT_RXD0_MESH | MT_RXD0_MHCP;
	bool is_mesh = (rxd0 & mesh_mask) == mesh_mask;
	bool unicast, insert_ccmp_hdr = false;
	u8 remove_pad, amsdu_info, band_idx;
	u8 mode = 0, qos_ctl = 0;
	bool hdr_trans;
	u16 hdr_gap;
	u16 seq_ctrl = 0;
	__le16 fc = 0;
	int idx;
	u8 hw_aggr = false;
	struct mt7996_sta *msta = NULL;

	hw_aggr = status->aggr;
	memset(status, 0, sizeof(*status));

	band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1);
	mphy = dev->mt76.phys[band_idx];
	phy = mphy->priv;
	status->phy_idx = mphy->band_idx;

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
		return -EINVAL;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	status->wcid = mt7996_rx_get_wcid(dev, idx, band_idx);

	if (status->wcid) {
		struct mt7996_sta_link *msta_link;

		msta_link = container_of(status->wcid, struct mt7996_sta_link,
					 wcid);
		msta = msta_link->sta;
		mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);
	}

	status->freq = mphy->chandef.chan->center_freq;
	status->band = mphy->chandef.chan->band;
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else if (status->band == NL80211_BAND_6GHZ)
		sband = &mphy->sband_6g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!sband->channels)
		return -EINVAL;

	if ((rxd3 & csum_mask) == csum_mask &&
	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd3 & MT_RXD3_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	rxd += 8;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		fc = cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL, v0));
		qos_ctl = FIELD_GET(MT_RXD10_QOS_CTL, v2);
		seq_ctrl = FIELD_GET(MT_RXD10_SEQ_CTRL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* RXD Group 3 - P-RXV */
	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		u32 v3;
		int ret;

		rxv = rxd;
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;

		v3 = le32_to_cpu(rxv[3]);

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v3);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v3);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v3);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v3);

		/* RXD Group 5 - C-RXV */
		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			rxd += 24;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;
		}

		ret = mt7996_mac_fill_rx_rate(dev, status, sband, rxv, &mode);
		if (ret < 0)
			return ret;
	}

	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
	}

	/* IEEE 802.11 fragmentation can only be applied to unicast frames.
	 * Hence, drop fragments with multicast/broadcast RA.
	 * This check fixes vulnerabilities, like CVE-2020-26145.
	 */
	if ((ieee80211_has_morefrags(fc) || seq_ctrl & IEEE80211_SCTL_FRAG) &&
	    FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) != MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
	if (hdr_trans && ieee80211_has_morefrags(fc)) {
		if (mt7996_reverse_frag0_hdr_trans(skb, hdr_gap))
			return -EINVAL;
		hdr_trans = false;
	} else {
		int pad_start = 0;

		skb_pull(skb, hdr_gap);
		if (!hdr_trans && status->amsdu && !(ieee80211_has_a4(fc) && is_mesh)) {
			pad_start = ieee80211_get_hdrlen_from_skb(skb);
		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
			/* When header translation failure is indicated,
			 * the hardware will insert an extra 2-byte field
			 * containing the data length after the protocol
			 * type field. This happens either when the LLC-SNAP
			 * pattern did not match, or if a VLAN header was
			 * detected.
			 */
			pad_start = 12;
			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
				pad_start += 4;
			else
				pad_start = 0;
		}

		if (pad_start) {
			memmove(skb->data + 2, skb->data, pad_start);
			skb_pull(skb, 2);
		}
	}

	if (!hdr_trans) {
		struct ieee80211_hdr *hdr;

		if (insert_ccmp_hdr) {
			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

			mt76_insert_ccmp_hdr(skb, key_id);
		}

		hdr = mt76_skb_get_hdr(skb);
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			u8 *qos = ieee80211_get_qos_ctl(hdr);

			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *qos;

			/* Mesh DA/SA/Length will be stripped after hardware
			 * de-amsdu, so here needs to clear amsdu present bit
			 * to mark it as a normal mesh frame.
			 */
			if (ieee80211_has_a4(fc) && is_mesh && status->amsdu)
				*qos &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}
		skb_set_mac_header(skb, (unsigned char *)hdr - skb->data);
	} else {
		status->flag |= RX_FLAG_8023;
		mt7996_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
				     *info);
	}

	if (rxv && !(status->flag & RX_FLAG_8023)) {
		switch (status->encoding) {
		case RX_ENC_EHT:
			mt76_connac3_mac_decode_eht_radiotap(skb, rxv, mode);
			break;
		case RX_ENC_HE:
			mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);
			break;
		default:
			break;
		}
	}

	if (!status->wcid || !ieee80211_is_data_qos(fc) || hw_aggr)
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(fc);
	status->qos_ctl = qos_ctl;
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);

	return 0;
}
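
/*
 * Layout note (illustrative) for the pad_start handling above: the code
 * peeks at offset 12, the Ethernet type field. Only when it reads 0x8100
 * (VLAN) is the hardware-inserted 2-byte length field assumed at offset
 * 16; memmove(skb->data + 2, skb->data, 16) plus skb_pull(skb, 2) then
 * squeezes those two bytes out. Otherwise pad_start is reset to 0 and the
 * frame is left untouched.
 */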

static void
mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid)
{
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	u8 fc_type, fc_stype;
	u16 ethertype;
	bool wmm = false;
	u32 val;

	if (wcid->sta) {
		struct ieee80211_sta *sta = wcid_to_sta(wcid);

		wmm = sta->wme;
	}

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	ethertype = get_unaligned_be16(&skb->data[12]);
	if (ethertype >= ETH_P_802_3_MIN)
		val |= MT_TXD1_ETH_802_3;

	txwi[1] |= cpu_to_le32(val);

	fc_type = IEEE80211_FTYPE_DATA >> 2;
	fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	txwi[2] |= cpu_to_le32(val);

	if (wcid->amsdu)
		txwi[3] |= cpu_to_le32(MT_TXD3_HW_AMSDU);
}

static void
mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
			    struct sk_buff *skb,
			    struct ieee80211_key_conf *key,
			    struct mt76_wcid *wcid)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	__le16 fc = hdr->frame_control, sc = hdr->seq_ctrl;
	u16 seqno = le16_to_cpu(sc);
	u8 fc_type, fc_stype;
	u32 val;

	if (ieee80211_is_action(fc) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
		if (is_mt7990(&dev->mt76))
			txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TID_ADDBA, tid));
		tid = MT_TX_ADDBA;
	} else if (ieee80211_is_mgmt(hdr->frame_control)) {
		tid = MT_TX_NORMAL;
	}

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	if (!ieee80211_is_data(fc) || multicast ||
	    info->flags & IEEE80211_TX_CTL_USE_MINRATE)
		val |= MT_TXD1_FIXED_RATE;

	if (key && multicast && ieee80211_is_robust_mgmt_frame(skb)) {
		val |= MT_TXD1_BIP;
		txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
	}

	txwi[1] |= cpu_to_le32(val);

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	if (ieee80211_has_morefrags(fc) && ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_FIRST);
	else if (ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_MID);
	else if (!ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_LAST);
	else
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_NONE);

	txwi[2] |= cpu_to_le32(val);

	txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast));
	if (ieee80211_is_beacon(fc)) {
		txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
		txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
	}

	if (multicast && ieee80211_vif_is_mld(info->control.vif)) {
		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
	}

	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
		txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
	}

	if (ieee80211_vif_is_mld(info->control.vif) &&
	    (multicast || unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))))
		txwi[5] |= cpu_to_le32(MT_TXD5_FL);

	if (ieee80211_is_nullfunc(fc) && ieee80211_has_a4(fc) &&
	    ieee80211_vif_is_mld(info->control.vif)) {
		txwi[5] |= cpu_to_le32(MT_TXD5_FL);
		txwi[6] |= cpu_to_le32(MT_TXD6_DIS_MAT);
	}

	if (!wcid->sta && ieee80211_is_mgmt(fc))
		txwi[6] |= cpu_to_le32(MT_TXD6_DIS_MAT);
}

void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid,
			   struct ieee80211_key_conf *key, int pid,
			   enum mt76_txq_id qid, u32 changed)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	struct mt76_vif_link *mlink = NULL;
	struct mt7996_vif *mvif;
	unsigned int link_id;
	u16 tx_count = 15;
	u32 val;
	bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
					 BSS_CHANGED_FILS_DISCOVERY));
	bool beacon = !!(changed & (BSS_CHANGED_BEACON |
				    BSS_CHANGED_BEACON_ENABLED)) && (!inband_disc);

	if (wcid != &dev->mt76.global_wcid)
		link_id = wcid->link_id;
	else
		link_id = u32_get_bits(info->control.flags,
				       IEEE80211_TX_CTRL_MLO_LINK);

	mvif = vif ? (struct mt7996_vif *)vif->drv_priv : NULL;
	if (mvif)
		mlink = rcu_dereference(mvif->mt76.link[link_id]);

	if (mlink) {
		omac_idx = mlink->omac_idx;
		wmm_idx = mlink->wmm_idx;
		band_idx = mlink->band_idx;
	}

	if (inband_disc) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_ALTX0;
	} else if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (qid >= MT_TXQ_PSD) {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = wmm_idx * MT7996_MAX_WMM_SETS +
			mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	if (band_idx)
		val |= FIELD_PREP(MT_TXD1_TGID, band_idx);

	txwi[1] = cpu_to_le32(val);
	txwi[2] = 0;

	val = MT_TXD3_SW_POWER_MGMT |
	      FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (key)
		val |= MT_TXD3_PROTECT_FRAME;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		val |= MT_TXD3_NO_ACK;

	txwi[3] = cpu_to_le32(val);
	txwi[4] = 0;

	val = FIELD_PREP(MT_TXD5_PID, pid);
	if (pid >= MT_PACKET_ID_FIRST)
		val |= MT_TXD5_TX_STATUS_HOST;
	txwi[5] = cpu_to_le32(val);

	val = MT_TXD6_DAS;
	if (q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0)
		val |= MT_TXD6_DIS_MAT;

	if (is_mt7996(&dev->mt76))
		val |= FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
	else if (is_8023 || !ieee80211_is_mgmt(hdr->frame_control))
		val |= FIELD_PREP(MT_TXD6_MSDU_CNT_V2, 1);

	txwi[6] = cpu_to_le32(val);
	txwi[7] = 0;

	if (is_8023)
		mt7996_mac_write_txwi_8023(dev, txwi, skb, wcid);
	else
		mt7996_mac_write_txwi_80211(dev, txwi, skb, key, wcid);

	if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
		bool mcast = ieee80211_is_data(hdr->frame_control) &&
			     is_multicast_ether_addr(hdr->addr1);
		u8 idx = MT7996_BASIC_RATES_TBL;

		if (mlink) {
			if (mcast && mlink->mcast_rates_idx)
				idx = mlink->mcast_rates_idx;
			else if (beacon && mlink->beacon_rates_idx)
				idx = mlink->beacon_rates_idx;
			else
				idx = mlink->basic_rates_idx;
		}

		val = FIELD_PREP(MT_TXD6_TX_RATE, idx) | MT_TXD6_FIXED_BW;
		if (mcast)
			val |= MT_TXD6_DIS_MAT;
		txwi[6] |= cpu_to_le32(val);
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}
}
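
/*
 * Fixed-rate fallback order (illustrative): when MT_TXD1_FIXED_RATE is set,
 * the rate-table index is chosen as mcast_rates_idx for multicast data,
 * then beacon_rates_idx for beacons, then the link basic_rates_idx, and
 * finally the global MT7996_BASIC_RATES_TBL when no link could be resolved.
 */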

static bool
mt7996_tx_use_mgmt(struct mt7996_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (ieee80211_is_mgmt(hdr->frame_control))
		return true;

	/* for SDO to bypass specific data frame */
	if (!mt7996_has_wa(dev)) {
		if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
			return true;

		if (ieee80211_has_a4(hdr->frame_control) &&
		    !ieee80211_is_data_present(hdr->frame_control))
			return true;
	}

	return false;
}

int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_connac_txp_common *txp;
	struct mt76_txwi_cache *t;
	int id, i, pid, nbuf = tx_info->nbuf - 1;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u8 *txwi = (u8 *)txwi_ptr;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
	memset(txwi_ptr, 0, MT_TXD_SIZE);
	/* Transmit non qos data by 802.11 header and need to fill txd by host*/
	if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
		mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
				      pid, qid, 0);

	txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
	for (i = 0; i < nbuf; i++) {
		u16 len;

		len = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[i + 1].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		len |= FIELD_PREP(MT_TXP_DMA_ADDR_H,
				  tx_info->buf[i + 1].addr >> 32);
#endif

		txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
		txp->fw.len[i] = cpu_to_le16(len);
	}
	txp->fw.nbuf = nbuf;

	txp->fw.flags = cpu_to_le16(MT_CT_INFO_FROM_HOST);

	if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_APPLY_TXD);

	if (!key)
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (!is_8023 && mt7996_tx_use_mgmt(dev, tx_info->skb))
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
		struct mt76_vif_link *mlink = NULL;

		if (wcid->offchannel)
			mlink = rcu_dereference(mvif->mt76.offchannel_link);
		if (!mlink)
			mlink = &mvif->deflink.mt76;

		txp->fw.bss_idx = mlink->idx;
	}

	txp->fw.token = cpu_to_le16(id);
	txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff);

	tx_info->skb = NULL;

	/* pass partial skb header to fw */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	return 0;
}
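
/*
 * Buffer handoff sketch (illustrative): buf[0] carries the TXWI/TXP
 * descriptor and buf[1] is clipped to MT_CT_PARSE_LEN, so only the frame
 * header is handed to the firmware for parsing; the full payload remains
 * reachable through the DMA addresses stored in txp->fw.buf[] and is tied
 * back to the frame by the token id from mt76_token_consume().
 */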

u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
{
	struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
	__le32 *txwi = ptr;
	u32 val;

	memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));

	val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
	txwi[0] = cpu_to_le32(val);

	val = BIT(31) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
	txwi[1] = cpu_to_le32(val);

	txp->token = cpu_to_le16(token_id);
	txp->nbuf = 1;
	txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));

	return MT_TXD_SIZE + sizeof(*txp);
}

static void
mt7996_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	struct mt7996_sta_link *msta_link;
	struct mt7996_sta *msta;
	u16 fc, tid;

	if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	if (tid >= 6) /* skip VO queue */
		return;

	if (is_8023) {
		fc = IEEE80211_FTYPE_DATA |
		     (sta->wme ? IEEE80211_STYPE_QOS_DATA : IEEE80211_STYPE_DATA);
	} else {
		/* No need to get precise TID for Action/Management Frame,
		 * since it will not meet the following Frame Control
		 * condition anyway.
		 */

		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

		fc = le16_to_cpu(hdr->frame_control) &
		     (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
	}

	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
		return;

	msta = (struct mt7996_sta *)sta->drv_priv;
	msta_link = &msta->deflink;

	if (!test_and_set_bit(tid, &msta_link->wcid.ampdu_state))
		ieee80211_start_tx_ba_session(sta, tid, 0);
}

static void
mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
		 struct ieee80211_sta *sta, struct list_head *free_list)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_wcid *wcid;
	__le32 *txwi;
	u16 wcid_idx;

	mt76_connac_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
	if (sta) {
		wcid = (struct mt76_wcid *)sta->drv_priv;
		wcid_idx = wcid->idx;

		if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
			mt7996_tx_check_aggr(sta, t->skb);
	} else {
		wcid_idx = le32_get_bits(txwi[9], MT_TXD9_WLAN_IDX);
	}

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);

out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}

static void
mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
{
	__le32 *tx_free = (__le32 *)data, *cur_info;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *phy2 = mdev->phys[MT_BAND1];
	struct mt76_phy *phy3 = mdev->phys[MT_BAND2];
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	struct mt76_wcid *wcid = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *skb, *tmp;
	void *end = data + len;
	bool wake = false;
	u16 total, count = 0;
	u8 ver;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
	if (phy2) {
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_BE], false);
	}
	if (phy3) {
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_BE], false);
	}

	ver = le32_get_bits(tx_free[1], MT_TXFREE1_VER);
	if (WARN_ON_ONCE(ver < 5))
		return;

	total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT);
	for (cur_info = &tx_free[2]; count < total; cur_info++) {
		u32 msdu, info;
		u8 i;

		if (WARN_ON_ONCE((void *)cur_info >= end))
			return;
		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		info = le32_to_cpu(*cur_info);
		if (info & MT_TXFREE_INFO_PAIR) {
			struct mt7996_sta_link *msta_link;
			u16 idx;

			idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			sta = wcid_to_sta(wcid);
			if (!sta)
				goto next;

			msta_link = container_of(wcid, struct mt7996_sta_link,
						 wcid);
			mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);
next:
			/* ver 7 has a new DW with pair = 1, skip it */
			if (ver == 7 && ((void *)(cur_info + 1) < end) &&
			    (le32_to_cpu(*(cur_info + 1)) & MT_TXFREE_INFO_PAIR))
				cur_info++;
			continue;
		} else if (info & MT_TXFREE_INFO_HEADER) {
			u32 tx_retries = 0, tx_failed = 0;

			if (!wcid)
				continue;

			tx_retries =
				FIELD_GET(MT_TXFREE_INFO_COUNT, info) - 1;
			tx_failed = tx_retries +
				!!FIELD_GET(MT_TXFREE_INFO_STAT, info);

			wcid->stats.tx_retries += tx_retries;
			wcid->stats.tx_failed += tx_failed;
			continue;
		}

		for (i = 0; i < 2; i++) {
			msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID;
			if (msdu == MT_TXFREE_INFO_MSDU_ID)
				continue;

			count++;
			txwi = mt76_token_release(mdev, msdu, &wake);
			if (!txwi)
				continue;

			mt7996_txwi_free(dev, txwi, sta, &free_list);
		}
	}

	mt7996_mac_sta_poll(dev);

	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}
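
/*
 * TX-free event walk (illustrative): after the two header DWs, each DW is
 * either a new wcid pair (MT_TXFREE_INFO_PAIR), a per-wcid status word
 * (MT_TXFREE_INFO_HEADER with retry/ack statistics), or two 15-bit MSDU
 * tokens; a half that reads all ones (MT_TXFREE_INFO_MSDU_ID) marks an
 * unused slot.
 */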

static bool
mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
		       int pid, __le32 *txs_data)
{
	struct mt76_sta_stats *stats = &wcid->stats;
	struct ieee80211_supported_band *sband;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy;
	struct ieee80211_tx_info *info;
	struct sk_buff_head list;
	struct rate_info rate = {};
	struct sk_buff *skb = NULL;
	bool cck = false;
	u32 txrate, txs, mode, stbc;

	txs = le32_to_cpu(txs_data[0]);

	mt76_tx_status_lock(mdev, &list);

	/* only report MPDU TXS */
	if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) == 0) {
		skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
		if (skb) {
			info = IEEE80211_SKB_CB(skb);
			if (!(txs & MT_TXS0_ACK_ERROR_MASK))
				info->flags |= IEEE80211_TX_STAT_ACK;

			info->status.ampdu_len = 1;
			info->status.ampdu_ack_len =
				!!(info->flags & IEEE80211_TX_STAT_ACK);

			info->status.rates[0].idx = -1;
		}
	}

	if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wcid->sta) {
		struct ieee80211_sta *sta;
		u8 tid;

		sta = wcid_to_sta(wcid);
		tid = FIELD_GET(MT_TXS0_TID, txs);
		ieee80211_refresh_tx_agg_session_timer(sta, tid);
	}

	txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);

	rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
	rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
	stbc = le32_get_bits(txs_data[3], MT_TXS3_RATE_STBC);

	if (stbc && rate.nss > 1)
		rate.nss >>= 1;

	if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
		stats->tx_nss[rate.nss - 1]++;
	if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
		stats->tx_mcs[rate.mcs]++;

	mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
	switch (mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		mphy = mt76_dev_phy(mdev, wcid->phy_idx);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
			sband = &mphy->sband_6g.sband;
		else
			sband = &mphy->sband_2g.sband;

		rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
		rate.legacy = sband->bitrates[rate.mcs].bitrate;
		break;
	case MT_PHY_TYPE_HT:
	case MT_PHY_TYPE_HT_GF:
		if (rate.mcs > 31)
			goto out;

		rate.flags = RATE_INFO_FLAGS_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_VHT:
		if (rate.mcs > 9)
			goto out;

		rate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
	case MT_PHY_TYPE_HE_MU:
		if (rate.mcs > 11)
			goto out;

		rate.he_gi = wcid->rate.he_gi;
		rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
		rate.flags = RATE_INFO_FLAGS_HE_MCS;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		if (rate.mcs > 13)
			goto out;

		rate.eht_gi = wcid->rate.eht_gi;
		rate.flags = RATE_INFO_FLAGS_EHT_MCS;
		break;
	default:
		goto out;
	}

	stats->tx_mode[mode]++;

	switch (FIELD_GET(MT_TXS0_BW, txs)) {
	case IEEE80211_STA_RX_BW_320:
		rate.bw = RATE_INFO_BW_320;
		stats->tx_bw[4]++;
		break;
	case IEEE80211_STA_RX_BW_160:
		rate.bw = RATE_INFO_BW_160;
		stats->tx_bw[3]++;
		break;
	case IEEE80211_STA_RX_BW_80:
		rate.bw = RATE_INFO_BW_80;
		stats->tx_bw[2]++;
		break;
	case IEEE80211_STA_RX_BW_40:
		rate.bw = RATE_INFO_BW_40;
		stats->tx_bw[1]++;
		break;
	default:
		rate.bw = RATE_INFO_BW_20;
		stats->tx_bw[0]++;
		break;
	}
	wcid->rate = rate;

out:
	if (skb)
		mt76_tx_status_skb_done(mdev, skb, &list);
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}
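
/*
 * STBC note (illustrative): the TXS rate field counts space-time streams,
 * so with STBC a two-stream report carries a single spatial stream and the
 * nss >>= 1 above folds it back before the per-nss counters are updated.
 */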

static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
{
	struct mt7996_sta_link *msta_link;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u16 wcidx;
	u8 pid;

	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);

	if (pid < MT_PACKET_ID_NO_SKB)
		return;

	if (wcidx >= mt7996_wtbl_size(dev))
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data);

	if (!wcid->sta)
		goto out;

	msta_link = container_of(wcid, struct mt7996_sta_link, wcid);
	mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);

out:
	rcu_read_unlock();
}

bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)data;
	__le32 *end = (__le32 *)&rxd[len / 4];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			return true;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7996_mac_tx_free(dev, data, len);
		return false;
	case PKT_TYPE_TXS:
		for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
			mt7996_mac_add_txs(dev, rxd);
		return false;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, data, len);
		return false;
	default:
		return true;
	}
}

void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb, u32 *info)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			type = PKT_TYPE_NORMAL;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2) &&
		    q == MT_RXQ_TXFREE_BAND2) {
			dev_kfree_skb(skb);
			break;
		}

		mt7996_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7996_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXS:
		for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
			mt7996_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL:
		if (!mt7996_mac_fill_rx(dev, q, skb, info)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}

void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u32 reg = MT_WF_PHYRX_BAND_RX_CTRL1(phy->mt76->band_idx);

	mt76_clear(dev, reg, MT_WF_PHYRX_BAND_RX_CTRL1_STSCNT_EN);
	mt76_set(dev, reg, BIT(11) | BIT(9));
}

void mt7996_mac_reset_counters(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	int i;

	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));

	phy->mt76->survey_time = ktime_get_boottime();

	memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));

	/* reset airtime counters */
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band_idx),
		 MT_WF_RMAC_MIB_RXTIME_CLR);

	mt7996_mcu_get_chan_mib_info(phy, true);
}

void mt7996_mac_set_coverage_class(struct mt7996_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7996_dev *dev = phy->dev;
	struct mt7996_phy *phy2 = mt7996_phy2(dev);
	struct mt7996_phy *phy3 = mt7996_phy3(dev);
	u32 reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	u8 band_idx = phy->mt76->band_idx;
	int offset;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	if (phy2)
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       phy2->coverage_class);

	if (phy3)
		coverage_class = max_t(s16, coverage_class,
				       phy3->coverage_class);

	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(band_idx), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(band_idx), ofdm + reg_offset);
}
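
/*
 * Timing example (illustrative): each coverage-class step adds 3 units of
 * air-propagation allowance, so with coverage_class = 10 both the CCK and
 * OFDM PLCP/CCA timeouts above are stretched by 30 via reg_offset.
 */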

void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band)
{
	mt76_set(dev, MT_WF_PHYRX_CSD_BAND_RXTD12(band),
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR_ONLY |
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR);

	mt76_set(dev, MT_WF_PHYRX_BAND_RX_CTRL1(band),
		 FIELD_PREP(MT_WF_PHYRX_BAND_RX_CTRL1_IPI_EN, 0x5));
}

static u8
mt7996_phy_get_nf(struct mt7996_phy *phy, u8 band_idx)
{
	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
	struct mt7996_dev *dev = phy->dev;
	u32 val, sum = 0, n = 0;
	int ant, i;

	for (ant = 0; ant < hweight8(phy->mt76->antenna_mask); ant++) {
		u32 reg = MT_WF_PHYRX_CSD_IRPI(band_idx, ant);

		for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
			val = mt76_rr(dev, reg);
			sum += val * nf_power[i];
			n += val;
		}
	}

	return n ? sum / n : 0;
}

void mt7996_update_channel(struct mt76_phy *mphy)
{
	struct mt7996_phy *phy = mphy->priv;
	struct mt76_channel_state *state = mphy->chan_state;
	int nf;

	mt7996_mcu_get_chan_mib_info(phy, false);

	nf = mt7996_phy_get_nf(phy, mphy->band_idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state->noise = -(phy->noise >> 4);
}
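
/*
 * Averaging example (illustrative): phy->noise keeps the noise floor in
 * Q4 fixed point (value << 4). With phy->noise = 90 << 4 and a fresh
 * reading nf = 92, the step noise += nf - (noise >> 4) adds 2, moving the
 * average toward 92 with a time constant of roughly 16 samples;
 * state->noise then reports the negated average, e.g. -90 dBm.
 */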

static bool
mt7996_wait_reset_state(struct mt7996_dev *dev, u32 state)
{
	bool ret;

	ret = wait_event_timeout(dev->reset_wait,
				 (READ_ONCE(dev->recovery.state) & state),
				 MT7996_RESET_TIMEOUT);

	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
	return ret;
}

static void
mt7996_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct ieee80211_hw *hw = priv;

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
		mt7996_mcu_add_beacon(hw, vif, &vif->bss_conf);
		break;
	default:
		break;
	}
}

static void
mt7996_update_beacons(struct mt7996_dev *dev)
{
	struct mt76_phy *phy2, *phy3;

	ieee80211_iterate_active_interfaces(dev->mt76.hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, dev->mt76.hw);

	phy2 = dev->mt76.phys[MT_BAND1];
	if (!phy2)
		return;

	ieee80211_iterate_active_interfaces(phy2->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, phy2->hw);

	phy3 = dev->mt76.phys[MT_BAND2];
	if (!phy3)
		return;

	ieee80211_iterate_active_interfaces(phy3->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, phy3->hw);
}

void mt7996_tx_token_put(struct mt7996_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id) {
		mt7996_txwi_free(dev, txwi, NULL, NULL);
		dev->mt76.token_count--;
	}
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}

static int
mt7996_mac_restart(struct mt7996_dev *dev)
{
	struct mt7996_phy *phy2, *phy3;
	struct mt76_dev *mdev = &dev->mt76;
	int i, ret;

	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);

	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}

	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
	}

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	if (phy2)
		set_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		set_bit(MT76_RESET, &phy3->mt76->state);

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);
	if (phy2)
		mt76_txq_schedule_all(phy2->mt76);
	if (phy3)
		mt76_txq_schedule_all(phy3->mt76);

	/* disable all tx/rx napi */
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(mdev, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&mdev->q_rx[i]))
			continue;

		if (mdev->q_rx[i].ndesc)
			napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	/* token reinit */
	mt7996_tx_token_put(dev);
	idr_init(&dev->mt76.token);

	mt7996_dma_reset(dev, true);

	mt76_for_each_q_rx(mdev, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&mdev->q_rx[i]))
			continue;

		if (mdev->q_rx[i].ndesc) {
			napi_enable(&dev->mt76.napi[i]);
			local_bh_disable();
			napi_schedule(&dev->mt76.napi[i]);
			local_bh_enable();
		}
	}
	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);

	mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}
	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
	}

	/* load firmware */
	ret = mt7996_mcu_init_firmware(dev);
	if (ret)
		goto out;

	/* set the necessary init items */
	ret = mt7996_mcu_set_eeprom(dev);
	if (ret)
		goto out;

	mt7996_mac_init(dev);
	mt7996_init_txpower(&dev->phy);
	mt7996_init_txpower(phy2);
	mt7996_init_txpower(phy3);
	ret = mt7996_txbf_init(dev);

	if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
		ret = mt7996_run(&dev->phy);
		if (ret)
			goto out;
	}

	if (phy2 && test_bit(MT76_STATE_RUNNING, &phy2->mt76->state)) {
		ret = mt7996_run(phy2);
		if (ret)
			goto out;
	}

	if (phy3 && test_bit(MT76_STATE_RUNNING, &phy3->mt76->state)) {
		ret = mt7996_run(phy3);
		if (ret)
			goto out;
	}

out:
	/* reset done */
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		clear_bit(MT76_RESET, &phy3->mt76->state);

	napi_enable(&dev->mt76.tx_napi);
	local_bh_disable();
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	mt76_worker_enable(&dev->mt76.tx_worker);
	return ret;
}

static void
mt7996_mac_full_reset(struct mt7996_dev *dev)
{
	struct mt7996_phy *phy2, *phy3;
	int i;

	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);
	dev->recovery.hw_full_reset = true;

	wake_up(&dev->mt76.mcu.wait);
	ieee80211_stop_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_stop_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_stop_queues(phy3->mt76->hw);

	cancel_work_sync(&dev->wed_rro.work);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2)
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	if (phy3)
		cancel_delayed_work_sync(&phy3->mt76->mac_work);

	mutex_lock(&dev->mt76.mutex);
	for (i = 0; i < 10; i++) {
		if (!mt7996_mac_restart(dev))
			break;
	}
	mutex_unlock(&dev->mt76.mutex);

	if (i == 10)
		dev_err(dev->mt76.dev, "chip full reset failed\n");

	ieee80211_restart_hw(mt76_hw(dev));
	if (phy2)
		ieee80211_restart_hw(phy2->mt76->hw);
	if (phy3)
		ieee80211_restart_hw(phy3->mt76->hw);

	ieee80211_wake_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_wake_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_wake_queues(phy3->mt76->hw);

	dev->recovery.hw_full_reset = false;
	ieee80211_queue_delayed_work(mt76_hw(dev),
				     &dev->mphy.mac_work,
				     MT7996_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(phy2->mt76->hw,
					     &phy2->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	if (phy3)
		ieee80211_queue_delayed_work(phy3->mt76->hw,
					     &phy3->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
}

void mt7996_mac_reset_work(struct work_struct *work)
{
	struct mt7996_phy *phy2, *phy3;
	struct mt7996_dev *dev;
	int i;

	dev = container_of(work, struct mt7996_dev, reset_work);
	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);

	/* chip full reset */
	if (dev->recovery.restart) {
		/* disable WA/WM WDT */
		mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA,
			   MT_MCU_CMD_WDT_MASK);

		if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT)
			dev->recovery.wa_reset_count++;
		else
			dev->recovery.wm_reset_count++;

		mt7996_mac_full_reset(dev);

		/* enable mcu irq */
		mt7996_irq_enable(dev, MT_INT_MCU_CMD);
		mt7996_irq_disable(dev, 0);

		/* enable WA/WM WDT */
		mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);

		dev->recovery.state = MT_MCU_CMD_NORMAL_STATE;
		dev->recovery.restart = false;
		return;
	}

	if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
		return;

	dev_info(dev->mt76.dev,"\n%s L1 SER recovery start.",
		 wiphy_name(dev->mt76.hw->wiphy));

	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
		mtk_wed_device_stop(&dev->mt76.mmio.wed_hif2);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
		mtk_wed_device_stop(&dev->mt76.mmio.wed);

	ieee80211_stop_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_stop_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_stop_queues(phy3->mt76->hw);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);

	cancel_work_sync(&dev->wed_rro.work);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	}
	if (phy3) {
		set_bit(MT76_RESET, &phy3->mt76->state);
		cancel_delayed_work_sync(&phy3->mt76->mac_work);
	}
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
			continue;

		napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);

	if (mt7996_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7996_dma_reset(dev, false);

		mt7996_tx_token_put(dev);
		idr_init(&dev->mt76.token);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
		mt7996_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	/* enable DMA Tx/Rx and interrupt */
	mt7996_dma_start(dev, false, false);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
		u32 wed_irq_mask = MT_INT_RRO_RX_DONE | MT_INT_TX_DONE_BAND2 |
				   dev->mt76.mmio.irqmask;

		if (mtk_wed_get_rx_capa(&dev->mt76.mmio.wed))
			wed_irq_mask &= ~MT_INT_RX_DONE_RRO_IND;

		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);

		mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
					    true);
		mt7996_irq_enable(dev, wed_irq_mask);
		mt7996_irq_disable(dev, 0);
	}

	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) {
		mt76_wr(dev, MT_INT_PCIE1_MASK_CSR, MT_INT_TX_RX_DONE_EXT);
		mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
				     MT_INT_TX_RX_DONE_EXT);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		clear_bit(MT76_RESET, &phy3->mt76->state);

	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
			continue;

		napi_enable(&dev->mt76.napi[i]);
		local_bh_disable();
		napi_schedule(&dev->mt76.napi[i]);
		local_bh_enable();
	}

	tasklet_schedule(&dev->mt76.irq_tasklet);

	mt76_worker_enable(&dev->mt76.tx_worker);

	napi_enable(&dev->mt76.tx_napi);
	local_bh_disable();
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	ieee80211_wake_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_wake_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_wake_queues(phy3->mt76->hw);

	mutex_unlock(&dev->mt76.mutex);

	mt7996_update_beacons(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     MT7996_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(phy2->mt76->hw,
					     &phy2->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	if (phy3)
		ieee80211_queue_delayed_work(phy3->mt76->hw,
					     &phy3->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	dev_info(dev->mt76.dev,"\n%s L1 SER recovery completed.",
		 wiphy_name(dev->mt76.hw->wiphy));
}
		if (mtk_wed_get_rx_capa(&dev->mt76.mmio.wed))
			wed_irq_mask &= ~MT_INT_RX_DONE_RRO_IND;

		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);

		mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
					    true);
		mt7996_irq_enable(dev, wed_irq_mask);
		mt7996_irq_disable(dev, 0);
	}

	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) {
		mt76_wr(dev, MT_INT_PCIE1_MASK_CSR, MT_INT_TX_RX_DONE_EXT);
		mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
				     MT_INT_TX_RX_DONE_EXT);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		clear_bit(MT76_RESET, &phy3->mt76->state);

	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
			continue;

		napi_enable(&dev->mt76.napi[i]);
		local_bh_disable();
		napi_schedule(&dev->mt76.napi[i]);
		local_bh_enable();
	}

	tasklet_schedule(&dev->mt76.irq_tasklet);

	mt76_worker_enable(&dev->mt76.tx_worker);

	napi_enable(&dev->mt76.tx_napi);
	local_bh_disable();
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	ieee80211_wake_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_wake_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_wake_queues(phy3->mt76->hw);

	mutex_unlock(&dev->mt76.mutex);

	mt7996_update_beacons(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     MT7996_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(phy2->mt76->hw,
					     &phy2->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	if (phy3)
		ieee80211_queue_delayed_work(phy3->mt76->hw,
					     &phy3->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	dev_info(dev->mt76.dev, "%s L1 SER recovery completed\n",
		 wiphy_name(dev->mt76.hw->wiphy));
}

/* firmware coredump */
void mt7996_mac_dump_work(struct work_struct *work)
{
	const struct mt7996_mem_region *mem_region;
	struct mt7996_crash_data *crash_data;
	struct mt7996_dev *dev;
	struct mt7996_mem_hdr *hdr;
	size_t buf_len;
	int i;
	u32 num;
	u8 *buf;

	dev = container_of(work, struct mt7996_dev, dump_work);

	mutex_lock(&dev->dump_mutex);

	crash_data = mt7996_coredump_new(dev);
	if (!crash_data) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_coredump;
	}

	mem_region = mt7996_coredump_get_mem_layout(dev, &num);
	if (!mem_region || !crash_data->memdump_buf_len) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_memdump;
	}

	buf = crash_data->memdump_buf;
	buf_len = crash_data->memdump_buf_len;

	/* dumping memory content... */
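	/* Each region is written as a small header carrying the region's
	 * start address and length, immediately followed by the payload
	 * copied from IO memory; this repeats for each of the 'num'
	 * regions while the remaining buffer is large enough.
	 */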
	memset(buf, 0, buf_len);
	for (i = 0; i < num; i++) {
		if (mem_region->len > buf_len) {
			dev_warn(dev->mt76.dev, "%s len %zu is too large\n",
				 mem_region->name, mem_region->len);
			break;
		}

		/* reserve space for the header */
		hdr = (void *)buf;
		buf += sizeof(*hdr);
		buf_len -= sizeof(*hdr);

		mt7996_memcpy_fromio(dev, buf, mem_region->start,
				     mem_region->len);

		hdr->start = mem_region->start;
		hdr->len = mem_region->len;

		if (!mem_region->len)
			/* note: the header remains, just with zero length */
			break;

		buf += mem_region->len;
		buf_len -= mem_region->len;

		mem_region++;
	}

	mutex_unlock(&dev->dump_mutex);

skip_memdump:
	mt7996_coredump_submit(dev);
skip_coredump:
	queue_work(dev->mt76.wq, &dev->reset_work);
}

void mt7996_reset(struct mt7996_dev *dev)
{
	if (!dev->recovery.hw_init_done)
		return;

	if (dev->recovery.hw_full_reset)
		return;

	/* wm/wa exception: do full recovery */
	if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) {
		dev->recovery.restart = true;
		dev_info(dev->mt76.dev,
			 "%s indicated firmware crash, attempting recovery\n",
			 wiphy_name(dev->mt76.hw->wiphy));

		mt7996_irq_disable(dev, MT_INT_MCU_CMD);
		queue_work(dev->mt76.wq, &dev->dump_work);
		return;
	}

	queue_work(dev->mt76.wq, &dev->reset_work);
	wake_up(&dev->reset_wait);
}

void mt7996_mac_update_stats(struct mt7996_phy *phy)
{
	struct mt76_mib_stats *mib = &phy->mib;
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	u32 cnt;
	int i;

	cnt = mt76_rr(dev, MT_MIB_RSCR1(band_idx));
	mib->fcs_err_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR33(band_idx));
	mib->rx_fifo_full_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR31(band_idx));
	mib->rx_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR6(band_idx));
	mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_RVSR0(band_idx));
	mib->rx_vector_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR35(band_idx));
	mib->rx_delimiter_fail_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR36(band_idx));
	mib->rx_len_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR0(band_idx));
	mib->tx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR2(band_idx));
	mib->tx_stop_q_empty_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR3(band_idx));
	mib->tx_mpdu_attempts_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR4(band_idx));
	mib->tx_mpdu_success_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR27(band_idx));
	mib->rx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR28(band_idx));
	mib->rx_ampdu_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR29(band_idx));
	mib->rx_ampdu_valid_subframe_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR30(band_idx));
	mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR27(band_idx));
	mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR28(band_idx));
	mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT, cnt);

	cnt = mt76_rr(dev, MT_UMIB_RPDCR(band_idx));
	mib->rx_pfdrop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RVSR1(band_idx));
	mib->rx_vec_queue_overflow_drop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR1(band_idx));
	mib->rx_ba_cnt += cnt;

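	/* Beamforming counters: each per-mode feedback count read below
	 * (HT/VHT/HE/EHT) also feeds the aggregate tx_bf_rx_fb_all_cnt.
	 */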
	cnt = mt76_rr(dev, MT_MIB_BSCR0(band_idx));
	mib->tx_bf_ebf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR1(band_idx));
	mib->tx_bf_ibf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR2(band_idx));
	mib->tx_mu_bf_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR5(band_idx));
	mib->tx_mu_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR6(band_idx));
	mib->tx_mu_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR7(band_idx));
	mib->tx_su_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR3(band_idx));
	mib->tx_bf_rx_fb_ht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR4(band_idx));
	mib->tx_bf_rx_fb_vht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR5(band_idx));
	mib->tx_bf_rx_fb_he_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR6(band_idx));
	mib->tx_bf_rx_fb_eht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(band_idx));
	mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt);
	mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt);
	mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt);

	cnt = mt76_rr(dev, MT_MIB_BSCR7(band_idx));
	mib->tx_bf_fb_trig_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR17(band_idx));
	mib->tx_bf_fb_cpl_cnt += cnt;

	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
		cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
		mib->tx_amsdu[i] += cnt;
		mib->tx_amsdu_cnt += cnt;
	}

	/* rts count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR5(band_idx));
	mib->rts_cnt += cnt;

	/* rts retry count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR6(band_idx));
	mib->rts_retries_cnt += cnt;

	/* ba miss count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR0(band_idx));
	mib->ba_miss_cnt += cnt;

	/* ack fail count */
	cnt = mt76_rr(dev, MT_MIB_BFTFCR(band_idx));
	mib->ack_fail_cnt += cnt;

	for (i = 0; i < 16; i++) {
		cnt = mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));
		phy->mt76->aggr_stats[i] += cnt;
	}
}

void mt7996_mac_sta_rc_work(struct work_struct *work)
{
	struct mt7996_dev *dev = container_of(work, struct mt7996_dev, rc_work);
	struct ieee80211_bss_conf *link_conf;
	struct ieee80211_link_sta *link_sta;
	struct mt7996_sta_link *msta_link;
	struct mt7996_vif_link *link;
	struct mt76_vif_link *mlink;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct mt7996_sta *msta;
	struct mt7996_vif *mvif;
	LIST_HEAD(list);
	u32 changed;
	u8 link_id;

	rcu_read_lock();
	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->sta_rc_list, &list);

	while (!list_empty(&list)) {
		msta_link = list_first_entry(&list, struct mt7996_sta_link,
					     rc_list);
		list_del_init(&msta_link->rc_list);

		changed = msta_link->changed;
		msta_link->changed = 0;

		sta = wcid_to_sta(&msta_link->wcid);
		link_id = msta_link->wcid.link_id;
		msta = msta_link->sta;
		mvif = msta->vif;
		vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv);

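		/* The lookups below run under RCU; if the link is being
		 * torn down any of them can fail, and the entry is then
		 * skipped.
		 */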
		mlink = rcu_dereference(mvif->mt76.link[link_id]);
		if (!mlink)
			continue;

		link_sta = rcu_dereference(sta->link[link_id]);
		if (!link_sta)
			continue;

		link_conf = rcu_dereference(vif->link_conf[link_id]);
		if (!link_conf)
			continue;

		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		link = (struct mt7996_vif_link *)mlink;

		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
			       IEEE80211_RC_NSS_CHANGED |
			       IEEE80211_RC_BW_CHANGED))
			mt7996_mcu_add_rate_ctrl(dev, vif, link_conf,
						 link_sta, link, msta_link,
						 true);

		if (changed & IEEE80211_RC_SMPS_CHANGED)
			mt7996_mcu_set_fixed_field(dev, link_sta, link,
						   msta_link, NULL,
						   RATE_PARAM_MMPS_UPDATE);

		spin_lock_bh(&dev->mt76.sta_poll_lock);
	}

	spin_unlock_bh(&dev->mt76.sta_poll_lock);
	rcu_read_unlock();
}

void mt7996_mac_work(struct work_struct *work)
{
	struct mt7996_phy *phy;
	struct mt76_phy *mphy;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mutex_lock(&mphy->dev->mutex);

	mt76_update_survey(mphy);
	if (++mphy->mac_work_count == 5) {
		mphy->mac_work_count = 0;

		mt7996_mac_update_stats(phy);

		mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_RATE);
		if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
			mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_ADM_STAT);
			mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_MSDU_COUNT);
		}
	}

	mutex_unlock(&mphy->dev->mutex);

	mt76_tx_status_check(mphy->dev, false);

	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
				     MT7996_WATCHDOG_TIME);
}

static void mt7996_dfs_stop_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	int rdd_idx = mt7996_get_rdd_idx(phy, false);

	if (rdd_idx < 0)
		return;

	mt7996_mcu_rdd_cmd(dev, RDD_STOP, rdd_idx, 0);
}

static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int rdd_idx)
{
	int err, region;

	switch (dev->mt76.region) {
	case NL80211_DFS_ETSI:
		region = 0;
		break;
	case NL80211_DFS_JP:
		region = 2;
		break;
	case NL80211_DFS_FCC:
	default:
		region = 1;
		break;
	}

	err = mt7996_mcu_rdd_cmd(dev, RDD_START, rdd_idx, region);
	if (err < 0)
		return err;

	return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, rdd_idx, 1);
}

static int mt7996_dfs_start_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	int err, rdd_idx;

	rdd_idx = mt7996_get_rdd_idx(phy, false);
	if (rdd_idx < 0)
		return -EINVAL;

	/* start CAC */
	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, rdd_idx, 0);
	if (err < 0)
		return err;

	err = mt7996_dfs_start_rdd(dev, rdd_idx);

	return err;
}

static int
mt7996_dfs_init_radar_specs(struct mt7996_phy *phy)
{
	const struct mt7996_dfs_radar_spec *radar_specs;
	struct mt7996_dev *dev = phy->dev;
	int err, i;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		err = mt7996_mcu_set_fcc5_lpn(dev, 8);
		if (err < 0)
			return err;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}
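
	/* Program the per-pattern radar thresholds for the selected region,
	 * then the shared pulse thresholds.
	 */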
	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7996_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7996_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}

int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	enum mt76_dfs_state dfs_state, prev_state;
	int err, rdd_idx = mt7996_get_rdd_idx(phy, false);

	prev_state = phy->mt76->dfs_state;
	dfs_state = mt76_phy_dfs_state(phy->mt76);

	if (prev_state == dfs_state || rdd_idx < 0)
		return 0;

	if (prev_state == MT_DFS_STATE_UNKNOWN)
		mt7996_dfs_stop_radar_detector(phy);

	if (dfs_state == MT_DFS_STATE_DISABLED)
		goto stop;

	if (prev_state <= MT_DFS_STATE_DISABLED) {
		err = mt7996_dfs_init_radar_specs(phy);
		if (err < 0)
			return err;

		err = mt7996_dfs_start_radar_detector(phy);
		if (err < 0)
			return err;

		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
	}

	if (dfs_state == MT_DFS_STATE_CAC)
		return 0;

	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END, rdd_idx, 0);
	if (err < 0) {
		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
		return err;
	}

	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
	return 0;

stop:
	err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START, rdd_idx, 0);
	if (err < 0)
		return err;

	mt7996_dfs_stop_radar_detector(phy);
	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;

	return 0;
}

static int
mt7996_mac_twt_duration_align(int duration)
{
	return duration << 8;
}

static u64
mt7996_mac_twt_sched_list_add(struct mt7996_dev *dev,
			      struct mt7996_twt_flow *flow)
{
	struct mt7996_twt_flow *iter, *iter_next;
	u32 duration = flow->duration << 8;
	u64 start_tsf;

	iter = list_first_entry_or_null(&dev->twt_list,
					struct mt7996_twt_flow, list);
	if (!iter || !iter->sched || iter->start_tsf > duration) {
		/* add flow as first entry in the list */
		list_add(&flow->list, &dev->twt_list);
		return 0;
	}

	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
		start_tsf = iter->start_tsf +
			    mt7996_mac_twt_duration_align(iter->duration);
		if (list_is_last(&iter->list, &dev->twt_list))
			break;

		if (!iter_next->sched ||
		    iter_next->start_tsf > start_tsf + duration) {
			list_add(&flow->list, &iter->list);
			goto out;
		}
	}

	/* add flow as last entry in the list */
	list_add_tail(&flow->list, &dev->twt_list);
out:
	return start_tsf;
}

static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
{
	struct ieee80211_twt_params *twt_agrt;
	u64 interval, duration;
	u16 mantissa;
	u8 exp;

	/* only individual agreement supported */
	if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
		return -EOPNOTSUPP;

	/* only 256us unit supported */
	if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
		return -EOPNOTSUPP;

	twt_agrt = (struct ieee80211_twt_params *)twt->params;

	/* explicit agreement not supported */
	if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
		return -EOPNOTSUPP;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
			le16_to_cpu(twt_agrt->req_type));
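	/* The wake interval (mantissa << exp, in microseconds) must be able
	 * to contain the requested service period (min_twt_dur is in units
	 * of 256us, hence the << 8 below).
	 */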
	mantissa = le16_to_cpu(twt_agrt->mantissa);
	duration = twt_agrt->min_twt_dur << 8;

	interval = (u64)mantissa << exp;
	if (interval < duration)
		return -EOPNOTSUPP;

	return 0;
}

static bool
mt7996_mac_twt_param_equal(struct mt7996_sta_link *msta_link,
			   struct ieee80211_twt_params *twt_agrt)
{
	u16 type = le16_to_cpu(twt_agrt->req_type);
	u8 exp;
	int i;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
	for (i = 0; i < MT7996_MAX_STA_TWT_AGRT; i++) {
		struct mt7996_twt_flow *f;

		if (!(msta_link->twt.flowid_mask & BIT(i)))
			continue;

		f = &msta_link->twt.flow[i];
		if (f->duration == twt_agrt->min_twt_dur &&
		    f->mantissa == twt_agrt->mantissa &&
		    f->exp == exp &&
		    f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
		    f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
		    f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
			return true;
	}

	return false;
}

void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta,
			      struct ieee80211_twt_setup *twt)
{
	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
	struct mt7996_sta_link *msta_link = &msta->deflink;
	u16 req_type = le16_to_cpu(twt_agrt->req_type);
	enum ieee80211_twt_setup_cmd sta_setup_cmd;
	struct mt7996_dev *dev = mt7996_hw_dev(hw);
	struct mt7996_twt_flow *flow;
	u8 flowid, table_id, exp;

	if (mt7996_mac_check_twt_req(twt))
		goto out;

	mutex_lock(&dev->mt76.mutex);

	if (dev->twt.n_agrt == MT7996_MAX_TWT_AGRT)
		goto unlock;

	if (hweight8(msta_link->twt.flowid_mask) ==
	    ARRAY_SIZE(msta_link->twt.flow))
		goto unlock;

	if (twt_agrt->min_twt_dur < MT7996_MIN_TWT_DUR) {
		setup_cmd = TWT_SETUP_CMD_DICTATE;
		twt_agrt->min_twt_dur = MT7996_MIN_TWT_DUR;
		goto unlock;
	}

	if (mt7996_mac_twt_param_equal(msta_link, twt_agrt))
		goto unlock;

	flowid = ffs(~msta_link->twt.flowid_mask) - 1;
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
	twt_agrt->req_type |= le16_encode_bits(flowid,
					       IEEE80211_TWT_REQTYPE_FLOWID);

	table_id = ffs(~dev->twt.table_mask) - 1;
	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);

	flow = &msta_link->twt.flow[flowid];
	memset(flow, 0, sizeof(*flow));
	INIT_LIST_HEAD(&flow->list);
	flow->wcid = msta_link->wcid.idx;
	flow->table_id = table_id;
	flow->id = flowid;
	flow->duration = twt_agrt->min_twt_dur;
	flow->mantissa = twt_agrt->mantissa;
	flow->exp = exp;
	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);

	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
		u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
		u64 flow_tsf, curr_tsf;
		u32 rem;

		flow->sched = true;
		flow->start_tsf = mt7996_mac_twt_sched_list_add(dev, flow);
		curr_tsf = __mt7996_get_tsf(hw, &msta->vif->deflink);
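		/* Place the first SP at the next interval boundary past the
		 * slot picked by the scheduler:
		 *   flow_tsf = curr_tsf + interval -
		 *	       ((curr_tsf - start_tsf) % interval)
		 */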
		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
		flow_tsf = curr_tsf + interval - rem;
		twt_agrt->twt = cpu_to_le64(flow_tsf);
	} else {
		list_add_tail(&flow->list, &dev->twt_list);
	}
	flow->tsf = le64_to_cpu(twt_agrt->twt);

	if (mt7996_mcu_twt_agrt_update(dev, &msta->vif->deflink, flow,
				       MCU_TWT_AGRT_ADD))
		goto unlock;

	setup_cmd = TWT_SETUP_CMD_ACCEPT;
	dev->twt.table_mask |= BIT(table_id);
	msta_link->twt.flowid_mask |= BIT(flowid);
	dev->twt.n_agrt++;

unlock:
	mutex_unlock(&dev->mt76.mutex);
out:
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt_agrt->req_type |=
		le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt->control = twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED;
}

void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
				  struct mt7996_vif_link *link,
				  struct mt7996_sta_link *msta_link,
				  u8 flowid)
{
	struct mt7996_twt_flow *flow;

	lockdep_assert_held(&dev->mt76.mutex);

	if (flowid >= ARRAY_SIZE(msta_link->twt.flow))
		return;

	if (!(msta_link->twt.flowid_mask & BIT(flowid)))
		return;

	flow = &msta_link->twt.flow[flowid];
	if (mt7996_mcu_twt_agrt_update(dev, link, flow, MCU_TWT_AGRT_DELETE))
		return;

	list_del_init(&flow->list);
	msta_link->twt.flowid_mask &= ~BIT(flowid);
	dev->twt.table_mask &= ~BIT(flow->table_id);
	dev->twt.n_agrt--;
}