1 // SPDX-License-Identifier: ISC 2 /* 3 * Copyright (C) 2022 MediaTek Inc. 4 */ 5 6 #include <linux/etherdevice.h> 7 #include <linux/timekeeping.h> 8 #include "coredump.h" 9 #include "mt7996.h" 10 #include "../dma.h" 11 #include "mac.h" 12 #include "mcu.h" 13 14 #define to_rssi(field, rcpi) ((FIELD_GET(field, rcpi) - 220) / 2) 15 16 static const struct mt7996_dfs_radar_spec etsi_radar_specs = { 17 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, 18 .radar_pattern = { 19 [5] = { 1, 0, 6, 32, 28, 0, 990, 5010, 17, 1, 1 }, 20 [6] = { 1, 0, 9, 32, 28, 0, 615, 5010, 27, 1, 1 }, 21 [7] = { 1, 0, 15, 32, 28, 0, 240, 445, 27, 1, 1 }, 22 [8] = { 1, 0, 12, 32, 28, 0, 240, 510, 42, 1, 1 }, 23 [9] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 }, 24 [10] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 }, 25 [11] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 }, 26 [12] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 }, 27 }, 28 }; 29 30 static const struct mt7996_dfs_radar_spec fcc_radar_specs = { 31 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, 32 .radar_pattern = { 33 [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 }, 34 [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 }, 35 [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 }, 36 [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 }, 37 [4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 }, 38 }, 39 }; 40 41 static const struct mt7996_dfs_radar_spec jp_radar_specs = { 42 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, 43 .radar_pattern = { 44 [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 }, 45 [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 }, 46 [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 }, 47 [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 }, 48 [4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 }, 49 [13] = { 1, 0, 7, 32, 28, 0, 3836, 3856, 14, 1, 1 }, 50 [14] = { 1, 0, 6, 32, 28, 0, 615, 5010, 110, 1, 1 }, 51 [15] = { 1, 1, 0, 0, 0, 0, 15, 5010, 110, 
0, 0, 12, 32, 28 }, 52 }, 53 }; 54 55 static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev, 56 u16 idx, bool unicast) 57 { 58 struct mt7996_sta *sta; 59 struct mt76_wcid *wcid; 60 61 if (idx >= ARRAY_SIZE(dev->mt76.wcid)) 62 return NULL; 63 64 wcid = rcu_dereference(dev->mt76.wcid[idx]); 65 if (unicast || !wcid) 66 return wcid; 67 68 if (!wcid->sta) 69 return NULL; 70 71 sta = container_of(wcid, struct mt7996_sta, wcid); 72 if (!sta->vif) 73 return NULL; 74 75 return &sta->vif->sta.wcid; 76 } 77 78 bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask) 79 { 80 mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX, 81 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask); 82 83 return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 84 0, 5000); 85 } 86 87 u32 mt7996_mac_wtbl_lmac_addr(struct mt7996_dev *dev, u16 wcid, u8 dw) 88 { 89 mt76_wr(dev, MT_WTBLON_TOP_WDUCR, 90 FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7))); 91 92 return MT_WTBL_LMAC_OFFS(wcid, dw); 93 } 94 95 static void mt7996_mac_sta_poll(struct mt7996_dev *dev) 96 { 97 static const u8 ac_to_tid[] = { 98 [IEEE80211_AC_BE] = 0, 99 [IEEE80211_AC_BK] = 1, 100 [IEEE80211_AC_VI] = 4, 101 [IEEE80211_AC_VO] = 6 102 }; 103 struct ieee80211_sta *sta; 104 struct mt7996_sta *msta; 105 u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS]; 106 LIST_HEAD(sta_poll_list); 107 int i; 108 109 spin_lock_bh(&dev->mt76.sta_poll_lock); 110 list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list); 111 spin_unlock_bh(&dev->mt76.sta_poll_lock); 112 113 rcu_read_lock(); 114 115 while (true) { 116 bool clear = false; 117 u32 addr, val; 118 u16 idx; 119 s8 rssi[4]; 120 121 spin_lock_bh(&dev->mt76.sta_poll_lock); 122 if (list_empty(&sta_poll_list)) { 123 spin_unlock_bh(&dev->mt76.sta_poll_lock); 124 break; 125 } 126 msta = list_first_entry(&sta_poll_list, 127 struct mt7996_sta, wcid.poll_list); 128 list_del_init(&msta->wcid.poll_list); 129 spin_unlock_bh(&dev->mt76.sta_poll_lock); 130 131 idx 
= msta->wcid.idx; 132 133 /* refresh peer's airtime reporting */ 134 addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 20); 135 136 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 137 u32 tx_last = msta->airtime_ac[i]; 138 u32 rx_last = msta->airtime_ac[i + 4]; 139 140 msta->airtime_ac[i] = mt76_rr(dev, addr); 141 msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4); 142 143 tx_time[i] = msta->airtime_ac[i] - tx_last; 144 rx_time[i] = msta->airtime_ac[i + 4] - rx_last; 145 146 if ((tx_last | rx_last) & BIT(30)) 147 clear = true; 148 149 addr += 8; 150 } 151 152 if (clear) { 153 mt7996_mac_wtbl_update(dev, idx, 154 MT_WTBL_UPDATE_ADM_COUNT_CLEAR); 155 memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac)); 156 } 157 158 if (!msta->wcid.sta) 159 continue; 160 161 sta = container_of((void *)msta, struct ieee80211_sta, 162 drv_priv); 163 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 164 u8 q = mt76_connac_lmac_mapping(i); 165 u32 tx_cur = tx_time[q]; 166 u32 rx_cur = rx_time[q]; 167 u8 tid = ac_to_tid[i]; 168 169 if (!tx_cur && !rx_cur) 170 continue; 171 172 ieee80211_sta_register_airtime(sta, tid, tx_cur, rx_cur); 173 } 174 175 /* get signal strength of resp frames (CTS/BA/ACK) */ 176 addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 34); 177 val = mt76_rr(dev, addr); 178 179 rssi[0] = to_rssi(GENMASK(7, 0), val); 180 rssi[1] = to_rssi(GENMASK(15, 8), val); 181 rssi[2] = to_rssi(GENMASK(23, 16), val); 182 rssi[3] = to_rssi(GENMASK(31, 14), val); 183 184 msta->ack_signal = 185 mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi); 186 187 ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal); 188 } 189 190 rcu_read_unlock(); 191 } 192 193 void mt7996_mac_enable_rtscts(struct mt7996_dev *dev, 194 struct ieee80211_vif *vif, bool enable) 195 { 196 struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; 197 u32 addr; 198 199 addr = mt7996_mac_wtbl_lmac_addr(dev, mvif->sta.wcid.idx, 5); 200 if (enable) 201 mt76_set(dev, addr, BIT(5)); 202 else 203 mt76_clear(dev, addr, BIT(5)); 204 } 
205 206 /* The HW does not translate the mac header to 802.3 for mesh point */ 207 static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap) 208 { 209 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; 210 struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap); 211 struct mt7996_sta *msta = (struct mt7996_sta *)status->wcid; 212 __le32 *rxd = (__le32 *)skb->data; 213 struct ieee80211_sta *sta; 214 struct ieee80211_vif *vif; 215 struct ieee80211_hdr hdr; 216 u16 frame_control; 217 218 if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) != 219 MT_RXD3_NORMAL_U2M) 220 return -EINVAL; 221 222 if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4)) 223 return -EINVAL; 224 225 if (!msta || !msta->vif) 226 return -EINVAL; 227 228 sta = container_of((void *)msta, struct ieee80211_sta, drv_priv); 229 vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv); 230 231 /* store the info from RXD and ethhdr to avoid being overridden */ 232 frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL); 233 hdr.frame_control = cpu_to_le16(frame_control); 234 hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_SEQ_CTRL)); 235 hdr.duration_id = 0; 236 237 ether_addr_copy(hdr.addr1, vif->addr); 238 ether_addr_copy(hdr.addr2, sta->addr); 239 switch (frame_control & (IEEE80211_FCTL_TODS | 240 IEEE80211_FCTL_FROMDS)) { 241 case 0: 242 ether_addr_copy(hdr.addr3, vif->bss_conf.bssid); 243 break; 244 case IEEE80211_FCTL_FROMDS: 245 ether_addr_copy(hdr.addr3, eth_hdr->h_source); 246 break; 247 case IEEE80211_FCTL_TODS: 248 ether_addr_copy(hdr.addr3, eth_hdr->h_dest); 249 break; 250 case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS: 251 ether_addr_copy(hdr.addr3, eth_hdr->h_dest); 252 ether_addr_copy(hdr.addr4, eth_hdr->h_source); 253 break; 254 default: 255 return -EINVAL; 256 } 257 258 skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2); 259 if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) || 260 eth_hdr->h_proto == 
cpu_to_be16(ETH_P_IPX)) 261 ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header); 262 else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN) 263 ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header); 264 else 265 skb_pull(skb, 2); 266 267 if (ieee80211_has_order(hdr.frame_control)) 268 memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[11], 269 IEEE80211_HT_CTL_LEN); 270 if (ieee80211_is_data_qos(hdr.frame_control)) { 271 __le16 qos_ctrl; 272 273 qos_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_QOS_CTL)); 274 memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl, 275 IEEE80211_QOS_CTL_LEN); 276 } 277 278 if (ieee80211_has_a4(hdr.frame_control)) 279 memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr)); 280 else 281 memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6); 282 283 return 0; 284 } 285 286 static int 287 mt7996_mac_fill_rx_rate(struct mt7996_dev *dev, 288 struct mt76_rx_status *status, 289 struct ieee80211_supported_band *sband, 290 __le32 *rxv, u8 *mode) 291 { 292 u32 v0, v2; 293 u8 stbc, gi, bw, dcm, nss; 294 int i, idx; 295 bool cck = false; 296 297 v0 = le32_to_cpu(rxv[0]); 298 v2 = le32_to_cpu(rxv[2]); 299 300 idx = FIELD_GET(MT_PRXV_TX_RATE, v0); 301 i = idx; 302 nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1; 303 304 stbc = FIELD_GET(MT_PRXV_HT_STBC, v2); 305 gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v2); 306 *mode = FIELD_GET(MT_PRXV_TX_MODE, v2); 307 dcm = FIELD_GET(MT_PRXV_DCM, v2); 308 bw = FIELD_GET(MT_PRXV_FRAME_MODE, v2); 309 310 switch (*mode) { 311 case MT_PHY_TYPE_CCK: 312 cck = true; 313 fallthrough; 314 case MT_PHY_TYPE_OFDM: 315 i = mt76_get_rate(&dev->mt76, sband, i, cck); 316 break; 317 case MT_PHY_TYPE_HT_GF: 318 case MT_PHY_TYPE_HT: 319 status->encoding = RX_ENC_HT; 320 if (gi) 321 status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 322 if (i > 31) 323 return -EINVAL; 324 break; 325 case MT_PHY_TYPE_VHT: 326 status->nss = nss; 327 status->encoding = RX_ENC_VHT; 328 if (gi) 329 status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 330 
if (i > 11) 331 return -EINVAL; 332 break; 333 case MT_PHY_TYPE_HE_MU: 334 case MT_PHY_TYPE_HE_SU: 335 case MT_PHY_TYPE_HE_EXT_SU: 336 case MT_PHY_TYPE_HE_TB: 337 status->nss = nss; 338 status->encoding = RX_ENC_HE; 339 i &= GENMASK(3, 0); 340 341 if (gi <= NL80211_RATE_INFO_HE_GI_3_2) 342 status->he_gi = gi; 343 344 status->he_dcm = dcm; 345 break; 346 case MT_PHY_TYPE_EHT_SU: 347 case MT_PHY_TYPE_EHT_TRIG: 348 case MT_PHY_TYPE_EHT_MU: 349 status->nss = nss; 350 status->encoding = RX_ENC_EHT; 351 i &= GENMASK(3, 0); 352 353 if (gi <= NL80211_RATE_INFO_EHT_GI_3_2) 354 status->eht.gi = gi; 355 break; 356 default: 357 return -EINVAL; 358 } 359 status->rate_idx = i; 360 361 switch (bw) { 362 case IEEE80211_STA_RX_BW_20: 363 break; 364 case IEEE80211_STA_RX_BW_40: 365 if (*mode & MT_PHY_TYPE_HE_EXT_SU && 366 (idx & MT_PRXV_TX_ER_SU_106T)) { 367 status->bw = RATE_INFO_BW_HE_RU; 368 status->he_ru = 369 NL80211_RATE_INFO_HE_RU_ALLOC_106; 370 } else { 371 status->bw = RATE_INFO_BW_40; 372 } 373 break; 374 case IEEE80211_STA_RX_BW_80: 375 status->bw = RATE_INFO_BW_80; 376 break; 377 case IEEE80211_STA_RX_BW_160: 378 status->bw = RATE_INFO_BW_160; 379 break; 380 /* rxv reports bw 320-1 and 320-2 separately */ 381 case IEEE80211_STA_RX_BW_320: 382 case IEEE80211_STA_RX_BW_320 + 1: 383 status->bw = RATE_INFO_BW_320; 384 break; 385 default: 386 return -EINVAL; 387 } 388 389 status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc; 390 if (*mode < MT_PHY_TYPE_HE_SU && gi) 391 status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 392 393 return 0; 394 } 395 396 static void 397 mt7996_wed_check_ppe(struct mt7996_dev *dev, struct mt76_queue *q, 398 struct mt7996_sta *msta, struct sk_buff *skb, 399 u32 info) 400 { 401 struct ieee80211_vif *vif; 402 struct wireless_dev *wdev; 403 404 if (!msta || !msta->vif) 405 return; 406 407 if (!mt76_queue_is_wed_rx(q)) 408 return; 409 410 if (!(info & MT_DMA_INFO_PPE_VLD)) 411 return; 412 413 vif = container_of((void *)msta->vif, struct ieee80211_vif, 414 
drv_priv); 415 wdev = ieee80211_vif_to_wdev(vif); 416 skb->dev = wdev->netdev; 417 418 mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb, 419 FIELD_GET(MT_DMA_PPE_CPU_REASON, info), 420 FIELD_GET(MT_DMA_PPE_ENTRY, info)); 421 } 422 423 static int 424 mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q, 425 struct sk_buff *skb, u32 *info) 426 { 427 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; 428 struct mt76_phy *mphy = &dev->mt76.phy; 429 struct mt7996_phy *phy = &dev->phy; 430 struct ieee80211_supported_band *sband; 431 __le32 *rxd = (__le32 *)skb->data; 432 __le32 *rxv = NULL; 433 u32 rxd0 = le32_to_cpu(rxd[0]); 434 u32 rxd1 = le32_to_cpu(rxd[1]); 435 u32 rxd2 = le32_to_cpu(rxd[2]); 436 u32 rxd3 = le32_to_cpu(rxd[3]); 437 u32 rxd4 = le32_to_cpu(rxd[4]); 438 u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM; 439 u32 csum_status = *(u32 *)skb->cb; 440 u32 mesh_mask = MT_RXD0_MESH | MT_RXD0_MHCP; 441 bool is_mesh = (rxd0 & mesh_mask) == mesh_mask; 442 bool unicast, insert_ccmp_hdr = false; 443 u8 remove_pad, amsdu_info, band_idx; 444 u8 mode = 0, qos_ctl = 0; 445 bool hdr_trans; 446 u16 hdr_gap; 447 u16 seq_ctrl = 0; 448 __le16 fc = 0; 449 int idx; 450 u8 hw_aggr = false; 451 struct mt7996_sta *msta = NULL; 452 453 hw_aggr = status->aggr; 454 memset(status, 0, sizeof(*status)); 455 456 band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1); 457 mphy = dev->mt76.phys[band_idx]; 458 phy = mphy->priv; 459 status->phy_idx = mphy->band_idx; 460 461 if (!test_bit(MT76_STATE_RUNNING, &mphy->state)) 462 return -EINVAL; 463 464 if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR) 465 return -EINVAL; 466 467 hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS; 468 if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM)) 469 return -EINVAL; 470 471 /* ICV error or CCMP/BIP/WPI MIC error */ 472 if (rxd1 & MT_RXD1_NORMAL_ICV_ERR) 473 status->flag |= RX_FLAG_ONLY_MONITOR; 474 475 unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M; 476 idx = 
FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1); 477 status->wcid = mt7996_rx_get_wcid(dev, idx, unicast); 478 479 if (status->wcid) { 480 msta = container_of(status->wcid, struct mt7996_sta, wcid); 481 spin_lock_bh(&dev->mt76.sta_poll_lock); 482 if (list_empty(&msta->wcid.poll_list)) 483 list_add_tail(&msta->wcid.poll_list, 484 &dev->mt76.sta_poll_list); 485 spin_unlock_bh(&dev->mt76.sta_poll_lock); 486 } 487 488 status->freq = mphy->chandef.chan->center_freq; 489 status->band = mphy->chandef.chan->band; 490 if (status->band == NL80211_BAND_5GHZ) 491 sband = &mphy->sband_5g.sband; 492 else if (status->band == NL80211_BAND_6GHZ) 493 sband = &mphy->sband_6g.sband; 494 else 495 sband = &mphy->sband_2g.sband; 496 497 if (!sband->channels) 498 return -EINVAL; 499 500 if ((rxd3 & csum_mask) == csum_mask && 501 !(csum_status & (BIT(0) | BIT(2) | BIT(3)))) 502 skb->ip_summed = CHECKSUM_UNNECESSARY; 503 504 if (rxd1 & MT_RXD3_NORMAL_FCS_ERR) 505 status->flag |= RX_FLAG_FAILED_FCS_CRC; 506 507 if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR) 508 status->flag |= RX_FLAG_MMIC_ERROR; 509 510 if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 && 511 !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) { 512 status->flag |= RX_FLAG_DECRYPTED; 513 status->flag |= RX_FLAG_IV_STRIPPED; 514 status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED; 515 } 516 517 remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2); 518 519 if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR) 520 return -EINVAL; 521 522 rxd += 8; 523 if (rxd1 & MT_RXD1_NORMAL_GROUP_4) { 524 u32 v0 = le32_to_cpu(rxd[0]); 525 u32 v2 = le32_to_cpu(rxd[2]); 526 527 fc = cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL, v0)); 528 qos_ctl = FIELD_GET(MT_RXD10_QOS_CTL, v2); 529 seq_ctrl = FIELD_GET(MT_RXD10_SEQ_CTRL, v2); 530 531 rxd += 4; 532 if ((u8 *)rxd - skb->data >= skb->len) 533 return -EINVAL; 534 } 535 536 if (rxd1 & MT_RXD1_NORMAL_GROUP_1) { 537 u8 *data = (u8 *)rxd; 538 539 if (status->flag & RX_FLAG_DECRYPTED) { 540 switch 
(FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) { 541 case MT_CIPHER_AES_CCMP: 542 case MT_CIPHER_CCMP_CCX: 543 case MT_CIPHER_CCMP_256: 544 insert_ccmp_hdr = 545 FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2); 546 fallthrough; 547 case MT_CIPHER_TKIP: 548 case MT_CIPHER_TKIP_NO_MIC: 549 case MT_CIPHER_GCMP: 550 case MT_CIPHER_GCMP_256: 551 status->iv[0] = data[5]; 552 status->iv[1] = data[4]; 553 status->iv[2] = data[3]; 554 status->iv[3] = data[2]; 555 status->iv[4] = data[1]; 556 status->iv[5] = data[0]; 557 break; 558 default: 559 break; 560 } 561 } 562 rxd += 4; 563 if ((u8 *)rxd - skb->data >= skb->len) 564 return -EINVAL; 565 } 566 567 if (rxd1 & MT_RXD1_NORMAL_GROUP_2) { 568 status->timestamp = le32_to_cpu(rxd[0]); 569 status->flag |= RX_FLAG_MACTIME_START; 570 571 if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) { 572 status->flag |= RX_FLAG_AMPDU_DETAILS; 573 574 /* all subframes of an A-MPDU have the same timestamp */ 575 if (phy->rx_ampdu_ts != status->timestamp) { 576 if (!++phy->ampdu_ref) 577 phy->ampdu_ref++; 578 } 579 phy->rx_ampdu_ts = status->timestamp; 580 581 status->ampdu_ref = phy->ampdu_ref; 582 } 583 584 rxd += 4; 585 if ((u8 *)rxd - skb->data >= skb->len) 586 return -EINVAL; 587 } 588 589 /* RXD Group 3 - P-RXV */ 590 if (rxd1 & MT_RXD1_NORMAL_GROUP_3) { 591 u32 v3; 592 int ret; 593 594 rxv = rxd; 595 rxd += 4; 596 if ((u8 *)rxd - skb->data >= skb->len) 597 return -EINVAL; 598 599 v3 = le32_to_cpu(rxv[3]); 600 601 status->chains = mphy->antenna_mask; 602 status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v3); 603 status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v3); 604 status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v3); 605 status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v3); 606 607 /* RXD Group 5 - C-RXV */ 608 if (rxd1 & MT_RXD1_NORMAL_GROUP_5) { 609 rxd += 24; 610 if ((u8 *)rxd - skb->data >= skb->len) 611 return -EINVAL; 612 } 613 614 ret = mt7996_mac_fill_rx_rate(dev, status, sband, rxv, &mode); 615 if (ret < 0) 616 return ret; 617 } 618 619 amsdu_info = 
FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4); 620 status->amsdu = !!amsdu_info; 621 if (status->amsdu) { 622 status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME; 623 status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME; 624 } 625 626 hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad; 627 if (hdr_trans && ieee80211_has_morefrags(fc)) { 628 if (mt7996_reverse_frag0_hdr_trans(skb, hdr_gap)) 629 return -EINVAL; 630 hdr_trans = false; 631 } else { 632 int pad_start = 0; 633 634 skb_pull(skb, hdr_gap); 635 if (!hdr_trans && status->amsdu && !(ieee80211_has_a4(fc) && is_mesh)) { 636 pad_start = ieee80211_get_hdrlen_from_skb(skb); 637 } else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) { 638 /* When header translation failure is indicated, 639 * the hardware will insert an extra 2-byte field 640 * containing the data length after the protocol 641 * type field. This happens either when the LLC-SNAP 642 * pattern did not match, or if a VLAN header was 643 * detected. 644 */ 645 pad_start = 12; 646 if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q) 647 pad_start += 4; 648 else 649 pad_start = 0; 650 } 651 652 if (pad_start) { 653 memmove(skb->data + 2, skb->data, pad_start); 654 skb_pull(skb, 2); 655 } 656 } 657 658 if (!hdr_trans) { 659 struct ieee80211_hdr *hdr; 660 661 if (insert_ccmp_hdr) { 662 u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1); 663 664 mt76_insert_ccmp_hdr(skb, key_id); 665 } 666 667 hdr = mt76_skb_get_hdr(skb); 668 fc = hdr->frame_control; 669 if (ieee80211_is_data_qos(fc)) { 670 u8 *qos = ieee80211_get_qos_ctl(hdr); 671 672 seq_ctrl = le16_to_cpu(hdr->seq_ctrl); 673 qos_ctl = *qos; 674 675 /* Mesh DA/SA/Length will be stripped after hardware 676 * de-amsdu, so here needs to clear amsdu present bit 677 * to mark it as a normal mesh frame. 
678 */ 679 if (ieee80211_has_a4(fc) && is_mesh && status->amsdu) 680 *qos &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; 681 } 682 } else { 683 status->flag |= RX_FLAG_8023; 684 mt7996_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb, 685 *info); 686 } 687 688 if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023)) 689 mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode); 690 691 if (!status->wcid || !ieee80211_is_data_qos(fc) || hw_aggr) 692 return 0; 693 694 status->aggr = unicast && 695 !ieee80211_is_qos_nullfunc(fc); 696 status->qos_ctl = qos_ctl; 697 status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl); 698 699 return 0; 700 } 701 702 static void 703 mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi, 704 struct sk_buff *skb, struct mt76_wcid *wcid) 705 { 706 u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; 707 u8 fc_type, fc_stype; 708 u16 ethertype; 709 bool wmm = false; 710 u32 val; 711 712 if (wcid->sta) { 713 struct ieee80211_sta *sta; 714 715 sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv); 716 wmm = sta->wme; 717 } 718 719 val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) | 720 FIELD_PREP(MT_TXD1_TID, tid); 721 722 ethertype = get_unaligned_be16(&skb->data[12]); 723 if (ethertype >= ETH_P_802_3_MIN) 724 val |= MT_TXD1_ETH_802_3; 725 726 txwi[1] |= cpu_to_le32(val); 727 728 fc_type = IEEE80211_FTYPE_DATA >> 2; 729 fc_stype = wmm ? 
IEEE80211_STYPE_QOS_DATA >> 4 : 0; 730 731 val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) | 732 FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype); 733 734 txwi[2] |= cpu_to_le32(val); 735 736 if (wcid->amsdu) 737 txwi[3] |= cpu_to_le32(MT_TXD3_HW_AMSDU); 738 } 739 740 static void 741 mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi, 742 struct sk_buff *skb, struct ieee80211_key_conf *key) 743 { 744 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 745 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; 746 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 747 bool multicast = is_multicast_ether_addr(hdr->addr1); 748 u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; 749 __le16 fc = hdr->frame_control, sc = hdr->seq_ctrl; 750 u8 fc_type, fc_stype; 751 u32 val; 752 753 if (ieee80211_is_action(fc) && 754 mgmt->u.action.category == WLAN_CATEGORY_BACK && 755 mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) 756 tid = MT_TX_ADDBA; 757 else if (ieee80211_is_mgmt(hdr->frame_control)) 758 tid = MT_TX_NORMAL; 759 760 val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) | 761 FIELD_PREP(MT_TXD1_HDR_INFO, 762 ieee80211_get_hdrlen_from_skb(skb) / 2) | 763 FIELD_PREP(MT_TXD1_TID, tid); 764 765 if (!ieee80211_is_data(fc) || multicast || 766 info->flags & IEEE80211_TX_CTL_USE_MINRATE) 767 val |= MT_TXD1_FIXED_RATE; 768 769 if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) && 770 key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) { 771 val |= MT_TXD1_BIP; 772 txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME); 773 } 774 775 txwi[1] |= cpu_to_le32(val); 776 777 fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2; 778 fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4; 779 780 val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) | 781 FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype); 782 783 if (ieee80211_has_morefrags(fc) && ieee80211_is_first_frag(sc)) 784 val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_FIRST); 785 else if 
(ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc)) 786 val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_MID); 787 else if (!ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc)) 788 val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_LAST); 789 else 790 val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_NONE); 791 792 txwi[2] |= cpu_to_le32(val); 793 794 txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast)); 795 if (ieee80211_is_beacon(fc)) { 796 txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT); 797 txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT); 798 } 799 800 if (info->flags & IEEE80211_TX_CTL_INJECTED) { 801 u16 seqno = le16_to_cpu(sc); 802 803 if (ieee80211_is_back_req(hdr->frame_control)) { 804 struct ieee80211_bar *bar; 805 806 bar = (struct ieee80211_bar *)skb->data; 807 seqno = le16_to_cpu(bar->start_seq_num); 808 } 809 810 val = MT_TXD3_SN_VALID | 811 FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno)); 812 txwi[3] |= cpu_to_le32(val); 813 txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU); 814 } 815 } 816 817 void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi, 818 struct sk_buff *skb, struct mt76_wcid *wcid, 819 struct ieee80211_key_conf *key, int pid, 820 enum mt76_txq_id qid, u32 changed) 821 { 822 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 823 struct ieee80211_vif *vif = info->control.vif; 824 u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2; 825 u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0; 826 bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP; 827 struct mt76_vif *mvif; 828 u16 tx_count = 15; 829 u32 val; 830 bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP | 831 BSS_CHANGED_FILS_DISCOVERY)); 832 bool beacon = !!(changed & (BSS_CHANGED_BEACON | 833 BSS_CHANGED_BEACON_ENABLED)) && (!inband_disc); 834 835 mvif = vif ? 
(struct mt76_vif *)vif->drv_priv : NULL; 836 if (mvif) { 837 omac_idx = mvif->omac_idx; 838 wmm_idx = mvif->wmm_idx; 839 band_idx = mvif->band_idx; 840 } 841 842 if (inband_disc) { 843 p_fmt = MT_TX_TYPE_FW; 844 q_idx = MT_LMAC_ALTX0; 845 } else if (beacon) { 846 p_fmt = MT_TX_TYPE_FW; 847 q_idx = MT_LMAC_BCN0; 848 } else if (qid >= MT_TXQ_PSD) { 849 p_fmt = MT_TX_TYPE_CT; 850 q_idx = MT_LMAC_ALTX0; 851 } else { 852 p_fmt = MT_TX_TYPE_CT; 853 q_idx = wmm_idx * MT7996_MAX_WMM_SETS + 854 mt76_connac_lmac_mapping(skb_get_queue_mapping(skb)); 855 } 856 857 val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) | 858 FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) | 859 FIELD_PREP(MT_TXD0_Q_IDX, q_idx); 860 txwi[0] = cpu_to_le32(val); 861 862 val = FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) | 863 FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx); 864 865 if (band_idx) 866 val |= FIELD_PREP(MT_TXD1_TGID, band_idx); 867 868 txwi[1] = cpu_to_le32(val); 869 txwi[2] = 0; 870 871 val = MT_TXD3_SW_POWER_MGMT | 872 FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count); 873 if (key) 874 val |= MT_TXD3_PROTECT_FRAME; 875 if (info->flags & IEEE80211_TX_CTL_NO_ACK) 876 val |= MT_TXD3_NO_ACK; 877 878 txwi[3] = cpu_to_le32(val); 879 txwi[4] = 0; 880 881 val = FIELD_PREP(MT_TXD5_PID, pid); 882 if (pid >= MT_PACKET_ID_FIRST) 883 val |= MT_TXD5_TX_STATUS_HOST; 884 txwi[5] = cpu_to_le32(val); 885 886 val = MT_TXD6_DIS_MAT | MT_TXD6_DAS; 887 if (is_mt7996(&dev->mt76)) 888 val |= FIELD_PREP(MT_TXD6_MSDU_CNT, 1); 889 else 890 val |= FIELD_PREP(MT_TXD6_MSDU_CNT_V2, 1); 891 txwi[6] = cpu_to_le32(val); 892 txwi[7] = 0; 893 894 if (is_8023) 895 mt7996_mac_write_txwi_8023(dev, txwi, skb, wcid); 896 else 897 mt7996_mac_write_txwi_80211(dev, txwi, skb, key); 898 899 if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) { 900 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 901 bool mcast = ieee80211_is_data(hdr->frame_control) && 902 is_multicast_ether_addr(hdr->addr1); 903 u8 idx = MT7996_BASIC_RATES_TBL; 904 905 if (mvif) 
{ 906 if (mcast && mvif->mcast_rates_idx) 907 idx = mvif->mcast_rates_idx; 908 else if (beacon && mvif->beacon_rates_idx) 909 idx = mvif->beacon_rates_idx; 910 else 911 idx = mvif->basic_rates_idx; 912 } 913 914 val = FIELD_PREP(MT_TXD6_TX_RATE, idx) | MT_TXD6_FIXED_BW; 915 txwi[6] |= cpu_to_le32(val); 916 txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE); 917 } 918 } 919 920 int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, 921 enum mt76_txq_id qid, struct mt76_wcid *wcid, 922 struct ieee80211_sta *sta, 923 struct mt76_tx_info *tx_info) 924 { 925 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data; 926 struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76); 927 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); 928 struct ieee80211_key_conf *key = info->control.hw_key; 929 struct ieee80211_vif *vif = info->control.vif; 930 struct mt76_connac_txp_common *txp; 931 struct mt76_txwi_cache *t; 932 int id, i, pid, nbuf = tx_info->nbuf - 1; 933 bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP; 934 u8 *txwi = (u8 *)txwi_ptr; 935 936 if (unlikely(tx_info->skb->len <= ETH_HLEN)) 937 return -EINVAL; 938 939 if (!wcid) 940 wcid = &dev->mt76.global_wcid; 941 942 t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); 943 t->skb = tx_info->skb; 944 945 id = mt76_token_consume(mdev, &t); 946 if (id < 0) 947 return id; 948 949 pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb); 950 mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key, 951 pid, qid, 0); 952 953 txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE); 954 for (i = 0; i < nbuf; i++) { 955 u16 len; 956 957 len = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[i + 1].len); 958 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 959 len |= FIELD_PREP(MT_TXP_DMA_ADDR_H, 960 tx_info->buf[i + 1].addr >> 32); 961 #endif 962 963 txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr); 964 txp->fw.len[i] = cpu_to_le16(len); 965 } 966 txp->fw.nbuf = nbuf; 967 968 
txp->fw.flags = 969 cpu_to_le16(MT_CT_INFO_FROM_HOST | MT_CT_INFO_APPLY_TXD); 970 971 if (!key) 972 txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME); 973 974 if (!is_8023 && ieee80211_is_mgmt(hdr->frame_control)) 975 txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME); 976 977 if (vif) { 978 struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; 979 980 txp->fw.bss_idx = mvif->mt76.idx; 981 } 982 983 txp->fw.token = cpu_to_le16(id); 984 txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff); 985 986 tx_info->skb = NULL; 987 988 /* pass partial skb header to fw */ 989 tx_info->buf[1].len = MT_CT_PARSE_LEN; 990 tx_info->buf[1].skip_unmap = true; 991 tx_info->nbuf = MT_CT_DMA_BUF_NUM; 992 993 return 0; 994 } 995 996 u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id) 997 { 998 struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE; 999 __le32 *txwi = ptr; 1000 u32 val; 1001 1002 memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp)); 1003 1004 val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) | 1005 FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT); 1006 txwi[0] = cpu_to_le32(val); 1007 1008 val = BIT(31) | 1009 FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3); 1010 txwi[1] = cpu_to_le32(val); 1011 1012 txp->token = cpu_to_le16(token_id); 1013 txp->nbuf = 1; 1014 txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp)); 1015 1016 return MT_TXD_SIZE + sizeof(*txp); 1017 } 1018 1019 static void 1020 mt7996_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb) 1021 { 1022 struct mt7996_sta *msta; 1023 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1024 bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP; 1025 u16 fc, tid; 1026 1027 if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he)) 1028 return; 1029 1030 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; 1031 if (tid >= 6) /* skip VO queue */ 1032 return; 1033 1034 if (is_8023) { 1035 fc = IEEE80211_FTYPE_DATA | 1036 (sta->wme ? 
	       IEEE80211_STYPE_QOS_DATA : IEEE80211_STYPE_DATA);
	} else {
		/* No need to get precise TID for Action/Management Frame,
		 * since it will not meet the following Frame Control
		 * condition anyway.
		 */

		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

		fc = le16_to_cpu(hdr->frame_control) &
		     (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
	}

	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
		return;

	msta = (struct mt7996_sta *)sta->drv_priv;
	/* kick off a BA session only once per TID; ampdu_state remembers it */
	if (!test_and_set_bit(tid, &msta->wcid.ampdu_state))
		ieee80211_start_tx_ba_session(sta, tid, 0);
}

/* Release one tx token entry: unmap its DMA buffers, optionally trigger
 * A-MPDU setup for the owning station (skipped for EAPOL frames) and
 * complete the skb onto @free_list for deferred freeing.
 */
static void
mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
		 struct ieee80211_sta *sta, struct list_head *free_list)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_wcid *wcid;
	__le32 *txwi;
	u16 wcid_idx;

	mt76_connac_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
	if (sta) {
		wcid = (struct mt76_wcid *)sta->drv_priv;
		wcid_idx = wcid->idx;

		if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
			mt7996_tx_check_aggr(sta, t->skb);
	} else {
		/* no station context: recover the wcid index from the txwi */
		wcid_idx = le32_get_bits(txwi[9], MT_TXD9_WLAN_IDX);
	}

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);

out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}

/* Process a hardware TXFREE event (@data/@len): clean up the tx queues,
 * walk the MSDU-id records, release the matching tokens and per-wcid
 * tx statistics, then free all completed skbs in one batch.
 */
static void
mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
{
	__le32 *tx_free = (__le32 *)data, *cur_info;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *phy2 = mdev->phys[MT_BAND1];
	struct mt76_phy *phy3 = mdev->phys[MT_BAND2];
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	struct mt76_wcid *wcid = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *skb, *tmp;
	void *end = data + len;
	bool wake = false;
	u16 total, count = 0;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
	if (phy2) {
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_BE], false);
	}
	if (phy3) {
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_BE], false);
	}

	/* only event format version >= 5 is parsed below */
	if (WARN_ON_ONCE(le32_get_bits(tx_free[1], MT_TXFREE1_VER) < 5))
		return;

	total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT);
	for (cur_info = &tx_free[2]; count < total; cur_info++) {
		u32 msdu, info;
		u8 i;

		if (WARN_ON_ONCE((void *)cur_info >= end))
			return;
		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		info = le32_to_cpu(*cur_info);
		if (info & MT_TXFREE_INFO_PAIR) {
			struct mt7996_sta *msta;
			u16 idx;

			idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			/* queue the station for airtime/status polling */
			msta = container_of(wcid, struct mt7996_sta, wcid);
			spin_lock_bh(&mdev->sta_poll_lock);
			if (list_empty(&msta->wcid.poll_list))
				list_add_tail(&msta->wcid.poll_list,
					      &mdev->sta_poll_list);
			spin_unlock_bh(&mdev->sta_poll_lock);
			continue;
		} else if (info & MT_TXFREE_INFO_HEADER) {
			u32 tx_retries = 0, tx_failed = 0;

			if (!wcid)
				continue;

			/* COUNT includes the first transmission attempt */
			tx_retries =
				FIELD_GET(MT_TXFREE_INFO_COUNT, info) - 1;
			tx_failed = tx_retries +
				!!FIELD_GET(MT_TXFREE_INFO_STAT, info);

			wcid->stats.tx_retries += tx_retries;
			wcid->stats.tx_failed += tx_failed;
			continue;
		}

		/* each info word carries up to two 15-bit MSDU ids */
		for (i = 0; i < 2; i++) {
			msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID;
			if (msdu == MT_TXFREE_INFO_MSDU_ID)
				continue;

			count++;
			txwi = mt76_token_release(mdev, msdu, &wake);
			if (!txwi)
				continue;

			mt7996_txwi_free(dev, txwi, sta, &free_list);
		}
	}

	mt7996_mac_sta_poll(dev);

	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	/* free the completed skbs outside of the token/status locks */
	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}

/* Parse a single TXS record for @wcid/@pid: report tx status to mac80211
 * (ACK flag, ampdu counters), refresh the BA session timer when WED is
 * active, and update the cached last-tx rate and per-rate statistics.
 * Returns true if a status skb was found and completed.
 */
static bool
mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
		       int pid, __le32 *txs_data)
{
	struct mt76_sta_stats *stats = &wcid->stats;
	struct ieee80211_supported_band *sband;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy;
	struct ieee80211_tx_info *info;
	struct sk_buff_head list;
	struct rate_info rate = {};
	struct sk_buff *skb = NULL;
	bool cck = false;
	u32 txrate, txs, mode, stbc;

	txs = le32_to_cpu(txs_data[0]);

	mt76_tx_status_lock(mdev, &list);

	/* only report MPDU TXS */
	if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) == 0) {
		skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
		if (skb) {
			info = IEEE80211_SKB_CB(skb);
			if (!(txs & MT_TXS0_ACK_ERROR_MASK))
				info->flags |= IEEE80211_TX_STAT_ACK;

			info->status.ampdu_len = 1;
			info->status.ampdu_ack_len =
				!!(info->flags & IEEE80211_TX_STAT_ACK);

			/* rate is reported via wcid->rate below, not here */
			info->status.rates[0].idx = -1;
		}
	}

	if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wcid->sta) {
		struct ieee80211_sta *sta;
		u8 tid;

		sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
		tid = FIELD_GET(MT_TXS0_TID, txs);
		/* keep the BA session alive; WED tx bypasses the host path */
		ieee80211_refresh_tx_agg_session_timer(sta, tid);
	}

	txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);

	rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
	rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
	stbc = le32_get_bits(txs_data[3], MT_TXS3_RATE_STBC);

	/* STBC doubles the reported stream count; undo it */
	if (stbc && rate.nss > 1)
		rate.nss >>= 1;

	if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
		stats->tx_nss[rate.nss - 1]++;
	if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
		stats->tx_mcs[rate.mcs]++;

	mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
	switch (mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		mphy = mt76_dev_phy(mdev, wcid->phy_idx);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
			sband = &mphy->sband_6g.sband;
		else
			sband = &mphy->sband_2g.sband;

		/* translate the hw rate index into a bitrate table index */
		rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
		rate.legacy = sband->bitrates[rate.mcs].bitrate;
		break;
	case MT_PHY_TYPE_HT:
	case MT_PHY_TYPE_HT_GF:
		if (rate.mcs > 31)
			goto out;

		rate.flags = RATE_INFO_FLAGS_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_VHT:
		if (rate.mcs > 9)
			goto out;

		rate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
	case MT_PHY_TYPE_HE_MU:
		if (rate.mcs > 11)
			goto out;

		rate.he_gi = wcid->rate.he_gi;
		rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
		rate.flags = RATE_INFO_FLAGS_HE_MCS;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		if (rate.mcs > 13)
			goto out;

		rate.eht_gi = wcid->rate.eht_gi;
		rate.flags = RATE_INFO_FLAGS_EHT_MCS;
		break;
	default:
		goto out;
	}

	stats->tx_mode[mode]++;

	switch (FIELD_GET(MT_TXS0_BW, txs)) {
	case IEEE80211_STA_RX_BW_320:
		rate.bw = RATE_INFO_BW_320;
		stats->tx_bw[4]++;
		break;
	case IEEE80211_STA_RX_BW_160:
		rate.bw = RATE_INFO_BW_160;
		stats->tx_bw[3]++;
		break;
	case IEEE80211_STA_RX_BW_80:
		rate.bw = RATE_INFO_BW_80;
		stats->tx_bw[2]++;
		break;
	case IEEE80211_STA_RX_BW_40:
		rate.bw = RATE_INFO_BW_40;
		stats->tx_bw[1]++;
		break;
	default:
		rate.bw = RATE_INFO_BW_20;
		stats->tx_bw[0]++;
		break;
	}
	wcid->rate = rate;

out:
	if (skb)
		mt76_tx_status_skb_done(mdev, skb, &list);
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}

/* Dispatch one raw TXS entry: look up the wcid under RCU, hand the record
 * to mt7996_mac_add_txs_skb() and schedule the station for polling.
 */
static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
{
	struct mt7996_sta *msta = NULL;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u16 wcidx;
	u8 pid;

	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);

	/* pids below MT_PACKET_ID_NO_SKB carry no status skb to complete */
	if (pid < MT_PACKET_ID_NO_SKB)
		return;

	if (wcidx >= mt7996_wtbl_size(dev))
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7996_sta, wcid);

	mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data);

	if (!wcid->sta)
		goto out;

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	if (list_empty(&msta->wcid.poll_list))
		list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list);
	spin_unlock_bh(&dev->mt76.sta_poll_lock);

out:
	rcu_read_unlock();
}

/* Pre-filter a raw rx descriptor: consume driver-internal packet types
 * (tx-free, TXS, fw monitor) here and return false; return true when the
 * buffer should be processed further as a regular rx frame.
 */
bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)data;
	__le32 *end = (__le32 *)&rxd[len / 4];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		/* sw-tagged frames are normal rx despite the packet type */
		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			return true;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7996_mac_tx_free(dev, data, len);
		return false;
	case PKT_TYPE_TXS:
		/* TXS records start at word 4 and are 8 words each */
		for (rxd += 4; rxd + 8 <= end; rxd += 8)
			mt7996_mac_add_txs(dev, rxd);
		return false;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, data, len);
		return false;
	default:
		return true;
	}
}

/* Top-level rx handler for queue @q: demultiplex the skb by packet type
 * and either consume it internally or pass it up via mt76_rx().
 */
void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb, u32 *info)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		/* sw-tagged frames are handled as normal rx */
		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			type = PKT_TYPE_NORMAL;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2) &&
		    q == MT_RXQ_TXFREE_BAND2) {
			/* WED owns band2 tx-free processing; drop it here */
			dev_kfree_skb(skb);
			break;
		}

		mt7996_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7996_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXS:
		for (rxd += 4; rxd + 8 <= end; rxd += 8)
			mt7996_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL:
		if (!mt7996_mac_fill_rx(dev, q, skb, info)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}

/* Restart the per-band CCA statistics counters by toggling the rx
 * control bits for the phy's band.
 */
void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u32 reg = MT_WF_PHYRX_BAND_RX_CTRL1(phy->mt76->band_idx);

	mt76_clear(dev, reg, MT_WF_PHYRX_BAND_RX_CTRL1_STSCNT_EN);
	/* NOTE(review): raw BIT(11) | BIT(9) - undocumented enable bits,
	 * presumably re-arming the statistics counters; confirm vs datasheet.
	 */
	mt76_set(dev, reg, BIT(11) | BIT(9));
}

/* Clear hardware and software counters for @phy: read-to-clear the tx
 * aggregation counters, reset survey/airtime state and resync the
 * channel MIB via MCU.
 */
void mt7996_mac_reset_counters(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	int i;

	/* the aggregation counters are clear-on-read */
	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));

	phy->mt76->survey_time = ktime_get_boottime();

	memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));

	/* reset airtime counters */
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band_idx),
		 MT_WF_RMAC_MIB_RXTIME_CLR);

	mt7996_mcu_get_chan_mib_info(phy, true);
}

/* Program CCK/OFDM slot timing for @phy from its coverage class. The
 * effective class is the maximum over all active phys before being
 * folded into the PLCP/CCA timeout registers.
 */
void mt7996_mac_set_coverage_class(struct mt7996_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7996_dev *dev = phy->dev;
	struct mt7996_phy *phy2 = mt7996_phy2(dev);
	struct mt7996_phy *phy3 = mt7996_phy3(dev);
	u32 reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	u8 band_idx = phy->mt76->band_idx;
	int offset;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	if (phy2)
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       phy2->coverage_class);

	if (phy3)
		coverage_class = max_t(s16, coverage_class,
				       phy3->coverage_class);

	/* 3 us of extra timeout per coverage class step */
	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(band_idx), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(band_idx), ofdm + reg_offset);
}

/* Enable noise-floor (IRPI) measurement for @band. */
void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band)
{
	mt76_set(dev, MT_WF_PHYRX_CSD_BAND_RXTD12(band),
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR_ONLY |
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR);

	mt76_set(dev, MT_WF_PHYRX_BAND_RX_CTRL1(band),
		 FIELD_PREP(MT_WF_PHYRX_BAND_RX_CTRL1_IPI_EN, 0x5));
}

/* Compute the noise floor for @phy as the weighted average of the IRPI
 * histogram over all enabled antennas. Returns the (positive) magnitude
 * in dBm, or 0 when no samples were collected.
 */
static u8
mt7996_phy_get_nf(struct mt7996_phy *phy, u8 band_idx)
{
	/* bucket weights: |dBm| value represented by each histogram bin */
	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
	struct mt7996_dev *dev = phy->dev;
	u32 val, sum = 0, n = 0;
	int ant, i;

	for (ant = 0; ant < hweight8(phy->mt76->antenna_mask); ant++) {
		u32 reg = MT_WF_PHYRX_CSD_IRPI(band_idx, ant);

		for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
			val = mt76_rr(dev, reg);
			sum += val * nf_power[i];
			n += val;
		}
	}

	return n ? sum / n : 0;
}

/* Survey hook: refresh channel MIB counters and fold the latest noise
 * floor sample into the exponentially weighted average kept in
 * phy->noise (Q4 fixed point).
 */
void mt7996_update_channel(struct mt76_phy *mphy)
{
	struct mt7996_phy *phy = mphy->priv;
	struct mt76_channel_state *state = mphy->chan_state;
	int nf;

	mt7996_mcu_get_chan_mib_info(phy, false);

	nf = mt7996_phy_get_nf(phy, mphy->band_idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state->noise = -(phy->noise >> 4);
}

/* Wait (with timeout) until the MCU recovery state machine reports
 * @state. Returns false and WARNs on timeout.
 */
static bool
mt7996_wait_reset_state(struct mt7996_dev *dev, u32 state)
{
	bool ret;

	ret = wait_event_timeout(dev->reset_wait,
				 (READ_ONCE(dev->recovery.state) & state),
				 MT7996_RESET_TIMEOUT);

	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
	return ret;
}

/* Interface iterator: re-install the beacon template for beaconing
 * interface types after a reset.
 */
static void
mt7996_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct ieee80211_hw *hw = priv;

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
		mt7996_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon);
		break;
	default:
		break;
	}
}

/* Re-install beacons on every active interface across all bands. */
static void
mt7996_update_beacons(struct mt7996_dev *dev)
{
	struct mt76_phy *phy2, *phy3;

	ieee80211_iterate_active_interfaces(dev->mt76.hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, dev->mt76.hw);

	phy2 = dev->mt76.phys[MT_BAND1];
	if (!phy2)
		return;

	ieee80211_iterate_active_interfaces(phy2->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, phy2->hw);

	phy3 = dev->mt76.phys[MT_BAND2];
	if (!phy3)
		return;

	ieee80211_iterate_active_interfaces(phy3->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, phy3->hw);
}

/* Drop every outstanding tx token and destroy the token IDR; used
 * during reset before the DMA queues are reinitialized.
 */
void mt7996_tx_token_put(struct mt7996_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id) {
		mt7996_txwi_free(dev, txwi, NULL, NULL);
		dev->mt76.token_count--;
	}
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}

/* Full chip restart: quiesce interrupts/NAPI, reset DMA and tokens,
 * reload firmware and eeprom, then bring previously running bands back
 * up. Returns 0 on success or a negative error code.
 */
static int
mt7996_mac_restart(struct mt7996_dev *dev)
{
	struct mt7996_phy *phy2, *phy3;
	struct mt76_dev *mdev = &dev->mt76;
	int i, ret;

	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);

	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}

	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
	}

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	if (phy2)
		set_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		set_bit(MT76_RESET, &phy3->mt76->state);

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);
	if (phy2)
		mt76_txq_schedule_all(phy2->mt76);
	if (phy3)
		mt76_txq_schedule_all(phy3->mt76);

	/* disable all tx/rx napi */
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(mdev, i) {
		/* WED RRO queues stay under WED control during reset */
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&mdev->q_rx[i]))
			continue;

		if (mdev->q_rx[i].ndesc)
			napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	/* token reinit */
	mt7996_tx_token_put(dev);
	idr_init(&dev->mt76.token);

	mt7996_dma_reset(dev, true);

	local_bh_disable();
	mt76_for_each_q_rx(mdev, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&mdev->q_rx[i]))
			continue;

		if (mdev->q_rx[i].ndesc) {
			napi_enable(&dev->mt76.napi[i]);
			napi_schedule(&dev->mt76.napi[i]);
		}
	}
	local_bh_enable();
	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);

	mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}
	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
	}

	/* load firmware */
	ret = mt7996_mcu_init_firmware(dev);
	if (ret)
		goto out;

	/* set the necessary init items */
	ret = mt7996_mcu_set_eeprom(dev);
	if (ret)
		goto out;

	mt7996_mac_init(dev);
	mt7996_init_txpower(&dev->phy);
	mt7996_init_txpower(phy2);
	mt7996_init_txpower(phy3);
	/* NOTE(review): txbf_init failure is not checked before mt7996_run;
	 * ret may be overwritten below - confirm this is intentional.
	 */
	ret = mt7996_txbf_init(dev);

	if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
		ret = mt7996_run(dev->mphy.hw);
		if (ret)
			goto out;
	}

	if (phy2 && test_bit(MT76_STATE_RUNNING, &phy2->mt76->state)) {
		ret = mt7996_run(phy2->mt76->hw);
		if (ret)
			goto out;
	}

	if (phy3 && test_bit(MT76_STATE_RUNNING, &phy3->mt76->state)) {
		ret = mt7996_run(phy3->mt76->hw);
		if (ret)
			goto out;
	}

out:
	/* reset done */
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		clear_bit(MT76_RESET, &phy3->mt76->state);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	mt76_worker_enable(&dev->mt76.tx_worker);
	return ret;
}

/* Full hardware recovery: stop all queues and workers, retry
 * mt7996_mac_restart() up to 10 times, then restart mac80211 state and
 * resume the watchdog works.
 */
static void
mt7996_mac_full_reset(struct mt7996_dev *dev)
{
	struct mt7996_phy *phy2, *phy3;
	int i;

	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);
	dev->recovery.hw_full_reset = true;

	wake_up(&dev->mt76.mcu.wait);
	ieee80211_stop_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_stop_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_stop_queues(phy3->mt76->hw);

	cancel_work_sync(&dev->wed_rro.work);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2)
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	if (phy3)
		cancel_delayed_work_sync(&phy3->mt76->mac_work);

	mutex_lock(&dev->mt76.mutex);
	for (i = 0; i < 10; i++) {
		if (!mt7996_mac_restart(dev))
			break;
	}
	mutex_unlock(&dev->mt76.mutex);

	if (i == 10)
		dev_err(dev->mt76.dev, "chip full reset failed\n");

	ieee80211_restart_hw(mt76_hw(dev));
	if (phy2)
		ieee80211_restart_hw(phy2->mt76->hw);
	if (phy3)
		ieee80211_restart_hw(phy3->mt76->hw);

	ieee80211_wake_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_wake_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_wake_queues(phy3->mt76->hw);

	dev->recovery.hw_full_reset = false;
	ieee80211_queue_delayed_work(mt76_hw(dev),
				     &dev->mphy.mac_work,
				     MT7996_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(phy2->mt76->hw,
					     &phy2->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	if (phy3)
		ieee80211_queue_delayed_work(phy3->mt76->hw,
					     &phy3->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
}

/* Reset worker. Two paths: a full chip reset when the firmware watchdog
 * fired (recovery.restart), otherwise the L1 SER (system error
 * recovery) handshake with the MCU: stop DMA, reset queues/tokens, then
 * restart DMA and wake everything back up.
 */
void mt7996_mac_reset_work(struct work_struct *work)
{
	struct mt7996_phy *phy2, *phy3;
	struct mt7996_dev *dev;
	int i;

	dev = container_of(work, struct mt7996_dev, reset_work);
	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);

	/* chip full reset */
	if (dev->recovery.restart) {
		/* disable WA/WM WDT */
		mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA,
			   MT_MCU_CMD_WDT_MASK);

		if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT)
			dev->recovery.wa_reset_count++;
		else
			dev->recovery.wm_reset_count++;

		mt7996_mac_full_reset(dev);

		/* enable mcu irq */
		mt7996_irq_enable(dev, MT_INT_MCU_CMD);
		mt7996_irq_disable(dev, 0);

		/* enable WA/WM WDT */
		mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);

		dev->recovery.state = MT_MCU_CMD_NORMAL_STATE;
		dev->recovery.restart = false;
		return;
	}

	if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
		return;

	dev_info(dev->mt76.dev,"\n%s L1 SER recovery start.",
		 wiphy_name(dev->mt76.hw->wiphy));

	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
		mtk_wed_device_stop(&dev->mt76.mmio.wed_hif2);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
		mtk_wed_device_stop(&dev->mt76.mmio.wed);

	ieee80211_stop_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_stop_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_stop_queues(phy3->mt76->hw);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);

	cancel_work_sync(&dev->wed_rro.work);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	}
	if (phy3) {
		set_bit(MT76_RESET, &phy3->mt76->state);
		cancel_delayed_work_sync(&phy3->mt76->mac_work);
	}
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
			continue;

		napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	/* tell the MCU that host DMA is stopped, then wait for its ack */
	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);

	if (mt7996_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7996_dma_reset(dev, false);

		mt7996_tx_token_put(dev);
		idr_init(&dev->mt76.token);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
		mt7996_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	/* re-enable DMA Tx/Rx and interrupts */
	mt7996_dma_start(dev, false, false);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
		u32 wed_irq_mask = MT_INT_RRO_RX_DONE | MT_INT_TX_DONE_BAND2 |
				   dev->mt76.mmio.irqmask;

		if (mtk_wed_get_rx_capa(&dev->mt76.mmio.wed))
			wed_irq_mask &= ~MT_INT_RX_DONE_RRO_IND;

		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);

		mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
					    true);
		mt7996_irq_enable(dev, wed_irq_mask);
		mt7996_irq_disable(dev, 0);
	}

	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) {
		mt76_wr(dev, MT_INT_PCIE1_MASK_CSR, MT_INT_TX_RX_DONE_EXT);
		mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
				     MT_INT_TX_RX_DONE_EXT);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		clear_bit(MT76_RESET, &phy3->mt76->state);

	local_bh_disable();
	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
			continue;

		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}
	local_bh_enable();

	tasklet_schedule(&dev->mt76.irq_tasklet);

	mt76_worker_enable(&dev->mt76.tx_worker);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	ieee80211_wake_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_wake_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_wake_queues(phy3->mt76->hw);

	mutex_unlock(&dev->mt76.mutex);

	mt7996_update_beacons(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     MT7996_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(phy2->mt76->hw,
					     &phy2->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	if (phy3)
		ieee80211_queue_delayed_work(phy3->mt76->hw,
					     &phy3->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	dev_info(dev->mt76.dev,"\n%s L1 SER recovery completed.",
		 wiphy_name(dev->mt76.hw->wiphy));
}

/* firmware coredump */
/* Dump worker: capture firmware memory regions into a freshly allocated
 * crash-data buffer (one header per region), submit the coredump, then
 * kick the reset worker to actually recover the chip.
 */
void mt7996_mac_dump_work(struct work_struct *work)
{
	const struct mt7996_mem_region *mem_region;
	struct mt7996_crash_data *crash_data;
	struct mt7996_dev *dev;
	struct mt7996_mem_hdr *hdr;
	size_t buf_len;
	int i;
	u32 num;
	u8 *buf;

	dev = container_of(work, struct mt7996_dev, dump_work);

	mutex_lock(&dev->dump_mutex);

	crash_data = mt7996_coredump_new(dev);
	if (!crash_data) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_coredump;
	}

	mem_region = mt7996_coredump_get_mem_layout(dev, &num);
	if (!mem_region || !crash_data->memdump_buf_len) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_memdump;
	}

	buf = crash_data->memdump_buf;
	buf_len = crash_data->memdump_buf_len;

	/* dumping memory content... */
	memset(buf, 0, buf_len);
	for (i = 0; i < num; i++) {
		if (mem_region->len > buf_len) {
			dev_warn(dev->mt76.dev, "%s len %zu is too large\n",
				 mem_region->name, mem_region->len);
			break;
		}

		/* reserve space for the header */
		hdr = (void *)buf;
		buf += sizeof(*hdr);
		buf_len -= sizeof(*hdr);

		mt7996_memcpy_fromio(dev, buf, mem_region->start,
				     mem_region->len);

		hdr->start = mem_region->start;
		hdr->len = mem_region->len;

		if (!mem_region->len)
			/* note: the header remains, just with zero length */
			break;

		buf += mem_region->len;
		buf_len -= mem_region->len;

		mem_region++;
	}

	mutex_unlock(&dev->dump_mutex);

skip_memdump:
	mt7996_coredump_submit(dev);
skip_coredump:
	queue_work(dev->mt76.wq, &dev->reset_work);
}

/* Entry point for recovery: on a firmware (WM/WA) watchdog event start
 * the coredump + full-reset path, otherwise schedule the L1 SER reset
 * worker directly.
 */
void mt7996_reset(struct mt7996_dev *dev)
{
	if (!dev->recovery.hw_init_done)
		return;

	/* a full reset is already in flight; nothing more to do */
	if (dev->recovery.hw_full_reset)
		return;

	/* wm/wa exception: do full recovery */
	if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) {
		dev->recovery.restart = true;
		dev_info(dev->mt76.dev,
			 "%s indicated firmware crash, attempting recovery\n",
			 wiphy_name(dev->mt76.hw->wiphy));

		mt7996_irq_disable(dev, MT_INT_MCU_CMD);
		queue_work(dev->mt76.wq, &dev->dump_work);
		return;
	}

	queue_work(dev->mt76.wq, &dev->reset_work);
	wake_up(&dev->reset_wait);
}

/* Fold the hardware MIB counters of @phy's band into the software stats.
 *
 * Counters are accumulated with "+=", which suggests the MIB registers are
 * clear-on-read.  NOTE(review): clear-on-read is inferred from the usage
 * pattern here — confirm against the MT7996 register documentation.
 */
void mt7996_mac_update_stats(struct mt7996_phy *phy)
{
	struct mt76_mib_stats *mib = &phy->mib;
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	u32 cnt;
	int i;

	/* rx error/status counters */
	cnt = mt76_rr(dev, MT_MIB_RSCR1(band_idx));
	mib->fcs_err_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR33(band_idx));
	mib->rx_fifo_full_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR31(band_idx));
	mib->rx_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR6(band_idx));
	mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_RVSR0(band_idx));
	mib->rx_vector_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR35(band_idx));
	mib->rx_delimiter_fail_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR36(band_idx));
	mib->rx_len_mismatch_cnt += cnt;

	/* tx MPDU/A-MPDU counters */
	cnt = mt76_rr(dev, MT_MIB_TSCR0(band_idx));
	mib->tx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR2(band_idx));
	mib->tx_stop_q_empty_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR3(band_idx));
	mib->tx_mpdu_attempts_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR4(band_idx));
	mib->tx_mpdu_success_cnt += cnt;

	/* rx A-MPDU counters */
	cnt = mt76_rr(dev, MT_MIB_RSCR27(band_idx));
	mib->rx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR28(band_idx));
	mib->rx_ampdu_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR29(band_idx));
	mib->rx_ampdu_valid_subframe_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR30(band_idx));
	mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR27(band_idx));
	mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR28(band_idx));
	mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT, cnt);

	cnt = mt76_rr(dev, MT_UMIB_RPDCR(band_idx));
	mib->rx_pfdrop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RVSR1(band_idx));
	mib->rx_vec_queue_overflow_drop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR1(band_idx));
	mib->rx_ba_cnt += cnt;

	/* beamforming PPDU and MU counters */
	cnt = mt76_rr(dev, MT_MIB_BSCR0(band_idx));
	mib->tx_bf_ebf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR1(band_idx));
	mib->tx_bf_ibf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR2(band_idx));
	mib->tx_mu_bf_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR5(band_idx));
	mib->tx_mu_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR6(band_idx));
	mib->tx_mu_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR7(band_idx));
	mib->tx_su_acked_mpdu_cnt += cnt;

	/* BF feedback per PHY mode (HT/VHT/HE/EHT); each also feeds the
	 * "all" total
	 */
	cnt = mt76_rr(dev, MT_MIB_BSCR3(band_idx));
	mib->tx_bf_rx_fb_ht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR4(band_idx));
	mib->tx_bf_rx_fb_vht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR5(band_idx));
	mib->tx_bf_rx_fb_he_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR6(band_idx));
	mib->tx_bf_rx_fb_eht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	/* tx_bf_rx_fb_bw is a snapshot (=), nc/nr accumulate (+=) */
	cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(band_idx));
	mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt);
	mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt);
	mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt);

	cnt = mt76_rr(dev, MT_MIB_BSCR7(band_idx));
	mib->tx_bf_fb_trig_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR17(band_idx));
	mib->tx_bf_fb_cpl_cnt += cnt;

	/* per-bucket A-MSDU aggregation histogram plus running total */
	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
		cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
		mib->tx_amsdu[i] += cnt;
		mib->tx_amsdu_cnt += cnt;
	}

	/* rts count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR5(band_idx));
	mib->rts_cnt += cnt;

	/* rts retry count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR6(band_idx));
	mib->rts_retries_cnt += cnt;

	/* ba miss count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR0(band_idx));
	mib->ba_miss_cnt += cnt;

	/* ack fail count */
	cnt = mt76_rr(dev, MT_MIB_BFTFCR(band_idx));
	mib->ack_fail_cnt += cnt;

	/* 16-bucket tx aggregation-size histogram */
	for (i = 0; i < 16; i++) {
		cnt = mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));
		phy->mt76->aggr_stats[i] += cnt;
	}
}

/* Work item (dev->rc_work): push pending rate-control updates to the MCU.
 *
 * Stations queued on dev->sta_rc_list are drained under sta_poll_lock; the
 * lock is dropped around the (sleeping) MCU calls and re-taken for the next
 * list entry, so stations may be re-queued concurrently.
 */
void mt7996_mac_sta_rc_work(struct work_struct *work)
{
	struct mt7996_dev *dev = container_of(work, struct mt7996_dev, rc_work);
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct mt7996_sta *msta;
	u32 changed;
	LIST_HEAD(list);

	/* detach the whole pending list in one shot */
	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->sta_rc_list, &list);

	while (!list_empty(&list)) {
		msta = list_first_entry(&list, struct mt7996_sta, rc_list);
		list_del_init(&msta->rc_list);
		/* snapshot and clear the change mask before dropping the lock */
		changed = msta->changed;
		msta->changed = 0;
		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
		vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
			       IEEE80211_RC_NSS_CHANGED |
			       IEEE80211_RC_BW_CHANGED))
			mt7996_mcu_add_rate_ctrl(dev, vif, sta, true);

		if (changed & IEEE80211_RC_SMPS_CHANGED)
			mt7996_mcu_set_fixed_field(dev, vif, sta, NULL,
						   RATE_PARAM_MMPS_UPDATE);

		spin_lock_bh(&dev->mt76.sta_poll_lock);
	}

	spin_unlock_bh(&dev->mt76.sta_poll_lock);
}

/* Periodic per-phy housekeeping (mphy->mac_work), re-armed every
 * MT7996_WATCHDOG_TIME.  Survey data is refreshed every run; MIB stats and
 * per-station MCU queries only every 5th run.
 */
void mt7996_mac_work(struct work_struct *work)
{
	struct mt7996_phy *phy;
	struct mt76_phy *mphy;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mutex_lock(&mphy->dev->mutex);

	mt76_update_survey(mphy);
	if (++mphy->mac_work_count == 5) {
		mphy->mac_work_count = 0;

		mt7996_mac_update_stats(phy);

		mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_RATE);
		/* with WED active, tx/rx accounting is not visible to the
		 * host, so fetch admission and MSDU counters from firmware
		 */
		if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
			mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_ADM_STAT);
			mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_MSDU_COUNT);
		}
	}

	mutex_unlock(&mphy->dev->mutex);

	mt76_tx_status_check(mphy->dev, false);

	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
				     MT7996_WATCHDOG_TIME);
}

/* Stop any radar-detection chain that rdd_state says is running. */
static void mt7996_dfs_stop_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;

	if (phy->rdd_state & BIT(0))
		mt7996_mcu_rdd_cmd(dev, RDD_STOP, 0,
				   MT_RX_SEL0, 0);
	if (phy->rdd_state & BIT(1))
		mt7996_mcu_rdd_cmd(dev, RDD_STOP, 1,
				   MT_RX_SEL0, 0);
}

/* Start radar detection on one RDD chain, selecting the MCU region code
 * from the regulatory DFS domain (0 = ETSI, 1 = FCC/default, 2 = JP),
 * then switch the chain into detection mode.
 * Returns 0 or a negative MCU error.
 */
static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int chain)
{
	int err, region;

	switch (dev->mt76.region) {
	case NL80211_DFS_ETSI:
		region = 0;
		break;
	case NL80211_DFS_JP:
		region = 2;
		break;
	case NL80211_DFS_FCC:
	default:
		region = 1;
		break;
	}

	err = mt7996_mcu_rdd_cmd(dev, RDD_START, chain,
				 MT_RX_SEL0, region);
	if (err < 0)
		return err;

	return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
				  MT_RX_SEL0, 1);
}

/* Begin CAC and start the RDD chain(s) for the current channel.
 *
 * 160 MHz and 80+80 channels additionally enable chain 1 to cover the
 * second segment.  Started chains are recorded in phy->rdd_state so they
 * can be stopped later.  Returns 0 or a negative MCU error.
 */
static int mt7996_dfs_start_radar_detector(struct mt7996_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	int err;

	/* start CAC */
	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, band_idx,
				 MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	err = mt7996_dfs_start_rdd(dev, band_idx);
	if (err < 0)
		return err;

	phy->rdd_state |= BIT(band_idx);

	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
		err = mt7996_dfs_start_rdd(dev, 1);
		if (err < 0)
			return err;

		phy->rdd_state |= BIT(1);
	}

	return 0;
}

/* Program the region-specific radar pulse/pattern thresholds (tables at the
 * top of this file) into the MCU.  FCC additionally sets the FCC5 long
 * pulse number.  Returns 0, a negative MCU error, or -EINVAL for an
 * unhandled DFS region.
 */
static int
mt7996_dfs_init_radar_specs(struct mt7996_phy *phy)
{
	const struct mt7996_dfs_radar_spec *radar_specs;
	struct mt7996_dev *dev = phy->dev;
	int err, i;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		err = mt7996_mcu_set_fcc5_lpn(dev, 8);
		if (err < 0)
			return err;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7996_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7996_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}

/* Drive the per-phy DFS state machine towards the state required by the
 * current channel (mt76_phy_dfs_state()):
 *   DISABLED -> stop detection and return to normal rx,
 *   CAC      -> program specs and start CAC/detector,
 *   ACTIVE   -> end CAC on top of that.
 * Returns 0 on success or a negative error; on a failed CAC_END the state
 * is reset to UNKNOWN so the next call retries from scratch.
 */
int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	enum mt76_dfs_state dfs_state, prev_state;
	int err;

	prev_state = phy->mt76->dfs_state;
	dfs_state = mt76_phy_dfs_state(phy->mt76);

	if (prev_state == dfs_state)
		return 0;

	/* unknown previous state: stop everything before reconfiguring */
	if (prev_state == MT_DFS_STATE_UNKNOWN)
		mt7996_dfs_stop_radar_detector(phy);

	if (dfs_state == MT_DFS_STATE_DISABLED)
		goto stop;

	if (prev_state <= MT_DFS_STATE_DISABLED) {
		err = mt7996_dfs_init_radar_specs(phy);
		if (err < 0)
			return err;

		err = mt7996_dfs_start_radar_detector(phy);
		if (err < 0)
			return err;

		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
	}

	if (dfs_state == MT_DFS_STATE_CAC)
		return 0;

	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END,
				 phy->mt76->band_idx, MT_RX_SEL0, 0);
	if (err < 0) {
		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
		return err;
	}

	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
	return 0;

stop:
	err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START,
				 phy->mt76->band_idx, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	mt7996_dfs_stop_radar_detector(phy);
	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;

	return 0;
}

/* Scale a TWT wake duration into TSF time units (duration * 256;
 * wake durations are in 256us units per mt7996_mac_check_twt_req()).
 */
static int
mt7996_mac_twt_duration_align(int duration)
{
	return duration << 8;
}

/* Insert @flow into dev->twt_list, which is kept ordered by start_tsf, at
 * the first gap between scheduled flows large enough for its duration.
 * Returns the start TSF chosen for the flow (0 when it becomes the head).
 */
static u64
mt7996_mac_twt_sched_list_add(struct mt7996_dev *dev,
			      struct mt7996_twt_flow *flow)
{
	struct mt7996_twt_flow *iter, *iter_next;
	u32 duration = flow->duration << 8;
	u64 start_tsf;

	/* empty list, unscheduled head, or enough room before the head:
	 * schedule at TSF 0
	 */
	iter = list_first_entry_or_null(&dev->twt_list,
					struct mt7996_twt_flow, list);
	if (!iter || !iter->sched || iter->start_tsf > duration) {
		/* add flow as first entry in the list */
		list_add(&flow->list, &dev->twt_list);
		return 0;
	}

	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
		/* candidate slot: right after iter's service period */
		start_tsf = iter->start_tsf +
			    mt7996_mac_twt_duration_align(iter->duration);
		if (list_is_last(&iter->list, &dev->twt_list))
			break;

		if (!iter_next->sched ||
		    iter_next->start_tsf > start_tsf + duration) {
			list_add(&flow->list, &iter->list);
			goto out;
		}
	}

	/* add flow as last entry in the list */
	list_add_tail(&flow->list, &dev->twt_list);
out:
	return start_tsf;
}

/* Validate a TWT setup request against what this driver supports.
 * Rejects broadcast TWT, non-256us wake-duration units, explicit (i.e.
 * non-implicit) agreements, and intervals shorter than the requested
 * duration.  Returns 0 if acceptable, -EOPNOTSUPP otherwise.
 */
static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
{
	struct ieee80211_twt_params *twt_agrt;
	u64 interval, duration;
	u16 mantissa;
	u8 exp;

	/* only individual agreement supported */
	if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
		return -EOPNOTSUPP;

	/* only 256us unit supported */
	if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
		return -EOPNOTSUPP;

	twt_agrt = (struct ieee80211_twt_params *)twt->params;

	/* explicit agreement not supported */
	if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
		return -EOPNOTSUPP;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
			le16_to_cpu(twt_agrt->req_type));
	mantissa = le16_to_cpu(twt_agrt->mantissa);
	duration = twt_agrt->min_twt_dur << 8;

	/* wake interval = mantissa * 2^exp; must cover the wake duration */
	interval = (u64)mantissa << exp;
	if (interval < duration)
		return -EOPNOTSUPP;

	return 0;
}

/* Return true if @msta already has a flow with exactly the same TWT
 * parameters (duration, mantissa, exponent, protection, flow type,
 * trigger) as the incoming request — used to avoid duplicate agreements.
 */
static bool
mt7996_mac_twt_param_equal(struct mt7996_sta *msta,
			   struct ieee80211_twt_params *twt_agrt)
{
	u16 type = le16_to_cpu(twt_agrt->req_type);
	u8 exp;
	int i;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
	for (i = 0; i < MT7996_MAX_STA_TWT_AGRT; i++) {
		struct mt7996_twt_flow *f;

		if (!(msta->twt.flowid_mask & BIT(i)))
			continue;

		f = &msta->twt.flow[i];
		if (f->duration == twt_agrt->min_twt_dur &&
		    f->mantissa == twt_agrt->mantissa &&
		    f->exp == exp &&
		    f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
		    f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
		    f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
			return true;
	}

	return false;
}

/* mac80211 add_twt_setup handler: negotiate an individual TWT agreement
 * for @sta and answer in-place through @twt.
 *
 * On success the response setup command is ACCEPT and the flow is pushed
 * to firmware; any rejection path leaves setup_cmd = REJECT (or DICTATE
 * with a corrected min duration).  The reply's req_type SETUP_CMD field
 * and control byte are always rewritten before returning.
 */
void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta,
			      struct ieee80211_twt_setup *twt)
{
	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
	u16 req_type = le16_to_cpu(twt_agrt->req_type);
	enum ieee80211_twt_setup_cmd sta_setup_cmd;
	struct mt7996_dev *dev = mt7996_hw_dev(hw);
	struct mt7996_twt_flow *flow;
	u8 flowid, table_id, exp;

	if (mt7996_mac_check_twt_req(twt))
		goto out;

	mutex_lock(&dev->mt76.mutex);

	/* no room left, globally or for this station */
	if (dev->twt.n_agrt == MT7996_MAX_TWT_AGRT)
		goto unlock;

	if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
		goto unlock;

	/* duration too short: dictate the minimum back to the peer */
	if (twt_agrt->min_twt_dur < MT7996_MIN_TWT_DUR) {
		setup_cmd = TWT_SETUP_CMD_DICTATE;
		twt_agrt->min_twt_dur = MT7996_MIN_TWT_DUR;
		goto unlock;
	}

	/* reject duplicates of an existing agreement */
	if (mt7996_mac_twt_param_equal(msta, twt_agrt))
		goto unlock;

	/* allocate the lowest free flow id and echo it in the response */
	flowid = ffs(~msta->twt.flowid_mask) - 1;
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
	twt_agrt->req_type |= le16_encode_bits(flowid,
					       IEEE80211_TWT_REQTYPE_FLOWID);

	/* NOTE(review): assumes table_mask has a free bit whenever
	 * n_agrt < MT7996_MAX_TWT_AGRT; ffs(0) - 1 would be -1 otherwise
	 */
	table_id = ffs(~dev->twt.table_mask) - 1;
	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);

	flow = &msta->twt.flow[flowid];
	memset(flow, 0, sizeof(*flow));
	INIT_LIST_HEAD(&flow->list);
	flow->wcid = msta->wcid.idx;
	flow->table_id = table_id;
	flow->id = flowid;
	flow->duration = twt_agrt->min_twt_dur;
	flow->mantissa = twt_agrt->mantissa;
	flow->exp = exp;
	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);

	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
		u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
		u64 flow_tsf, curr_tsf;
		u32 rem;

		/* we pick the schedule: align the first service period to
		 * the next interval boundary after the current TSF
		 */
		flow->sched = true;
		flow->start_tsf = mt7996_mac_twt_sched_list_add(dev, flow);
		curr_tsf = __mt7996_get_tsf(hw, msta->vif);
		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
		flow_tsf = curr_tsf + interval - rem;
		twt_agrt->twt = cpu_to_le64(flow_tsf);
	} else {
		/* peer-dictated schedule: just track the flow */
		list_add_tail(&flow->list, &dev->twt_list);
	}
	flow->tsf = le64_to_cpu(twt_agrt->twt);

	if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
		goto unlock;

	/* commit: mark table/flow slots used and count the agreement */
	setup_cmd = TWT_SETUP_CMD_ACCEPT;
	dev->twt.table_mask |= BIT(table_id);
	msta->twt.flowid_mask |= BIT(flowid);
	dev->twt.n_agrt++;

unlock:
	mutex_unlock(&dev->mt76.mutex);
out:
	/* rewrite the setup command and keep only RX_DISABLED in control */
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt_agrt->req_type |=
		le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt->control = twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED;
}

/* Tear down one TWT flow of @msta and release its bookkeeping.
 *
 * Caller must hold dev->mt76.mutex.  Invalid or unused flow ids are
 * ignored; if the MCU delete fails the local state is intentionally kept,
 * so the flow can be retried or cleaned up later.
 */
void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
				  struct mt7996_sta *msta,
				  u8 flowid)
{
	struct mt7996_twt_flow *flow;

	lockdep_assert_held(&dev->mt76.mutex);

	if (flowid >= ARRAY_SIZE(msta->twt.flow))
		return;

	if (!(msta->twt.flowid_mask & BIT(flowid)))
		return;

	flow = &msta->twt.flow[flowid];
	if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow,
				       MCU_TWT_AGRT_DELETE))
		return;

	list_del_init(&flow->list);
	msta->twt.flowid_mask &= ~BIT(flowid);
	dev->twt.table_mask &= ~BIT(flow->table_id);
	dev->twt.n_agrt--;
}