// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2022 MediaTek Inc.
 */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "coredump.h"
#include "mt7996.h"
#include "../dma.h"
#include "mac.h"
#include "mcu.h"

#define to_rssi(field, rcpi)	((FIELD_GET(field, rcpi) - 220) / 2)
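/* Per the macro above, each 8-bit RCPI field encodes 2 * RSSI + 220, so
 * to_rssi() recovers the RSSI in dBm. Worked example:
 * rcpi = 160 -> (160 - 220) / 2 = -30 dBm.
 */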
static const struct mt7996_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0,  6, 32, 28, 0,  990, 5010, 17, 1, 1 },
		[6] =  { 1, 0,  9, 32, 28, 0,  615, 5010, 27, 1, 1 },
		[7] =  { 1, 0, 15, 32, 28, 0,  240,  445, 27, 1, 1 },
		[8] =  { 1, 0, 12, 32, 28, 0,  240,  510, 42, 1, 1 },
		[9] =  { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
		[10] = { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
		[11] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 },
		[12] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 },
	},
};

static const struct mt7996_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0,  8,  32, 28, 0, 508, 3076, 13, 1,  1 },
		[1] = { 1, 0, 12,  32, 28, 0, 140,  240, 17, 1,  1 },
		[2] = { 1, 0,  8,  32, 28, 0, 190,  510, 22, 1,  1 },
		[3] = { 1, 0,  6,  32, 28, 0, 190,  510, 32, 1,  1 },
		[4] = { 1, 0,  9, 255, 28, 0, 323,  343, 13, 1, 32 },
	},
};

static const struct mt7996_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] =  { 1, 0,  8,  32, 28, 0,  508, 3076,  13, 1,  1 },
		[1] =  { 1, 0, 12,  32, 28, 0,  140,  240,  17, 1,  1 },
		[2] =  { 1, 0,  8,  32, 28, 0,  190,  510,  22, 1,  1 },
		[3] =  { 1, 0,  6,  32, 28, 0,  190,  510,  32, 1,  1 },
		[4] =  { 1, 0,  9, 255, 28, 0,  323,  343,  13, 1, 32 },
		[13] = { 1, 0,  7,  32, 28, 0, 3836, 3856,  14, 1,  1 },
		[14] = { 1, 0,  6,  32, 28, 0,  615, 5010, 110, 1,  1 },
		[15] = { 1, 1,  0,   0,  0, 0,   15, 5010, 110, 0,  0, 12, 32, 28 },
	},
};

static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev,
					    u16 idx, u8 band_idx)
{
	struct mt7996_sta_link *msta_link;
	struct mt7996_sta *msta;
	struct mt7996_vif *mvif;
	struct mt76_wcid *wcid;
	int i;

	wcid = mt76_wcid_ptr(dev, idx);
	if (!wcid || !wcid->sta)
		return NULL;

	if (!mt7996_band_valid(dev, band_idx))
		return NULL;

	if (wcid->phy_idx == band_idx)
		return wcid;

	msta_link = container_of(wcid, struct mt7996_sta_link, wcid);
	msta = msta_link->sta;
	if (!msta || !msta->vif)
		return NULL;

	mvif = msta->vif;
	for (i = 0; i < ARRAY_SIZE(mvif->mt76.link); i++) {
		struct mt76_vif_link *mlink;

		mlink = rcu_dereference(mvif->mt76.link[i]);
		if (!mlink)
			continue;

		if (mlink->band_idx != band_idx)
			continue;

		msta_link = rcu_dereference(msta->link[i]);
		break;
	}

	return &msta_link->wcid;
}

bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}

u32 mt7996_mac_wtbl_lmac_addr(struct mt7996_dev *dev, u16 wcid, u8 dw)
{
	mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
		FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));

	return MT_WTBL_LMAC_OFFS(wcid, dw);
}
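/* Poll per-station hardware state. Per the WTBL offsets used below,
 * DW 20 onwards holds the per-AC TX/RX airtime accumulators and DW 34
 * holds the RCPI of the latest response frames (CTS/BA/ACK).
 */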
static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
{
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct mt7996_sta_link *msta_link;
	struct mt76_vif_link *mlink;
	struct ieee80211_sta *sta;
	struct mt7996_sta *msta;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	struct mt76_wcid *wcid;
	int i;

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->mt76.sta_poll_lock);

	rcu_read_lock();

	while (true) {
		bool clear = false;
		u32 addr, val;
		u16 idx;
		s8 rssi[4];

		spin_lock_bh(&dev->mt76.sta_poll_lock);
		if (list_empty(&sta_poll_list)) {
			spin_unlock_bh(&dev->mt76.sta_poll_lock);
			break;
		}
		msta_link = list_first_entry(&sta_poll_list,
					     struct mt7996_sta_link,
					     wcid.poll_list);
		msta = msta_link->sta;
		wcid = &msta_link->wcid;
		list_del_init(&wcid->poll_list);
		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		idx = wcid->idx;

		/* refresh peer's airtime reporting */
		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 20);

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = msta_link->airtime_ac[i];
			u32 rx_last = msta_link->airtime_ac[i + 4];

			msta_link->airtime_ac[i] = mt76_rr(dev, addr);
			msta_link->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			tx_time[i] = msta_link->airtime_ac[i] - tx_last;
			rx_time[i] = msta_link->airtime_ac[i + 4] - rx_last;

			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

		if (clear) {
			mt7996_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta_link->airtime_ac, 0,
			       sizeof(msta_link->airtime_ac));
		}

		if (!wcid->sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u8 q = mt76_connac_lmac_mapping(i);
			u32 tx_cur = tx_time[q];
			u32 rx_cur = rx_time[q];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur, rx_cur);
		}

		/* get signal strength of resp frames (CTS/BA/ACK) */
		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 34);
		val = mt76_rr(dev, addr);

		rssi[0] = to_rssi(GENMASK(7, 0), val);
		rssi[1] = to_rssi(GENMASK(15, 8), val);
		rssi[2] = to_rssi(GENMASK(23, 16), val);
		rssi[3] = to_rssi(GENMASK(31, 24), val);

		mlink = rcu_dereference(msta->vif->mt76.link[wcid->link_id]);
		if (mlink) {
			struct mt76_phy *mphy = mt76_vif_link_phy(mlink);

			if (mphy)
				msta_link->ack_signal =
					mt76_rx_signal(mphy->antenna_mask,
						       rssi);
		}

		ewma_avg_signal_add(&msta_link->avg_ack_signal,
				    -msta_link->ack_signal);
	}

	rcu_read_unlock();
}
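/* Sketch of the recovery performed below: the original 802.11 header is
 * rebuilt from RXD words 8/10 plus the translated ethernet header, the
 * LLC-SNAP prefix is restored from the ethertype, and the QoS/HT-control
 * fields are pushed back in front of the payload.
 */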
/* The HW does not translate the mac header to 802.3 for mesh point */
static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
	struct mt7996_sta_link *msta_link = (void *)status->wcid;
	struct mt7996_sta *msta = msta_link->sta;
	struct ieee80211_bss_conf *link_conf;
	__le32 *rxd = (__le32 *)skb->data;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct ieee80211_hdr hdr;
	u16 frame_control;

	if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
	    MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
		return -EINVAL;

	if (!msta || !msta->vif)
		return -EINVAL;

	sta = wcid_to_sta(status->wcid);
	vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
	link_conf = rcu_dereference(vif->link_conf[msta_link->wcid.link_id]);
	if (!link_conf)
		return -EINVAL;

	/* store the info from RXD and ethhdr to avoid being overridden */
	frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL);
	hdr.frame_control = cpu_to_le16(frame_control);
	hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_SEQ_CTRL));
	hdr.duration_id = 0;

	ether_addr_copy(hdr.addr1, vif->addr);
	ether_addr_copy(hdr.addr2, sta->addr);
	switch (frame_control & (IEEE80211_FCTL_TODS |
				 IEEE80211_FCTL_FROMDS)) {
	case 0:
		ether_addr_copy(hdr.addr3, link_conf->bssid);
		break;
	case IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_source);
		break;
	case IEEE80211_FCTL_TODS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		break;
	case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		ether_addr_copy(hdr.addr4, eth_hdr->h_source);
		break;
	default:
		return -EINVAL;
	}

	skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
	if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
	    eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
		ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
	else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
		ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
	else
		skb_pull(skb, 2);

	if (ieee80211_has_order(hdr.frame_control))
		memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[11],
		       IEEE80211_HT_CTL_LEN);
	if (ieee80211_is_data_qos(hdr.frame_control)) {
		__le16 qos_ctrl;

		qos_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_QOS_CTL));
		memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
		       IEEE80211_QOS_CTL_LEN);
	}

	if (ieee80211_has_a4(hdr.frame_control))
		memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
	else
		memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);

	return 0;
}
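/* Decode the rate portion of the P-RXV: TX mode (CCK/OFDM/HT/VHT/HE/EHT),
 * rate index, NSS, STBC, GI and frame bandwidth, mapped onto the
 * mt76_rx_status fields consumed by mac80211.
 */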
static int
mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
			struct mt76_rx_status *status,
			struct ieee80211_supported_band *sband,
			__le32 *rxv, u8 *mode)
{
	u32 v0, v2;
	u8 stbc, gi, bw, dcm, nss;
	int i, idx;
	bool cck = false;

	v0 = le32_to_cpu(rxv[0]);
	v2 = le32_to_cpu(rxv[2]);

	idx = FIELD_GET(MT_PRXV_TX_RATE, v0);
	i = idx;
	nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;

	stbc = FIELD_GET(MT_PRXV_HT_STBC, v2);
	gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v2);
	*mode = FIELD_GET(MT_PRXV_TX_MODE, v2);
	dcm = FIELD_GET(MT_PRXV_DCM, v2);
	bw = FIELD_GET(MT_PRXV_FRAME_MODE, v2);

	switch (*mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		i = mt76_get_rate(&dev->mt76, sband, i, cck);
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 31)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_VHT:
		status->nss = nss;
		status->encoding = RX_ENC_VHT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 11)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_HE_MU:
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
		status->nss = nss;
		status->encoding = RX_ENC_HE;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
			status->he_gi = gi;

		status->he_dcm = dcm;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		status->nss = nss;
		status->encoding = RX_ENC_EHT;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_EHT_GI_3_2)
			status->eht.gi = gi;
		break;
	default:
		return -EINVAL;
	}
	status->rate_idx = i;

	switch (bw) {
	case IEEE80211_STA_RX_BW_20:
		break;
	case IEEE80211_STA_RX_BW_40:
		if (*mode & MT_PHY_TYPE_HE_EXT_SU &&
		    (idx & MT_PRXV_TX_ER_SU_106T)) {
			status->bw = RATE_INFO_BW_HE_RU;
			status->he_ru =
				NL80211_RATE_INFO_HE_RU_ALLOC_106;
		} else {
			status->bw = RATE_INFO_BW_40;
		}
		break;
	case IEEE80211_STA_RX_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	case IEEE80211_STA_RX_BW_160:
		status->bw = RATE_INFO_BW_160;
		break;
	/* rxv reports bw 320-1 and 320-2 separately */
	case IEEE80211_STA_RX_BW_320:
	case IEEE80211_STA_RX_BW_320 + 1:
		status->bw = RATE_INFO_BW_320;
		break;
	default:
		return -EINVAL;
	}

	status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
	if (*mode < MT_PHY_TYPE_HE_SU && gi)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	return 0;
}

static void
mt7996_wed_check_ppe(struct mt7996_dev *dev, struct mt76_queue *q,
		     struct mt7996_sta *msta, struct sk_buff *skb,
		     u32 info)
{
	struct ieee80211_vif *vif;
	struct wireless_dev *wdev;

	if (!msta || !msta->vif)
		return;

	if (!mt76_queue_is_wed_rx(q))
		return;

	if (!(info & MT_DMA_INFO_PPE_VLD))
		return;

	vif = container_of((void *)msta->vif, struct ieee80211_vif,
			   drv_priv);
	wdev = ieee80211_vif_to_wdev(vif);
	skb->dev = wdev->netdev;

	mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb,
				 FIELD_GET(MT_DMA_PPE_CPU_REASON, info),
				 FIELD_GET(MT_DMA_PPE_ENTRY, info));
}
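/* Main RX descriptor parser. The RXD starts with 8 mandatory DWs;
 * optional groups follow in the order group 4 (802.11 header fields),
 * group 1 (IV), group 2 (timestamp), group 3 (P-RXV) and group 5 (C-RXV),
 * each advancing the rxd cursor as coded below.
 */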
static int
mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
		   struct sk_buff *skb, u32 *info)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7996_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv = NULL;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 rxd3 = le32_to_cpu(rxd[3]);
	u32 rxd4 = le32_to_cpu(rxd[4]);
	u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM;
	u32 csum_status = *(u32 *)skb->cb;
	u32 mesh_mask = MT_RXD0_MESH | MT_RXD0_MHCP;
	bool is_mesh = (rxd0 & mesh_mask) == mesh_mask;
	bool unicast, insert_ccmp_hdr = false;
	u8 remove_pad, amsdu_info, band_idx;
	u8 mode = 0, qos_ctl = 0;
	bool hdr_trans;
	u16 hdr_gap;
	u16 seq_ctrl = 0;
	__le16 fc = 0;
	int idx;
	u8 hw_aggr = false;
	struct mt7996_sta *msta = NULL;

	hw_aggr = status->aggr;
	memset(status, 0, sizeof(*status));

	band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1);
	mphy = dev->mt76.phys[band_idx];
	phy = mphy->priv;
	status->phy_idx = mphy->band_idx;

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
		return -EINVAL;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	status->wcid = mt7996_rx_get_wcid(dev, idx, band_idx);

	if (status->wcid) {
		struct mt7996_sta_link *msta_link;

		msta_link = container_of(status->wcid, struct mt7996_sta_link,
					 wcid);
		msta = msta_link->sta;
		mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);
	}

	status->freq = mphy->chandef.chan->center_freq;
	status->band = mphy->chandef.chan->band;
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else if (status->band == NL80211_BAND_6GHZ)
		sband = &mphy->sband_6g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!sband->channels)
		return -EINVAL;

	if ((rxd3 & csum_mask) == csum_mask &&
	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd3 & MT_RXD3_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	rxd += 8;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		fc = cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL, v0));
		qos_ctl = FIELD_GET(MT_RXD10_QOS_CTL, v2);
		seq_ctrl = FIELD_GET(MT_RXD10_SEQ_CTRL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}
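	/* The P-RXV words carry the per-chain RCPI plus the rate info
	 * decoded by mt7996_mac_fill_rx_rate(); the C-RXV words (group 5)
	 * are only skipped over here.
	 */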
	/* RXD Group 3 - P-RXV */
	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		u32 v3;
		int ret;

		rxv = rxd;
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;

		v3 = le32_to_cpu(rxv[3]);

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v3);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v3);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v3);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v3);

		/* RXD Group 5 - C-RXV */
		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			rxd += 24;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;
		}

		ret = mt7996_mac_fill_rx_rate(dev, status, sband, rxv, &mode);
		if (ret < 0)
			return ret;
	}

	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
	}

	/* IEEE 802.11 fragmentation can only be applied to unicast frames.
	 * Hence, drop fragments with multicast/broadcast RA.
	 * This check fixes vulnerabilities, like CVE-2020-26145.
	 */
	if ((ieee80211_has_morefrags(fc) || seq_ctrl & IEEE80211_SCTL_FRAG) &&
	    FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) != MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
	if (hdr_trans && ieee80211_has_morefrags(fc)) {
		if (mt7996_reverse_frag0_hdr_trans(skb, hdr_gap))
			return -EINVAL;
		hdr_trans = false;
	} else {
		int pad_start = 0;

		skb_pull(skb, hdr_gap);
		if (!hdr_trans && status->amsdu && !(ieee80211_has_a4(fc) && is_mesh)) {
			pad_start = ieee80211_get_hdrlen_from_skb(skb);
		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
			/* When header translation failure is indicated,
			 * the hardware will insert an extra 2-byte field
			 * containing the data length after the protocol
			 * type field. This happens either when the LLC-SNAP
			 * pattern did not match, or if a VLAN header was
			 * detected.
			 */
			pad_start = 12;
			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
				pad_start += 4;
			else
				pad_start = 0;
		}

		if (pad_start) {
			memmove(skb->data + 2, skb->data, pad_start);
			skb_pull(skb, 2);
		}
	}

	if (!hdr_trans) {
		struct ieee80211_hdr *hdr;

		if (insert_ccmp_hdr) {
			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

			mt76_insert_ccmp_hdr(skb, key_id);
		}

		hdr = mt76_skb_get_hdr(skb);
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			u8 *qos = ieee80211_get_qos_ctl(hdr);

			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *qos;

			/* Mesh DA/SA/Length will be stripped after hardware
			 * de-amsdu, so the A-MSDU-present bit must be cleared
			 * here to mark it as a normal mesh frame.
			 */
			if (ieee80211_has_a4(fc) && is_mesh && status->amsdu)
				*qos &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}
		skb_set_mac_header(skb, (unsigned char *)hdr - skb->data);
	} else {
		status->flag |= RX_FLAG_8023;
		mt7996_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
				     *info);
	}

	if (rxv && !(status->flag & RX_FLAG_8023)) {
		switch (status->encoding) {
		case RX_ENC_EHT:
			mt76_connac3_mac_decode_eht_radiotap(skb, rxv, mode);
			break;
		case RX_ENC_HE:
			mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);
			break;
		default:
			break;
		}
	}

	if (!status->wcid || !ieee80211_is_data_qos(fc) || hw_aggr)
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(fc);
	status->qos_ctl = qos_ctl;
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);

	return 0;
}
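/* TXD (TXWI) writers. The 802.3 variant handles HW-encapsulated frames,
 * the 802.11 variant raw frames; both pack the frame type/subtype into
 * TXD DW2 using the FC field bit positions (FTYPE >> 2, STYPE >> 4).
 */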
static void
mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid)
{
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	u8 fc_type, fc_stype;
	u16 ethertype;
	bool wmm = false;
	u32 val;

	if (wcid->sta) {
		struct ieee80211_sta *sta = wcid_to_sta(wcid);

		wmm = sta->wme;
	}

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	ethertype = get_unaligned_be16(&skb->data[12]);
	if (ethertype >= ETH_P_802_3_MIN)
		val |= MT_TXD1_ETH_802_3;

	txwi[1] |= cpu_to_le32(val);

	fc_type = IEEE80211_FTYPE_DATA >> 2;
	fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	txwi[2] |= cpu_to_le32(val);

	if (wcid->amsdu)
		txwi[3] |= cpu_to_le32(MT_TXD3_HW_AMSDU);
}

static void
mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
			    struct sk_buff *skb,
			    struct ieee80211_key_conf *key,
			    struct mt76_wcid *wcid)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	__le16 fc = hdr->frame_control, sc = hdr->seq_ctrl;
	u16 seqno = le16_to_cpu(sc);
	u8 fc_type, fc_stype;
	u32 val;

	if (ieee80211_is_action(fc) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
		if (is_mt7990(&dev->mt76))
			txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TID_ADDBA, tid));
		else
			txwi[7] |= cpu_to_le32(MT_TXD7_MAC_TXD);

		tid = MT_TX_ADDBA;
	} else if (ieee80211_is_mgmt(hdr->frame_control)) {
		tid = MT_TX_NORMAL;
	}

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	if (!ieee80211_is_data(fc) || multicast ||
	    info->flags & IEEE80211_TX_CTL_USE_MINRATE)
		val |= MT_TXD1_FIXED_RATE;

	if (key && multicast && ieee80211_is_robust_mgmt_frame(skb)) {
		val |= MT_TXD1_BIP;
		txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
	}

	txwi[1] |= cpu_to_le32(val);

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	if (ieee80211_has_morefrags(fc) && ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_FIRST);
	else if (ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_MID);
	else if (!ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_LAST);
	else
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_NONE);

	txwi[2] |= cpu_to_le32(val);

	txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast));
	if (ieee80211_is_beacon(fc)) {
		txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
		txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
	}

	if (multicast && ieee80211_vif_is_mld(info->control.vif)) {
		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
	}

	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
		txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
	}

	if (ieee80211_vif_is_mld(info->control.vif) &&
	    (multicast || unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))))
		txwi[5] |= cpu_to_le32(MT_TXD5_FL);

	if (ieee80211_is_nullfunc(fc) && ieee80211_has_a4(fc) &&
	    ieee80211_vif_is_mld(info->control.vif)) {
		txwi[5] |= cpu_to_le32(MT_TXD5_FL);
		txwi[6] |= cpu_to_le32(MT_TXD6_DIS_MAT);
	}

	if (!wcid->sta && ieee80211_is_mgmt(fc))
		txwi[6] |= cpu_to_le32(MT_TXD6_DIS_MAT);
}
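/* Fill the common TXD fields (queue, wcid, power-save, PID, fixed rate).
 * Beacons and inband discovery frames use firmware queues (BCN0/ALTX0),
 * everything else the cut-through data queues derived from the link's
 * WMM set.
 */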
void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid,
			   struct ieee80211_key_conf *key, int pid,
			   enum mt76_txq_id qid, u32 changed)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	struct mt76_vif_link *mlink = NULL;
	struct mt7996_vif *mvif;
	unsigned int link_id;
	u16 tx_count = 15;
	u32 val;
	bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
					 BSS_CHANGED_FILS_DISCOVERY));
	bool beacon = !!(changed & (BSS_CHANGED_BEACON |
				    BSS_CHANGED_BEACON_ENABLED)) && (!inband_disc);

	if (wcid != &dev->mt76.global_wcid)
		link_id = wcid->link_id;
	else
		link_id = u32_get_bits(info->control.flags,
				       IEEE80211_TX_CTRL_MLO_LINK);

	mvif = vif ? (struct mt7996_vif *)vif->drv_priv : NULL;
	if (mvif) {
		if (wcid->offchannel)
			mlink = rcu_dereference(mvif->mt76.offchannel_link);
		if (!mlink)
			mlink = rcu_dereference(mvif->mt76.link[link_id]);
	}

	if (mlink) {
		omac_idx = mlink->omac_idx;
		wmm_idx = mlink->wmm_idx;
		band_idx = mlink->band_idx;
	}

	if (inband_disc) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_ALTX0;
	} else if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (qid >= MT_TXQ_PSD) {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = wmm_idx * MT7996_MAX_WMM_SETS +
			mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	if (band_idx)
		val |= FIELD_PREP(MT_TXD1_TGID, band_idx);

	txwi[1] = cpu_to_le32(val);
	txwi[2] = 0;

	val = MT_TXD3_SW_POWER_MGMT |
	      FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (key)
		val |= MT_TXD3_PROTECT_FRAME;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		val |= MT_TXD3_NO_ACK;

	txwi[3] = cpu_to_le32(val);
	txwi[4] = 0;

	val = FIELD_PREP(MT_TXD5_PID, pid);
	if (pid >= MT_PACKET_ID_FIRST)
		val |= MT_TXD5_TX_STATUS_HOST;
	txwi[5] = cpu_to_le32(val);

	val = MT_TXD6_DAS | MT_TXD6_VTA;
	if ((q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0) ||
	    skb->protocol == cpu_to_be16(ETH_P_PAE))
		val |= MT_TXD6_DIS_MAT;

	if (is_mt7996(&dev->mt76))
		val |= FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
	else if (is_8023 || !ieee80211_is_mgmt(hdr->frame_control))
		val |= FIELD_PREP(MT_TXD6_MSDU_CNT_V2, 1);

	txwi[6] = cpu_to_le32(val);
	txwi[7] = 0;

	if (is_8023)
		mt7996_mac_write_txwi_8023(dev, txwi, skb, wcid);
	else
		mt7996_mac_write_txwi_80211(dev, txwi, skb, key, wcid);

	if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
		bool mcast = ieee80211_is_data(hdr->frame_control) &&
			     is_multicast_ether_addr(hdr->addr1);
		u8 idx = MT7996_BASIC_RATES_TBL;

		if (mlink) {
			if (mcast && mlink->mcast_rates_idx)
				idx = mlink->mcast_rates_idx;
			else if (beacon && mlink->beacon_rates_idx)
				idx = mlink->beacon_rates_idx;
			else
				idx = mlink->basic_rates_idx;
		}

		val = FIELD_PREP(MT_TXD6_TX_RATE, idx) | MT_TXD6_FIXED_BW;
		if (mcast)
			val |= MT_TXD6_DIS_MAT;
		txwi[6] |= cpu_to_le32(val);
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}
}

static bool
mt7996_tx_use_mgmt(struct mt7996_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (ieee80211_is_mgmt(hdr->frame_control))
		return true;

	/* for SDO to bypass specific data frames */
	if (!mt7996_has_wa(dev)) {
		if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
			return true;

		if (ieee80211_has_a4(hdr->frame_control) &&
		    !ieee80211_is_data_present(hdr->frame_control))
			return true;
	}

	return false;
}
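/* DMA TX preparation: allocate a token for the frame, optionally rewrite
 * MLD EAPOL addresses in software, write the TXD and build the FW TXP
 * buffer descriptors following it.
 */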
int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_txwi_cache *t;
	int id, i, pid, nbuf = tx_info->nbuf - 1;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	__le32 *ptr = (__le32 *)txwi_ptr;
	u8 *txwi = (u8 *)txwi_ptr;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	/* Since the rules of HW MLD address translation are not fully
	 * compatible with 802.11 EAPOL frames, do the translation in
	 * software
	 */
	if (tx_info->skb->protocol == cpu_to_be16(ETH_P_PAE) && sta->mlo) {
		struct ieee80211_hdr *hdr = (void *)tx_info->skb->data;
		struct ieee80211_bss_conf *link_conf;
		struct ieee80211_link_sta *link_sta;

		link_conf = rcu_dereference(vif->link_conf[wcid->link_id]);
		if (!link_conf)
			return -EINVAL;

		link_sta = rcu_dereference(sta->link[wcid->link_id]);
		if (!link_sta)
			return -EINVAL;

		dma_sync_single_for_cpu(mdev->dma_dev, tx_info->buf[1].addr,
					tx_info->buf[1].len, DMA_TO_DEVICE);

		memcpy(hdr->addr1, link_sta->addr, ETH_ALEN);
		memcpy(hdr->addr2, link_conf->addr, ETH_ALEN);
		if (ieee80211_has_a4(hdr->frame_control)) {
			memcpy(hdr->addr3, sta->addr, ETH_ALEN);
			memcpy(hdr->addr4, vif->addr, ETH_ALEN);
		} else if (ieee80211_has_tods(hdr->frame_control)) {
			memcpy(hdr->addr3, sta->addr, ETH_ALEN);
		} else if (ieee80211_has_fromds(hdr->frame_control)) {
			memcpy(hdr->addr3, vif->addr, ETH_ALEN);
		}

		dma_sync_single_for_device(mdev->dma_dev, tx_info->buf[1].addr,
					   tx_info->buf[1].len, DMA_TO_DEVICE);
	}
	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
	memset(txwi_ptr, 0, MT_TXD_SIZE);
	/* Non-QoS data is transmitted with an 802.11 header, so the host
	 * needs to fill the txd
	 */
	if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
		mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
				      pid, qid, 0);

	/* MT7996 and MT7992 require the driver to provide the MAC TXP for
	 * AddBA requests
	 */
	if (le32_to_cpu(ptr[7]) & MT_TXD7_MAC_TXD) {
		u32 val;

		ptr = (__le32 *)(txwi + MT_TXD_SIZE);
		memset((void *)ptr, 0, sizeof(struct mt76_connac_fw_txp));

		val = FIELD_PREP(MT_TXP0_TOKEN_ID0, id) |
		      MT_TXP0_TOKEN_ID0_VALID_MASK;
		ptr[0] = cpu_to_le32(val);

		val = FIELD_PREP(MT_TXP1_TID_ADDBA,
				 tx_info->skb->priority &
				 IEEE80211_QOS_CTL_TID_MASK);
		ptr[1] = cpu_to_le32(val);
		ptr[2] = cpu_to_le32(tx_info->buf[1].addr & 0xFFFFFFFF);

		val = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[1].len) |
		      MT_TXP3_ML0_MASK;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		val |= FIELD_PREP(MT_TXP3_DMA_ADDR_H,
				  tx_info->buf[1].addr >> 32);
#endif
		ptr[3] = cpu_to_le32(val);
	} else {
		struct mt76_connac_txp_common *txp;

		txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
		for (i = 0; i < nbuf; i++) {
			u16 len;

			len = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[i + 1].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			len |= FIELD_PREP(MT_TXP_DMA_ADDR_H,
					  tx_info->buf[i + 1].addr >> 32);
#endif

			txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
			txp->fw.len[i] = cpu_to_le16(len);
		}
		txp->fw.nbuf = nbuf;

		txp->fw.flags = cpu_to_le16(MT_CT_INFO_FROM_HOST);

		if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
			txp->fw.flags |= cpu_to_le16(MT_CT_INFO_APPLY_TXD);

		if (!key)
			txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

		if (!is_8023 && mt7996_tx_use_mgmt(dev, tx_info->skb))
			txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

		if (vif) {
			struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
			struct mt76_vif_link *mlink = NULL;

			if (wcid->offchannel)
				mlink = rcu_dereference(mvif->mt76.offchannel_link);
			if (!mlink)
				mlink = rcu_dereference(mvif->mt76.link[wcid->link_id]);

			txp->fw.bss_idx = mlink ? mlink->idx : mvif->deflink.mt76.idx;
		}

		txp->fw.token = cpu_to_le16(id);
		txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff);
	}

	tx_info->skb = NULL;

	/* pass partial skb header to fw */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	return 0;
}

u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
{
	struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
	__le32 *txwi = ptr;
	u32 val;

	memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));

	val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
	txwi[0] = cpu_to_le32(val);

	val = BIT(31) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
	txwi[1] = cpu_to_le32(val);

	txp->token = cpu_to_le16(token_id);
	txp->nbuf = 1;
	txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));

	return MT_TXD_SIZE + sizeof(*txp);
}
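/* Trigger a TX BA session the first time QoS data goes out on a TID of
 * an HT/HE capable peer; per-TID state is tracked in wcid->ampdu_state.
 */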
static void
mt7996_tx_check_aggr(struct ieee80211_link_sta *link_sta,
		     struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u16 fc, tid;

	if (!(link_sta->ht_cap.ht_supported || link_sta->he_cap.has_he))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	if (tid >= 6) /* skip VO queue */
		return;

	if (is_8023) {
		fc = IEEE80211_FTYPE_DATA |
		     (link_sta->sta->wme ? IEEE80211_STYPE_QOS_DATA
					 : IEEE80211_STYPE_DATA);
	} else {
		/* No need to get precise TID for Action/Management Frame,
		 * since it will not meet the following Frame Control
		 * condition anyway.
		 */

		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

		fc = le16_to_cpu(hdr->frame_control) &
		     (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
	}

	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
		return;

	if (!test_and_set_bit(tid, &wcid->ampdu_state))
		ieee80211_start_tx_ba_session(link_sta->sta, tid, 0);
}

static void
mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
		 struct ieee80211_link_sta *link_sta,
		 struct mt76_wcid *wcid, struct list_head *free_list)
{
	struct mt76_dev *mdev = &dev->mt76;
	__le32 *txwi;
	u16 wcid_idx;

	mt76_connac_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
	if (link_sta) {
		wcid_idx = wcid->idx;
		if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE))) {
			struct mt7996_sta *msta;

			/* AMPDU state is stored in the primary link */
			msta = (void *)link_sta->sta->drv_priv;
			mt7996_tx_check_aggr(link_sta, &msta->deflink.wcid,
					     t->skb);
		}
	} else {
		wcid_idx = le32_get_bits(txwi[9], MT_TXD9_WLAN_IDX);
	}

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);

out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}
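/* Parse a TXRX_NOTIFY (tx-free) event: after two header DWs, each DW
 * either starts a new wcid pair, reports per-wcid tx status, or carries
 * up to two released MSDU token ids.
 */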
static void
mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
{
	__le32 *tx_free = (__le32 *)data, *cur_info;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *phy2 = mdev->phys[MT_BAND1];
	struct mt76_phy *phy3 = mdev->phys[MT_BAND2];
	struct ieee80211_link_sta *link_sta = NULL;
	struct mt76_txwi_cache *txwi;
	struct mt76_wcid *wcid = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *skb, *tmp;
	void *end = data + len;
	bool wake = false;
	u16 total, count = 0;
	u8 ver;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
	if (phy2) {
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_BE], false);
	}
	if (phy3) {
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_BE], false);
	}

	ver = le32_get_bits(tx_free[1], MT_TXFREE1_VER);
	if (WARN_ON_ONCE(ver < 5))
		return;

	total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT);
	for (cur_info = &tx_free[2]; count < total; cur_info++) {
		u32 msdu, info;
		u8 i;

		if (WARN_ON_ONCE((void *)cur_info >= end))
			return;
		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		info = le32_to_cpu(*cur_info);
		if (info & MT_TXFREE_INFO_PAIR) {
			struct ieee80211_sta *sta;
			unsigned long valid_links;
			struct mt7996_sta *msta;
			unsigned int id;
			u16 idx;

			idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
			wcid = mt76_wcid_ptr(dev, idx);
			sta = wcid_to_sta(wcid);
			if (!sta) {
				link_sta = NULL;
				goto next;
			}

			link_sta = rcu_dereference(sta->link[wcid->link_id]);
			if (!link_sta)
				goto next;

			msta = (struct mt7996_sta *)sta->drv_priv;
			valid_links = sta->valid_links ?: BIT(0);

			/* For an MLD STA, add every link's wcid to the
			 * sta_poll_list
			 */
			for_each_set_bit(id, &valid_links,
					 IEEE80211_MLD_MAX_NUM_LINKS) {
				struct mt7996_sta_link *msta_link;

				msta_link = rcu_dereference(msta->link[id]);
				if (!msta_link)
					continue;

				mt76_wcid_add_poll(&dev->mt76,
						   &msta_link->wcid);
			}
next:
			/* ver 7 has a new DW with pair = 1, skip it */
			if (ver == 7 && ((void *)(cur_info + 1) < end) &&
			    (le32_to_cpu(*(cur_info + 1)) & MT_TXFREE_INFO_PAIR))
				cur_info++;
			continue;
		} else if (info & MT_TXFREE_INFO_HEADER) {
			u32 tx_retries = 0, tx_failed = 0;

			if (!wcid)
				continue;

			tx_retries =
				FIELD_GET(MT_TXFREE_INFO_COUNT, info) - 1;
			tx_failed = tx_retries +
				!!FIELD_GET(MT_TXFREE_INFO_STAT, info);

			wcid->stats.tx_retries += tx_retries;
			wcid->stats.tx_failed += tx_failed;
			continue;
		}

		for (i = 0; i < 2; i++) {
			msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID;
			if (msdu == MT_TXFREE_INFO_MSDU_ID)
				continue;

			count++;
			txwi = mt76_token_release(mdev, msdu, &wake);
			if (!txwi)
				continue;

			mt7996_txwi_free(dev, txwi, link_sta, wcid,
					 &free_list);
		}
	}

	mt7996_mac_sta_poll(dev);

	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}
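/* Consume a single TXS record: complete the matching status skb (if any)
 * and refresh the per-wcid rate and bandwidth statistics from the
 * reported TX rate.
 */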
static bool
mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
		       int pid, __le32 *txs_data)
{
	struct mt76_sta_stats *stats = &wcid->stats;
	struct ieee80211_supported_band *sband;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy;
	struct ieee80211_tx_info *info;
	struct sk_buff_head list;
	struct rate_info rate = {};
	struct sk_buff *skb = NULL;
	bool cck = false;
	u32 txrate, txs, mode, stbc;

	txs = le32_to_cpu(txs_data[0]);

	mt76_tx_status_lock(mdev, &list);

	/* only report MPDU TXS */
	if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) == 0) {
		skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
		if (skb) {
			info = IEEE80211_SKB_CB(skb);
			if (!(txs & MT_TXS0_ACK_ERROR_MASK))
				info->flags |= IEEE80211_TX_STAT_ACK;

			info->status.ampdu_len = 1;
			info->status.ampdu_ack_len =
				!!(info->flags & IEEE80211_TX_STAT_ACK);

			info->status.rates[0].idx = -1;
		}
	}

	if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wcid->sta) {
		struct ieee80211_sta *sta;
		u8 tid;

		sta = wcid_to_sta(wcid);
		tid = FIELD_GET(MT_TXS0_TID, txs);
		ieee80211_refresh_tx_agg_session_timer(sta, tid);
	}

	txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);

	rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
	rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
	stbc = le32_get_bits(txs_data[3], MT_TXS3_RATE_STBC);

	if (stbc && rate.nss > 1)
		rate.nss >>= 1;

	if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
		stats->tx_nss[rate.nss - 1]++;
	if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
		stats->tx_mcs[rate.mcs]++;

	mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
	switch (mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		mphy = mt76_dev_phy(mdev, wcid->phy_idx);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
			sband = &mphy->sband_6g.sband;
		else
			sband = &mphy->sband_2g.sband;

		rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
		rate.legacy = sband->bitrates[rate.mcs].bitrate;
		break;
	case MT_PHY_TYPE_HT:
	case MT_PHY_TYPE_HT_GF:
		if (rate.mcs > 31)
			goto out;

		rate.flags = RATE_INFO_FLAGS_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_VHT:
		if (rate.mcs > 9)
			goto out;

		rate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
	case MT_PHY_TYPE_HE_MU:
		if (rate.mcs > 11)
			goto out;

		rate.he_gi = wcid->rate.he_gi;
		rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
		rate.flags = RATE_INFO_FLAGS_HE_MCS;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		if (rate.mcs > 13)
			goto out;

		rate.eht_gi = wcid->rate.eht_gi;
		rate.flags = RATE_INFO_FLAGS_EHT_MCS;
		break;
	default:
		goto out;
	}

	stats->tx_mode[mode]++;

	switch (FIELD_GET(MT_TXS0_BW, txs)) {
	case IEEE80211_STA_RX_BW_320:
		rate.bw = RATE_INFO_BW_320;
		stats->tx_bw[4]++;
		break;
	case IEEE80211_STA_RX_BW_160:
		rate.bw = RATE_INFO_BW_160;
		stats->tx_bw[3]++;
		break;
	case IEEE80211_STA_RX_BW_80:
		rate.bw = RATE_INFO_BW_80;
		stats->tx_bw[2]++;
		break;
	case IEEE80211_STA_RX_BW_40:
		rate.bw = RATE_INFO_BW_40;
		stats->tx_bw[1]++;
		break;
	default:
		rate.bw = RATE_INFO_BW_20;
		stats->tx_bw[0]++;
		break;
	}
	wcid->rate = rate;

out:
	if (skb)
		mt76_tx_status_skb_done(mdev, skb, &list);
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}

static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
{
	struct mt7996_sta_link *msta_link;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u16 wcidx;
	u8 pid;

	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);

	if (pid < MT_PACKET_ID_NO_SKB)
		return;

	rcu_read_lock();

	wcid = mt76_wcid_ptr(dev, wcidx);
	if (!wcid)
		goto out;

	mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data);

	if (!wcid->sta)
		goto out;

	msta_link = container_of(wcid, struct mt7996_sta_link, wcid);
	mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);

out:
	rcu_read_unlock();
}
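/* Classify an RX buffer before skb processing: returns true when the
 * buffer should go through the regular RX path, false when it has been
 * consumed here (tx-free, TXS, FW monitor).
 */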
bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)data;
	__le32 *end = (__le32 *)&rxd[len / 4];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			return true;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7996_mac_tx_free(dev, data, len);
		return false;
	case PKT_TYPE_TXS:
		for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
			mt7996_mac_add_txs(dev, rxd);
		return false;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, data, len);
		return false;
	default:
		return true;
	}
}

void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb, u32 *info)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			type = PKT_TYPE_NORMAL;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2) &&
		    q == MT_RXQ_TXFREE_BAND2) {
			dev_kfree_skb(skb);
			break;
		}

		mt7996_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7996_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXS:
		for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
			mt7996_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL:
		if (!mt7996_mac_fill_rx(dev, q, skb, info)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}
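/* WED RRO (RX reorder offload) support below: the hardware hands back
 * MSDU pages identified by their DMA address; a free-list cache plus a
 * hash table keyed on that address tracks the pages currently owned by
 * the driver.
 */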
static struct mt7996_msdu_page *
mt7996_msdu_page_get_from_cache(struct mt7996_dev *dev)
{
	struct mt7996_msdu_page *p = NULL;

	spin_lock(&dev->wed_rro.lock);

	if (!list_empty(&dev->wed_rro.page_cache)) {
		p = list_first_entry(&dev->wed_rro.page_cache,
				     struct mt7996_msdu_page, list);
		if (p)
			list_del(&p->list);
	}

	spin_unlock(&dev->wed_rro.lock);

	return p;
}

static struct mt7996_msdu_page *mt7996_msdu_page_get(struct mt7996_dev *dev)
{
	struct mt7996_msdu_page *p;

	p = mt7996_msdu_page_get_from_cache(dev);
	if (!p) {
		p = kzalloc(L1_CACHE_ALIGN(sizeof(*p)), GFP_ATOMIC);
		if (p)
			INIT_LIST_HEAD(&p->list);
	}

	return p;
}

static void mt7996_msdu_page_put_to_cache(struct mt7996_dev *dev,
					  struct mt7996_msdu_page *p)
{
	if (p->buf) {
		mt76_put_page_pool_buf(p->buf, false);
		p->buf = NULL;
	}

	spin_lock(&dev->wed_rro.lock);
	list_add(&p->list, &dev->wed_rro.page_cache);
	spin_unlock(&dev->wed_rro.lock);
}

static void mt7996_msdu_page_free_cache(struct mt7996_dev *dev)
{
	while (true) {
		struct mt7996_msdu_page *p;

		p = mt7996_msdu_page_get_from_cache(dev);
		if (!p)
			break;

		if (p->buf)
			mt76_put_page_pool_buf(p->buf, false);

		kfree(p);
	}
}

static u32 mt7996_msdu_page_hash_from_addr(dma_addr_t dma_addr)
{
	u32 val = 0;
	int i = 0;

	while (dma_addr) {
		val += (u32)((dma_addr & 0xff) + i) % MT7996_RRO_MSDU_PG_HASH_SIZE;
		dma_addr >>= 8;
		i += 13;
	}

	return val % MT7996_RRO_MSDU_PG_HASH_SIZE;
}

static struct mt7996_msdu_page *
mt7996_rro_msdu_page_get(struct mt7996_dev *dev, dma_addr_t dma_addr)
{
	u32 hash = mt7996_msdu_page_hash_from_addr(dma_addr);
	struct mt7996_msdu_page *p, *tmp, *addr = NULL;

	spin_lock(&dev->wed_rro.lock);

	list_for_each_entry_safe(p, tmp, &dev->wed_rro.page_map[hash],
				 list) {
		if (p->dma_addr == dma_addr) {
			list_del(&p->list);
			addr = p;
			break;
		}
	}

	spin_unlock(&dev->wed_rro.lock);

	return addr;
}

static void mt7996_rx_token_put(struct mt7996_dev *dev)
{
	int i;

	for (i = 0; i < dev->mt76.rx_token_size; i++) {
		struct mt76_txwi_cache *t;

		t = mt76_rx_token_release(&dev->mt76, i);
		if (!t || !t->ptr)
			continue;

		mt76_put_page_pool_buf(t->ptr, false);
		t->dma_addr = 0;
		t->ptr = NULL;

		mt76_put_rxwi(&dev->mt76, t);
	}
}

void mt7996_rro_msdu_page_map_free(struct mt7996_dev *dev)
{
	struct mt7996_msdu_page *p, *tmp;
	int i;

	local_bh_disable();

	for (i = 0; i < ARRAY_SIZE(dev->wed_rro.page_map); i++) {
		list_for_each_entry_safe(p, tmp, &dev->wed_rro.page_map[i],
					 list) {
			list_del_init(&p->list);
			if (p->buf)
				mt76_put_page_pool_buf(p->buf, false);
			kfree(p);
		}
	}
	mt7996_msdu_page_free_cache(dev);

	local_bh_enable();

	mt7996_rx_token_put(dev);
}

int mt7996_rro_msdu_page_add(struct mt76_dev *mdev, struct mt76_queue *q,
			     dma_addr_t dma_addr, void *data)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	struct mt7996_msdu_page_info *pinfo = data;
	struct mt7996_msdu_page *p;
	u32 hash;

	pinfo->data |= cpu_to_le32(FIELD_PREP(MSDU_PAGE_INFO_OWNER_MASK, 1));
	p = mt7996_msdu_page_get(dev);
	if (!p)
		return -ENOMEM;

	p->buf = data;
	p->dma_addr = dma_addr;
	p->q = q;

	hash = mt7996_msdu_page_hash_from_addr(dma_addr);

	spin_lock(&dev->wed_rro.lock);
	list_add_tail(&p->list, &dev->wed_rro.page_map[hash]);
	spin_unlock(&dev->wed_rro.lock);

	return 0;
}

static struct mt7996_wed_rro_addr *
mt7996_rro_addr_elem_get(struct mt7996_dev *dev, u16 session_id, u16 seq_num)
{
	u32 idx = 0;
	void *addr;

	if (session_id == MT7996_RRO_MAX_SESSION) {
		addr = dev->wed_rro.session.ptr;
	} else {
		idx = session_id / MT7996_RRO_BA_BITMAP_SESSION_SIZE;
		addr = dev->wed_rro.addr_elem[idx].ptr;

		idx = session_id % MT7996_RRO_BA_BITMAP_SESSION_SIZE;
		idx = idx * MT7996_RRO_WINDOW_MAX_LEN;
	}
	idx += seq_num % MT7996_RRO_WINDOW_MAX_LEN;

	return addr + idx * sizeof(struct mt7996_wed_rro_addr);
}
#define MT996_RRO_SN_MASK	GENMASK(11, 0)

void mt7996_rro_rx_process(struct mt76_dev *mdev, void *data)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	struct mt76_wed_rro_ind *cmd = (struct mt76_wed_rro_ind *)data;
	u32 cmd_data0 = le32_to_cpu(cmd->data0);
	u32 cmd_data1 = le32_to_cpu(cmd->data1);
	u8 ind_reason = FIELD_GET(RRO_IND_DATA0_IND_REASON_MASK, cmd_data0);
	u16 start_seq = FIELD_GET(RRO_IND_DATA0_START_SEQ_MASK, cmd_data0);
	u16 seq_id = FIELD_GET(RRO_IND_DATA0_SEQ_ID_MASK, cmd_data0);
	u16 ind_count = FIELD_GET(RRO_IND_DATA1_IND_COUNT_MASK, cmd_data1);
	struct mt7996_msdu_page_info *pinfo = NULL;
	struct mt7996_msdu_page *p = NULL;
	int i, seq_num = 0;

	for (i = 0; i < ind_count; i++) {
		struct mt7996_wed_rro_addr *e;
		struct mt76_rx_status *status;
		struct mt7996_rro_hif *rxd;
		int j, len, qid, data_len;
		struct mt76_txwi_cache *t;
		dma_addr_t dma_addr = 0;
		u16 rx_token_id, count;
		struct mt76_queue *q;
		struct sk_buff *skb;
		u32 info = 0, data;
		u8 signature;
		void *buf;
		bool ls;

		seq_num = FIELD_GET(MT996_RRO_SN_MASK, start_seq + i);
		e = mt7996_rro_addr_elem_get(dev, seq_id, seq_num);
		data = le32_to_cpu(e->data);
		signature = FIELD_GET(WED_RRO_ADDR_SIGNATURE_MASK, data);
		if (signature != (seq_num / MT7996_RRO_WINDOW_MAX_LEN)) {
			u32 val = FIELD_PREP(WED_RRO_ADDR_SIGNATURE_MASK,
					     0xff);

			e->data |= cpu_to_le32(val);
			goto update_ack_seq_num;
		}

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		dma_addr = FIELD_GET(WED_RRO_ADDR_HEAD_HIGH_MASK, data);
		dma_addr <<= 32;
#endif
		dma_addr |= le32_to_cpu(e->head_low);

		count = FIELD_GET(WED_RRO_ADDR_COUNT_MASK, data);
		for (j = 0; j < count; j++) {
			if (!p) {
				p = mt7996_rro_msdu_page_get(dev, dma_addr);
				if (!p)
					continue;

				dma_sync_single_for_cpu(mdev->dma_dev, p->dma_addr,
							SKB_WITH_OVERHEAD(p->q->buf_size),
							page_pool_get_dma_dir(p->q->page_pool));
				pinfo = (struct mt7996_msdu_page_info *)p->buf;
			}

			rxd = &pinfo->rxd[j % MT7996_MAX_HIF_RXD_IN_PG];
			len = FIELD_GET(RRO_HIF_DATA1_SDL_MASK,
					le32_to_cpu(rxd->data1));

			rx_token_id = FIELD_GET(RRO_HIF_DATA4_RX_TOKEN_ID_MASK,
						le32_to_cpu(rxd->data4));
			t = mt76_rx_token_release(mdev, rx_token_id);
			if (!t)
				goto next_page;

			qid = t->qid;
			buf = t->ptr;
			q = &mdev->q_rx[qid];
			dma_sync_single_for_cpu(mdev->dma_dev, t->dma_addr,
						SKB_WITH_OVERHEAD(q->buf_size),
						page_pool_get_dma_dir(q->page_pool));

			t->dma_addr = 0;
			t->ptr = NULL;
			mt76_put_rxwi(mdev, t);
			if (!buf)
				goto next_page;

			if (q->rx_head)
				data_len = q->buf_size;
			else
				data_len = SKB_WITH_OVERHEAD(q->buf_size);

			if (data_len < len + q->buf_offset) {
				dev_kfree_skb(q->rx_head);
				mt76_put_page_pool_buf(buf, false);
				q->rx_head = NULL;
				goto next_page;
			}
				mt76_put_page_pool_buf(buf, false);
				if (ls) {
					dev_kfree_skb(q->rx_head);
					q->rx_head = NULL;
				}
				goto next_page;
			}

			if (ls && !mt7996_rx_check(mdev, buf, len))
				goto next_page;

			skb = build_skb(buf, q->buf_size);
			if (!skb)
				goto next_page;

			skb_reserve(skb, q->buf_offset);
			skb_mark_for_recycle(skb);
			__skb_put(skb, len);

			if (ind_reason == 1 || ind_reason == 2) {
				dev_kfree_skb(skb);
				goto next_page;
			}

			if (!ls) {
				q->rx_head = skb;
				goto next_page;
			}

			status = (struct mt76_rx_status *)skb->cb;
			if (seq_id != MT7996_RRO_MAX_SESSION)
				status->aggr = true;

			mt7996_queue_rx_skb(mdev, qid, skb, &info);
next_page:
			if ((j + 1) % MT7996_MAX_HIF_RXD_IN_PG == 0) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
				dma_addr =
					FIELD_GET(MSDU_PAGE_INFO_PG_HIGH_MASK,
						  le32_to_cpu(pinfo->data));
				dma_addr <<= 32;
				dma_addr |= le32_to_cpu(pinfo->pg_low);
#else
				dma_addr = le32_to_cpu(pinfo->pg_low);
#endif
				mt7996_msdu_page_put_to_cache(dev, p);
				p = NULL;
			}
		}

update_ack_seq_num:
		if ((i + 1) % 4 == 0)
			mt76_wr(dev, MT_RRO_ACK_SN_CTRL,
				FIELD_PREP(MT_RRO_ACK_SN_CTRL_SESSION_MASK,
					   seq_id) |
				FIELD_PREP(MT_RRO_ACK_SN_CTRL_SN_MASK,
					   seq_num));
		if (p) {
			mt7996_msdu_page_put_to_cache(dev, p);
			p = NULL;
		}
	}

	/* Update ack_seq_num for remaining addr_elem */
	if (i % 4)
		mt76_wr(dev, MT_RRO_ACK_SN_CTRL,
			FIELD_PREP(MT_RRO_ACK_SN_CTRL_SESSION_MASK, seq_id) |
			FIELD_PREP(MT_RRO_ACK_SN_CTRL_SN_MASK, seq_num));
}

void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u32 reg = MT_WF_PHYRX_BAND_RX_CTRL1(phy->mt76->band_idx);

	mt76_clear(dev, reg, MT_WF_PHYRX_BAND_RX_CTRL1_STSCNT_EN);
	mt76_set(dev, reg, BIT(11) | BIT(9));
}

void mt7996_mac_reset_counters(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	int i;

	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));

	phy->mt76->survey_time = ktime_get_boottime();

	memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));

	/* reset airtime counters */
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band_idx),
		 MT_WF_RMAC_MIB_RXTIME_CLR);

	mt7996_mcu_get_chan_mib_info(phy, true);
}
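/*
 * Each coverage class step widens the CCK and OFDM PLCP/CCA timeouts by
 * three units to tolerate longer air-propagation delays; the largest
 * class configured on any active phy is applied.
 */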
void mt7996_mac_set_coverage_class(struct mt7996_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7996_dev *dev = phy->dev;
	struct mt7996_phy *phy2 = mt7996_phy2(dev);
	struct mt7996_phy *phy3 = mt7996_phy3(dev);
	u32 reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	u8 band_idx = phy->mt76->band_idx;
	int offset;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	if (phy2)
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       phy2->coverage_class);

	if (phy3)
		coverage_class = max_t(s16, coverage_class,
				       phy3->coverage_class);

	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(band_idx), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(band_idx), ofdm + reg_offset);
}

void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band)
{
	mt76_set(dev, MT_WF_PHYRX_CSD_BAND_RXTD12(band),
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR_ONLY |
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR);

	mt76_set(dev, MT_WF_PHYRX_BAND_RX_CTRL1(band),
		 FIELD_PREP(MT_WF_PHYRX_BAND_RX_CTRL1_IPI_EN, 0x5));
}

static u8
mt7996_phy_get_nf(struct mt7996_phy *phy, u8 band_idx)
{
	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
	struct mt7996_dev *dev = phy->dev;
	u32 val, sum = 0, n = 0;
	int ant, i;

	for (ant = 0; ant < hweight8(phy->mt76->antenna_mask); ant++) {
		u32 reg = MT_WF_PHYRX_CSD_IRPI(band_idx, ant);

		for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
			val = mt76_rr(dev, reg);
			sum += val * nf_power[i];
			n += val;
		}
	}

	return n ? sum / n : 0;
}

void mt7996_update_channel(struct mt76_phy *mphy)
{
	struct mt7996_phy *phy = mphy->priv;
	struct mt76_channel_state *state = mphy->chan_state;
	int nf;

	mt7996_mcu_get_chan_mib_info(phy, false);

	nf = mt7996_phy_get_nf(phy, mphy->band_idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state->noise = -(phy->noise >> 4);
}

static bool
mt7996_wait_reset_state(struct mt7996_dev *dev, u32 state)
{
	bool ret;

	ret = wait_event_timeout(dev->reset_wait,
				 (READ_ONCE(dev->recovery.state) & state),
				 MT7996_RESET_TIMEOUT);

	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
	return ret;
}

static void
mt7996_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct ieee80211_bss_conf *link_conf;
	struct mt7996_phy *phy = priv;
	struct mt7996_dev *dev = phy->dev;
	unsigned int link_id;

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
		break;
	default:
		return;
	}

	for_each_vif_active_link(vif, link_conf, link_id) {
		struct mt7996_vif_link *link;

		link = mt7996_vif_link(dev, vif, link_id);
		if (!link || link->phy != phy)
			continue;

		mt7996_mcu_add_beacon(dev->mt76.hw, vif, link_conf,
				      link_conf->enable_beacon);
	}
}

void mt7996_mac_update_beacons(struct mt7996_phy *phy)
{
	ieee80211_iterate_active_interfaces(phy->mt76->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, phy);
}

static void
mt7996_update_beacons(struct mt7996_dev *dev)
{
	struct mt76_phy *phy2, *phy3;

	mt7996_mac_update_beacons(&dev->phy);

	phy2 = dev->mt76.phys[MT_BAND1];
	if (phy2)
		mt7996_mac_update_beacons(phy2->priv);

	phy3 = dev->mt76.phys[MT_BAND2];
	if (phy3)
		mt7996_mac_update_beacons(phy3->priv);
}

void mt7996_tx_token_put(struct mt7996_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id) {
		mt7996_txwi_free(dev, txwi, NULL, NULL, NULL);
		dev->mt76.token_count--;
	}
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}
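/*
 * Full MAC restart: quiesce interrupts and NAPI, reset the DMA engine,
 * reload firmware and EEPROM data, reinitialize TXBF and per-phy
 * txpower, and finally re-run every phy that was active before the
 * reset.
 */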
static int
mt7996_mac_restart(struct mt7996_dev *dev)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt7996_phy *phy;
	int i, ret;

	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}

	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
	}

	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	mt7996_for_each_phy(dev, phy)
		set_bit(MT76_RESET, &phy->mt76->state);
	wake_up(&dev->mt76.mcu.wait);

	/* lock/unlock all queues to ensure that no tx is pending */
	mt7996_for_each_phy(dev, phy)
		mt76_txq_schedule_all(phy->mt76);

	/* disable all tx/rx napi */
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(mdev, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&mdev->q_rx[i]))
			continue;

		if (mdev->q_rx[i].ndesc)
			napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	/* token reinit */
	mt7996_tx_token_put(dev);
	idr_init(&dev->mt76.token);

	mt7996_dma_reset(dev, true);

	mt76_for_each_q_rx(mdev, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&mdev->q_rx[i]))
			continue;

		if (mdev->q_rx[i].ndesc) {
			napi_enable(&dev->mt76.napi[i]);
			local_bh_disable();
			napi_schedule(&dev->mt76.napi[i]);
			local_bh_enable();
		}
	}
	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);

	mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}
	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
	}

	/* load firmware */
	ret = mt7996_mcu_init_firmware(dev);
	if (ret)
		goto out;

	if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
	    mt7996_has_hwrro(dev)) {
		u32 wed_irq_mask = dev->mt76.mmio.irqmask |
				   MT_INT_TX_DONE_BAND2;

		mt7996_rro_hw_init(dev);
		mt76_for_each_q_rx(&dev->mt76, i) {
			if (mt76_queue_is_wed_rro_ind(&dev->mt76.q_rx[i]) ||
			    mt76_queue_is_wed_rro_msdu_pg(&dev->mt76.q_rx[i]))
				mt76_queue_rx_reset(dev, i);
		}

		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
		mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
					    false);
		mt7996_irq_enable(dev, wed_irq_mask);
		mt7996_irq_disable(dev, 0);
	}

	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) {
		mt76_wr(dev, MT_INT_PCIE1_MASK_CSR,
			MT_INT_TX_RX_DONE_EXT);
		mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
				     MT_INT_TX_RX_DONE_EXT);
	}

	/* set the necessary init items */
	ret = mt7996_mcu_set_eeprom(dev);
	if (ret)
		goto out;

	mt7996_mac_init(dev);
	mt7996_for_each_phy(dev, phy)
		mt7996_init_txpower(phy);
	ret = mt7996_txbf_init(dev);
	if (ret)
		goto out;

	mt7996_for_each_phy(dev, phy) {
		if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
			continue;

		ret = mt7996_run(phy);
		if (ret)
			goto out;
	}

out:
	/* reset done */
	mt7996_for_each_phy(dev, phy)
		clear_bit(MT76_RESET, &phy->mt76->state);

	napi_enable(&dev->mt76.tx_napi);
	local_bh_disable();
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	mt76_worker_enable(&dev->mt76.tx_worker);
	return ret;
}
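/*
 * Tear down every station link during a full reset. The default link is
 * embedded in struct mt7996_sta and is only unreferenced here; all
 * other links are freed after an RCU grace period.
 */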
static void
mt7996_mac_reset_sta_iter(void *data, struct ieee80211_sta *sta)
{
	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
	struct mt7996_dev *dev = data;
	int i;

	for (i = 0; i < ARRAY_SIZE(msta->link); i++) {
		struct mt7996_sta_link *msta_link = NULL;

		msta_link = rcu_replace_pointer(msta->link[i], msta_link,
						lockdep_is_held(&dev->mt76.mutex));
		if (!msta_link)
			continue;

		mt7996_mac_sta_deinit_link(dev, msta_link);

		if (msta->deflink_id == i) {
			msta->deflink_id = IEEE80211_LINK_UNSPECIFIED;
			continue;
		}

		kfree_rcu(msta_link, rcu_head);
	}
}

static void
mt7996_mac_reset_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
	struct mt76_vif_data *mvif = mlink->mvif;
	struct mt7996_dev *dev = data;
	int i;

	rcu_read_lock();
	for (i = 0; i < ARRAY_SIZE(mvif->link); i++) {
		mlink = mt76_dereference(mvif->link[i], &dev->mt76);
		if (!mlink || mlink == (struct mt76_vif_link *)vif->drv_priv)
			continue;

		rcu_assign_pointer(mvif->link[i], NULL);
		kfree_rcu(mlink, rcu_head);
	}
	rcu_read_unlock();
}

static void
mt7996_mac_full_reset(struct mt7996_dev *dev)
{
	struct ieee80211_hw *hw = mt76_hw(dev);
	struct mt7996_phy *phy;
	LIST_HEAD(list);
	int i;

	dev->recovery.hw_full_reset = true;

	wake_up(&dev->mt76.mcu.wait);
	ieee80211_stop_queues(hw);

	cancel_work_sync(&dev->wed_rro.work);
	mt7996_for_each_phy(dev, phy)
		cancel_delayed_work_sync(&phy->mt76->mac_work);

	mutex_lock(&dev->mt76.mutex);
	for (i = 0; i < 10; i++) {
		if (!mt7996_mac_restart(dev))
			break;
	}

	if (i == 10)
		dev_err(dev->mt76.dev, "chip full reset failed\n");

	mt7996_for_each_phy(dev, phy)
		phy->omac_mask = 0;

	ieee80211_iterate_stations_atomic(hw, mt7996_mac_reset_sta_iter, dev);
	ieee80211_iterate_active_interfaces_atomic(hw,
						   IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER,
						   mt7996_mac_reset_vif_iter, dev);
	mt76_reset_device(&dev->mt76);

	INIT_LIST_HEAD(&dev->sta_rc_list);
	INIT_LIST_HEAD(&dev->twt_list);

	spin_lock_bh(&dev->wed_rro.lock);
	list_splice_init(&dev->wed_rro.poll_list, &list);
	spin_unlock_bh(&dev->wed_rro.lock);

	while (!list_empty(&list)) {
		struct mt7996_wed_rro_session_id *e;

		e = list_first_entry(&list, struct mt7996_wed_rro_session_id,
				     list);
		list_del_init(&e->list);
		kfree(e);
	}

	i = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7996_WTBL_STA);
	dev->mt76.global_wcid.idx = i;
	dev->recovery.hw_full_reset = false;

	mutex_unlock(&dev->mt76.mutex);

	ieee80211_restart_hw(mt76_hw(dev));
}
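/*
 * SER (system error recovery) entry point. A pending restart request
 * triggers the full chip reset above; otherwise the lighter L1 recovery
 * path re-synchronizes DMA with the MCU without tearing down interface
 * state.
 */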
void mt7996_mac_reset_work(struct work_struct *work)
{
	struct ieee80211_hw *hw;
	struct mt7996_dev *dev;
	struct mt7996_phy *phy;
	int i;

	dev = container_of(work, struct mt7996_dev, reset_work);
	hw = mt76_hw(dev);

	/* chip full reset */
	if (dev->recovery.restart) {
		/* disable WA/WM WDT */
		mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA,
			   MT_MCU_CMD_WDT_MASK);

		if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT)
			dev->recovery.wa_reset_count++;
		else
			dev->recovery.wm_reset_count++;

		mt7996_mac_full_reset(dev);

		/* enable mcu irq */
		mt7996_irq_enable(dev, MT_INT_MCU_CMD);
		mt7996_irq_disable(dev, 0);

		/* enable WA/WM WDT */
		mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);

		dev->recovery.state = MT_MCU_CMD_NORMAL_STATE;
		dev->recovery.restart = false;
		return;
	}

	if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
		return;

	dev_info(dev->mt76.dev, "%s L1 SER recovery start\n",
		 wiphy_name(hw->wiphy));

	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
		mtk_wed_device_stop(&dev->mt76.mmio.wed_hif2);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
		mtk_wed_device_stop(&dev->mt76.mmio.wed);

	ieee80211_stop_queues(mt76_hw(dev));

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	mt76_abort_scan(&dev->mt76);
	wake_up(&dev->mt76.mcu.wait);

	cancel_work_sync(&dev->wed_rro.work);
	mt7996_for_each_phy(dev, phy) {
		mt76_abort_roc(phy->mt76);
		set_bit(MT76_RESET, &phy->mt76->state);
		cancel_delayed_work_sync(&phy->mt76->mac_work);
	}

	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
			continue;

		napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);

	if (mt7996_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7996_dma_reset(dev, false);

		mt7996_tx_token_put(dev);
		idr_init(&dev->mt76.token);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
		mt7996_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	/* enable DMA Tx/Rx and interrupt */
	mt7996_dma_start(dev, false, false);

	if (!is_mt7996(&dev->mt76) && dev->mt76.hwrro_mode == MT76_HWRRO_V3)
		mt76_wr(dev, MT_RRO_3_0_EMU_CONF, MT_RRO_3_0_EMU_CONF_EN_MASK);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
		u32 wed_irq_mask = MT_INT_TX_DONE_BAND2 |
				   dev->mt76.mmio.irqmask;

		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
		mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
					    true);
		mt7996_irq_enable(dev, wed_irq_mask);
		mt7996_irq_disable(dev, 0);
	}

	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) {
		mt76_wr(dev, MT_INT_PCIE1_MASK_CSR, MT_INT_TX_RX_DONE_EXT);
		mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
				     MT_INT_TX_RX_DONE_EXT);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	mt7996_for_each_phy(dev, phy)
		clear_bit(MT76_RESET, &phy->mt76->state);

	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
			continue;

		napi_enable(&dev->mt76.napi[i]);
		local_bh_disable();
		napi_schedule(&dev->mt76.napi[i]);
		local_bh_enable();
	}
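	/* have the irq tasklet pick up events that arrived during recovery */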
	tasklet_schedule(&dev->mt76.irq_tasklet);

	mt76_worker_enable(&dev->mt76.tx_worker);

	napi_enable(&dev->mt76.tx_napi);
	local_bh_disable();
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	ieee80211_wake_queues(hw);

	mutex_unlock(&dev->mt76.mutex);

	mt7996_update_beacons(dev);

	mt7996_for_each_phy(dev, phy)
		ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	dev_info(dev->mt76.dev, "%s L1 SER recovery completed\n",
		 wiphy_name(dev->mt76.hw->wiphy));
}

/* firmware coredump */
void mt7996_mac_dump_work(struct work_struct *work)
{
	const struct mt7996_mem_region *mem_region;
	struct mt7996_crash_data *crash_data;
	struct mt7996_dev *dev;
	struct mt7996_mem_hdr *hdr;
	size_t buf_len;
	int i;
	u32 num;
	u8 *buf;

	dev = container_of(work, struct mt7996_dev, dump_work);

	mutex_lock(&dev->dump_mutex);

	crash_data = mt7996_coredump_new(dev);
	if (!crash_data) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_coredump;
	}

	mem_region = mt7996_coredump_get_mem_layout(dev, &num);
	if (!mem_region || !crash_data->memdump_buf_len) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_memdump;
	}

	buf = crash_data->memdump_buf;
	buf_len = crash_data->memdump_buf_len;

	/* dumping memory content... */
	memset(buf, 0, buf_len);
	for (i = 0; i < num; i++) {
		if (mem_region->len > buf_len) {
			dev_warn(dev->mt76.dev, "%s len %zu is too large\n",
				 mem_region->name, mem_region->len);
			break;
		}

		/* reserve space for the header */
		hdr = (void *)buf;
		buf += sizeof(*hdr);
		buf_len -= sizeof(*hdr);

		mt7996_memcpy_fromio(dev, buf, mem_region->start,
				     mem_region->len);

		hdr->start = mem_region->start;
		hdr->len = mem_region->len;

		if (!mem_region->len)
			/* note: the header remains, just with zero length */
			break;

		buf += mem_region->len;
		buf_len -= mem_region->len;

		mem_region++;
	}

	mutex_unlock(&dev->dump_mutex);

skip_memdump:
	mt7996_coredump_submit(dev);
skip_coredump:
	queue_work(dev->mt76.wq, &dev->reset_work);
}

void mt7996_reset(struct mt7996_dev *dev)
{
	if (!dev->recovery.hw_init_done)
		return;

	if (dev->recovery.hw_full_reset)
		return;

	/* wm/wa exception: do full recovery */
	if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) {
		dev->recovery.restart = true;
		dev_info(dev->mt76.dev,
			 "%s indicated firmware crash, attempting recovery\n",
			 wiphy_name(dev->mt76.hw->wiphy));

		mt7996_irq_disable(dev, MT_INT_MCU_CMD);
		queue_work(dev->mt76.wq, &dev->dump_work);
		return;
	}

	queue_work(dev->mt76.wq, &dev->reset_work);
	wake_up(&dev->reset_wait);
}
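/*
 * Periodic MIB harvest, run from the mac work every five ticks. The
 * hardware counters clear on read, so each register is read once and
 * accumulated into the software mib state.
 */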
void mt7996_mac_update_stats(struct mt7996_phy *phy)
{
	struct mt76_mib_stats *mib = &phy->mib;
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	u32 cnt;
	int i;

	cnt = mt76_rr(dev, MT_MIB_RSCR1(band_idx));
	mib->fcs_err_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR33(band_idx));
	mib->rx_fifo_full_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR31(band_idx));
	mib->rx_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR6(band_idx));
	mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_RVSR0(band_idx));
	mib->rx_vector_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR35(band_idx));
	mib->rx_delimiter_fail_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR36(band_idx));
	mib->rx_len_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR0(band_idx));
	mib->tx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR2(band_idx));
	mib->tx_stop_q_empty_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR3(band_idx));
	mib->tx_mpdu_attempts_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR4(band_idx));
	mib->tx_mpdu_success_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR27(band_idx));
	mib->rx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR28(band_idx));
	mib->rx_ampdu_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR29(band_idx));
	mib->rx_ampdu_valid_subframe_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR30(band_idx));
	mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR27(band_idx));
	mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR28(band_idx));
	mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT, cnt);

	cnt = mt76_rr(dev, MT_UMIB_RPDCR(band_idx));
	mib->rx_pfdrop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RVSR1(band_idx));
	mib->rx_vec_queue_overflow_drop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR1(band_idx));
	mib->rx_ba_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR0(band_idx));
	mib->tx_bf_ebf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR1(band_idx));
	mib->tx_bf_ibf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR2(band_idx));
	mib->tx_mu_bf_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR5(band_idx));
	mib->tx_mu_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR6(band_idx));
	mib->tx_mu_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR7(band_idx));
	mib->tx_su_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR3(band_idx));
	mib->tx_bf_rx_fb_ht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR4(band_idx));
	mib->tx_bf_rx_fb_vht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR5(band_idx));
	mib->tx_bf_rx_fb_he_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR6(band_idx));
	mib->tx_bf_rx_fb_eht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(band_idx));
	mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt);
	mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt);
	mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt);

	cnt = mt76_rr(dev, MT_MIB_BSCR7(band_idx));
	mib->tx_bf_fb_trig_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR17(band_idx));
	mib->tx_bf_fb_cpl_cnt += cnt;

	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
		cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
		mib->tx_amsdu[i] += cnt;
		mib->tx_amsdu_cnt += cnt;
	}

	/* rts count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR5(band_idx));
	mib->rts_cnt += cnt;

	/* rts retry count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR6(band_idx));
	mib->rts_retries_cnt += cnt;

	/* ba miss count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR0(band_idx));
	mib->ba_miss_cnt += cnt;

	/* ack fail count */
	cnt = mt76_rr(dev, MT_MIB_BFTFCR(band_idx));
	mib->ack_fail_cnt += cnt;

	for (i = 0; i < 16; i++) {
		cnt = mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));
		phy->mt76->aggr_stats[i] += cnt;
	}
}
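/*
 * Deferred rate-control updates. Station links queue themselves on
 * sta_rc_list with a mask of pending changes; the worker drains the
 * list and pushes rate and SMPS updates to the MCU outside of atomic
 * context.
 */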
void mt7996_mac_sta_rc_work(struct work_struct *work)
{
	struct mt7996_dev *dev = container_of(work, struct mt7996_dev, rc_work);
	struct mt7996_sta_link *msta_link;
	struct ieee80211_vif *vif;
	struct mt7996_vif *mvif;
	LIST_HEAD(list);
	u32 changed;

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->sta_rc_list, &list);

	while (!list_empty(&list)) {
		msta_link = list_first_entry(&list, struct mt7996_sta_link,
					     rc_list);
		list_del_init(&msta_link->rc_list);

		changed = msta_link->changed;
		msta_link->changed = 0;
		mvif = msta_link->sta->vif;
		vif = container_of((void *)mvif, struct ieee80211_vif,
				   drv_priv);

		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
			       IEEE80211_RC_NSS_CHANGED |
			       IEEE80211_RC_BW_CHANGED))
			mt7996_mcu_add_rate_ctrl(dev, msta_link->sta, vif,
						 msta_link->wcid.link_id,
						 true);

		if (changed & IEEE80211_RC_SMPS_CHANGED)
			mt7996_mcu_set_fixed_field(dev, msta_link->sta, NULL,
						   msta_link->wcid.link_id,
						   RATE_PARAM_MMPS_UPDATE);

		spin_lock_bh(&dev->mt76.sta_poll_lock);
	}

	spin_unlock_bh(&dev->mt76.sta_poll_lock);
}

void mt7996_mac_work(struct work_struct *work)
{
	struct mt7996_phy *phy;
	struct mt76_phy *mphy;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mutex_lock(&mphy->dev->mutex);

	mt76_update_survey(mphy);
	if (++mphy->mac_work_count == 5) {
		mphy->mac_work_count = 0;

		mt7996_mac_update_stats(phy);

		mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_RATE);
		if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
			mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_ADM_STAT);
			mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_MSDU_COUNT);
		}
	}

	mutex_unlock(&mphy->dev->mutex);

	mt76_tx_status_check(mphy->dev, false);

	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
				     MT7996_WATCHDOG_TIME);
}

static void mt7996_dfs_stop_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	int rdd_idx = mt7996_get_rdd_idx(phy, false);

	if (rdd_idx < 0)
		return;

	mt7996_mcu_rdd_cmd(dev, RDD_STOP, rdd_idx, 0);
}

static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int rdd_idx)
{
	int err, region;

	switch (dev->mt76.region) {
	case NL80211_DFS_ETSI:
		region = 0;
		break;
	case NL80211_DFS_JP:
		region = 2;
		break;
	case NL80211_DFS_FCC:
	default:
		region = 1;
		break;
	}

	err = mt7996_mcu_rdd_cmd(dev, RDD_START, rdd_idx, region);
	if (err < 0)
		return err;

	return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, rdd_idx, 1);
}
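/*
 * Bring up radar detection on the phy's RDD chain: CAC is started
 * first, then the detector itself with the region-specific pattern set
 * programmed by mt7996_dfs_init_radar_specs().
 */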
static int mt7996_dfs_start_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	int err, rdd_idx;

	rdd_idx = mt7996_get_rdd_idx(phy, false);
	if (rdd_idx < 0)
		return -EINVAL;

	/* start CAC */
	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, rdd_idx, 0);
	if (err < 0)
		return err;

	err = mt7996_dfs_start_rdd(dev, rdd_idx);

	return err;
}

static int
mt7996_dfs_init_radar_specs(struct mt7996_phy *phy)
{
	const struct mt7996_dfs_radar_spec *radar_specs;
	struct mt7996_dev *dev = phy->dev;
	int err, i;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		err = mt7996_mcu_set_fcc5_lpn(dev, 8);
		if (err < 0)
			return err;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7996_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7996_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}

int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	enum mt76_dfs_state dfs_state, prev_state;
	int err, rdd_idx = mt7996_get_rdd_idx(phy, false);

	prev_state = phy->mt76->dfs_state;
	dfs_state = mt76_phy_dfs_state(phy->mt76);

	if (prev_state == dfs_state || rdd_idx < 0)
		return 0;

	if (prev_state == MT_DFS_STATE_UNKNOWN)
		mt7996_dfs_stop_radar_detector(phy);

	if (dfs_state == MT_DFS_STATE_DISABLED)
		goto stop;

	if (prev_state <= MT_DFS_STATE_DISABLED) {
		err = mt7996_dfs_init_radar_specs(phy);
		if (err < 0)
			return err;

		err = mt7996_dfs_start_radar_detector(phy);
		if (err < 0)
			return err;

		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
	}

	if (dfs_state == MT_DFS_STATE_CAC)
		return 0;

	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END, rdd_idx, 0);
	if (err < 0) {
		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
		return err;
	}

	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
	return 0;

stop:
	err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START, rdd_idx, 0);
	if (err < 0)
		return err;

	mt7996_dfs_stop_radar_detector(phy);
	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;

	return 0;
}

static int
mt7996_mac_twt_duration_align(int duration)
{
	return duration << 8;
}

static u64
mt7996_mac_twt_sched_list_add(struct mt7996_dev *dev,
			      struct mt7996_twt_flow *flow)
{
	struct mt7996_twt_flow *iter, *iter_next;
	u32 duration = flow->duration << 8;
	u64 start_tsf;

	iter = list_first_entry_or_null(&dev->twt_list,
					struct mt7996_twt_flow, list);
	if (!iter || !iter->sched || iter->start_tsf > duration) {
		/* add flow as first entry in the list */
		list_add(&flow->list, &dev->twt_list);
		return 0;
	}

	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
		start_tsf = iter->start_tsf +
			    mt7996_mac_twt_duration_align(iter->duration);
		if (list_is_last(&iter->list, &dev->twt_list))
			break;

		if (!iter_next->sched ||
		    iter_next->start_tsf > start_tsf + duration) {
			list_add(&flow->list, &iter->list);
			goto out;
		}
	}

	/* add flow as last entry in the list */
	list_add_tail(&flow->list, &dev->twt_list);
out:
	return start_tsf;
}
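/*
 * TWT times are negotiated in units of 256 us: a min_twt_dur of 255
 * describes a wake duration of 255 << 8 = 65280 us, while the wake
 * interval is mantissa << exp. Agreements whose interval cannot contain
 * the requested duration are rejected below.
 */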
static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
{
	struct ieee80211_twt_params *twt_agrt;
	u64 interval, duration;
	u16 mantissa;
	u8 exp;

	/* only individual agreement supported */
	if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
		return -EOPNOTSUPP;

	/* only 256us unit supported */
	if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
		return -EOPNOTSUPP;

	twt_agrt = (struct ieee80211_twt_params *)twt->params;

	/* explicit agreement not supported */
	if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
		return -EOPNOTSUPP;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
			le16_to_cpu(twt_agrt->req_type));
	mantissa = le16_to_cpu(twt_agrt->mantissa);
	duration = twt_agrt->min_twt_dur << 8;

	interval = (u64)mantissa << exp;
	if (interval < duration)
		return -EOPNOTSUPP;

	return 0;
}

static bool
mt7996_mac_twt_param_equal(struct mt7996_sta_link *msta_link,
			   struct ieee80211_twt_params *twt_agrt)
{
	u16 type = le16_to_cpu(twt_agrt->req_type);
	u8 exp;
	int i;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
	for (i = 0; i < MT7996_MAX_STA_TWT_AGRT; i++) {
		struct mt7996_twt_flow *f;

		if (!(msta_link->twt.flowid_mask & BIT(i)))
			continue;

		f = &msta_link->twt.flow[i];
		if (f->duration == twt_agrt->min_twt_dur &&
		    f->mantissa == twt_agrt->mantissa &&
		    f->exp == exp &&
		    f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
		    f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
		    f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
			return true;
	}

	return false;
}

void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta,
			      struct ieee80211_twt_setup *twt)
{
	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
	struct mt7996_sta_link *msta_link = &msta->deflink;
	u16 req_type = le16_to_cpu(twt_agrt->req_type);
	enum ieee80211_twt_setup_cmd sta_setup_cmd;
	struct mt7996_dev *dev = mt7996_hw_dev(hw);
	struct mt7996_twt_flow *flow;
	u8 flowid, table_id, exp;

	if (mt7996_mac_check_twt_req(twt))
		goto out;

	mutex_lock(&dev->mt76.mutex);

	if (dev->twt.n_agrt == MT7996_MAX_TWT_AGRT)
		goto unlock;

	if (hweight8(msta_link->twt.flowid_mask) ==
	    ARRAY_SIZE(msta_link->twt.flow))
		goto unlock;

	if (twt_agrt->min_twt_dur < MT7996_MIN_TWT_DUR) {
		setup_cmd = TWT_SETUP_CMD_DICTATE;
		twt_agrt->min_twt_dur = MT7996_MIN_TWT_DUR;
		goto unlock;
	}

	if (mt7996_mac_twt_param_equal(msta_link, twt_agrt))
		goto unlock;

	flowid = ffs(~msta_link->twt.flowid_mask) - 1;
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
	twt_agrt->req_type |= le16_encode_bits(flowid,
					       IEEE80211_TWT_REQTYPE_FLOWID);

	table_id = ffs(~dev->twt.table_mask) - 1;
	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);

	flow = &msta_link->twt.flow[flowid];
	memset(flow, 0, sizeof(*flow));
	INIT_LIST_HEAD(&flow->list);
	flow->wcid = msta_link->wcid.idx;
	flow->table_id = table_id;
	flow->id = flowid;
	flow->duration = twt_agrt->min_twt_dur;
	flow->mantissa = twt_agrt->mantissa;
	flow->exp = exp;
	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);

	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
		u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
		u64 flow_tsf, curr_tsf;
		u32 rem;

		flow->sched = true;
		flow->start_tsf = mt7996_mac_twt_sched_list_add(dev, flow);
		curr_tsf = __mt7996_get_tsf(hw, &msta->vif->deflink);
		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
		flow_tsf = curr_tsf + interval - rem;
		twt_agrt->twt = cpu_to_le64(flow_tsf);
	} else {
		list_add_tail(&flow->list, &dev->twt_list);
	}
	flow->tsf = le64_to_cpu(twt_agrt->twt);

	if (mt7996_mcu_twt_agrt_update(dev, &msta->vif->deflink, flow,
				       MCU_TWT_AGRT_ADD))
		goto unlock;

	setup_cmd = TWT_SETUP_CMD_ACCEPT;
	dev->twt.table_mask |= BIT(table_id);
	msta_link->twt.flowid_mask |= BIT(flowid);
	dev->twt.n_agrt++;

unlock:
	mutex_unlock(&dev->mt76.mutex);
out:
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt_agrt->req_type |=
		le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt->control = twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED;
}

void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
				  struct mt7996_vif_link *link,
				  struct mt7996_sta_link *msta_link,
				  u8 flowid)
{
	struct mt7996_twt_flow *flow;

	lockdep_assert_held(&dev->mt76.mutex);

	if (flowid >= ARRAY_SIZE(msta_link->twt.flow))
		return;

	if (!(msta_link->twt.flowid_mask & BIT(flowid)))
		return;

	flow = &msta_link->twt.flow[flowid];
	if (mt7996_mcu_twt_agrt_update(dev, link, flow, MCU_TWT_AGRT_DELETE))
		return;

	list_del_init(&flow->list);
	msta_link->twt.flowid_mask &= ~BIT(flowid);
	dev->twt.table_mask &= ~BIT(flow->table_id);
	dev->twt.n_agrt--;
}