1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 /* 3 * Copyright (C) 2022 MediaTek Inc. 4 */ 5 6 #include <linux/etherdevice.h> 7 #include <linux/timekeeping.h> 8 #include "coredump.h" 9 #include "mt7996.h" 10 #include "../dma.h" 11 #include "mac.h" 12 #include "mcu.h" 13 14 #define to_rssi(field, rcpi) ((FIELD_GET(field, rcpi) - 220) / 2) 15 16 static const struct mt7996_dfs_radar_spec etsi_radar_specs = { 17 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, 18 .radar_pattern = { 19 [5] = { 1, 0, 6, 32, 28, 0, 990, 5010, 17, 1, 1 }, 20 [6] = { 1, 0, 9, 32, 28, 0, 615, 5010, 27, 1, 1 }, 21 [7] = { 1, 0, 15, 32, 28, 0, 240, 445, 27, 1, 1 }, 22 [8] = { 1, 0, 12, 32, 28, 0, 240, 510, 42, 1, 1 }, 23 [9] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 }, 24 [10] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 }, 25 [11] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 }, 26 [12] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 }, 27 }, 28 }; 29 30 static const struct mt7996_dfs_radar_spec fcc_radar_specs = { 31 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, 32 .radar_pattern = { 33 [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 }, 34 [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 }, 35 [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 }, 36 [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 }, 37 [4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 }, 38 }, 39 }; 40 41 static const struct mt7996_dfs_radar_spec jp_radar_specs = { 42 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, 43 .radar_pattern = { 44 [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 }, 45 [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 }, 46 [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 }, 47 [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 }, 48 [4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 }, 49 [13] = { 1, 0, 7, 32, 28, 0, 3836, 3856, 14, 1, 1 }, 50 [14] = { 1, 0, 6, 32, 28, 0, 615, 5010, 110, 1, 1 }, 51 [15] = { 1, 1, 0, 0, 0, 0, 15, 5010, 110, 0, 0, 12, 32, 28 }, 52 }, 53 }; 54 55 static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev, 56 u16 idx, u8 band_idx) 57 { 58 struct mt7996_sta_link *msta_link; 59 struct mt7996_sta *msta; 60 struct mt7996_vif *mvif; 61 struct mt76_wcid *wcid; 62 int i; 63 64 wcid = mt76_wcid_ptr(dev, idx); 65 if (!wcid || !wcid->sta) 66 return NULL; 67 68 if (!mt7996_band_valid(dev, band_idx)) 69 return NULL; 70 71 if (wcid->phy_idx == band_idx) 72 return wcid; 73 74 msta_link = container_of(wcid, struct mt7996_sta_link, wcid); 75 msta = msta_link->sta; 76 if (!msta || !msta->vif) 77 return NULL; 78 79 mvif = msta->vif; 80 for (i = 0; i < ARRAY_SIZE(mvif->mt76.link); i++) { 81 struct mt76_vif_link *mlink; 82 83 mlink = rcu_dereference(mvif->mt76.link[i]); 84 if (!mlink) 85 continue; 86 87 if (mlink->band_idx != band_idx) 88 continue; 89 90 msta_link = rcu_dereference(msta->link[i]); 91 break; 92 } 93 94 return &msta_link->wcid; 95 } 96 97 bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask) 98 { 99 mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX, 100 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask); 101 102 return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 103 0, 5000); 104 } 105 106 u32 mt7996_mac_wtbl_lmac_addr(struct mt7996_dev *dev, u16 wcid, u8 dw) 107 { 108 mt76_wr(dev, MT_WTBLON_TOP_WDUCR, 109 FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7))); 110 111 return MT_WTBL_LMAC_OFFS(wcid, dw); 112 } 113 114 static void mt7996_mac_sta_poll(struct mt7996_dev *dev) 115 { 116 static const u8 ac_to_tid[] = { 117 [IEEE80211_AC_BE] = 0, 118 [IEEE80211_AC_BK] = 1, 119 [IEEE80211_AC_VI] = 4, 120 [IEEE80211_AC_VO] = 6 121 }; 122 struct mt7996_sta_link *msta_link; 123 struct mt76_vif_link *mlink; 124 struct ieee80211_sta *sta; 125 struct mt7996_sta *msta; 126 u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS]; 127 LIST_HEAD(sta_poll_list); 128 struct mt76_wcid *wcid; 129 int i; 130 131 spin_lock_bh(&dev->mt76.sta_poll_lock); 132 list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list); 133 spin_unlock_bh(&dev->mt76.sta_poll_lock); 134 135 rcu_read_lock(); 136 137 while (true) { 138 bool clear = false; 139 u32 addr, val; 140 u16 idx; 141 s8 rssi[4]; 142 143 spin_lock_bh(&dev->mt76.sta_poll_lock); 144 if (list_empty(&sta_poll_list)) { 145 spin_unlock_bh(&dev->mt76.sta_poll_lock); 146 break; 147 } 148 msta_link = list_first_entry(&sta_poll_list, 149 struct mt7996_sta_link, 150 wcid.poll_list); 151 msta = msta_link->sta; 152 wcid = &msta_link->wcid; 153 list_del_init(&wcid->poll_list); 154 spin_unlock_bh(&dev->mt76.sta_poll_lock); 155 156 idx = wcid->idx; 157 158 /* refresh peer's airtime reporting */ 159 addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 20); 160 161 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 162 u32 tx_last = msta_link->airtime_ac[i]; 163 u32 rx_last = msta_link->airtime_ac[i + 4]; 164 165 msta_link->airtime_ac[i] = mt76_rr(dev, addr); 166 msta_link->airtime_ac[i + 4] = mt76_rr(dev, addr + 4); 167 168 tx_time[i] = msta_link->airtime_ac[i] - tx_last; 169 rx_time[i] = msta_link->airtime_ac[i + 4] - rx_last; 170 171 if ((tx_last | rx_last) & BIT(30)) 172 clear = true; 173 174 addr += 8; 175 } 176 177 if (clear) { 178 mt7996_mac_wtbl_update(dev, idx, 179 MT_WTBL_UPDATE_ADM_COUNT_CLEAR); 180 memset(msta_link->airtime_ac, 0, 181 sizeof(msta_link->airtime_ac)); 182 } 183 184 if (!wcid->sta) 185 continue; 186 187 sta = container_of((void *)msta, struct ieee80211_sta, 188 drv_priv); 189 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 190 u8 q = mt76_connac_lmac_mapping(i); 191 u32 tx_cur = tx_time[q]; 192 u32 rx_cur = rx_time[q]; 193 u8 tid = ac_to_tid[i]; 194 195 if (!tx_cur && !rx_cur) 196 continue; 197 198 ieee80211_sta_register_airtime(sta, tid, tx_cur, rx_cur); 199 } 200 201 /* get signal strength of resp frames (CTS/BA/ACK) */ 202 addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 34); 203 val = mt76_rr(dev, addr); 204 205 rssi[0] = to_rssi(GENMASK(7, 0), val); 206 rssi[1] = to_rssi(GENMASK(15, 8), val); 207 rssi[2] = to_rssi(GENMASK(23, 16), val); 208 rssi[3] = to_rssi(GENMASK(31, 14), val); 209 210 mlink = rcu_dereference(msta->vif->mt76.link[wcid->link_id]); 211 if (mlink) { 212 struct mt76_phy *mphy = mt76_vif_link_phy(mlink); 213 214 if (mphy) 215 msta_link->ack_signal = 216 mt76_rx_signal(mphy->antenna_mask, 217 rssi); 218 } 219 220 ewma_avg_signal_add(&msta_link->avg_ack_signal, 221 -msta_link->ack_signal); 222 } 223 224 rcu_read_unlock(); 225 } 226 227 /* The HW does not translate the mac header to 802.3 for mesh point */ 228 static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap) 229 { 230 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; 231 struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap); 232 struct mt7996_sta_link *msta_link = (void *)status->wcid; 233 struct mt7996_sta *msta = msta_link->sta; 234 struct ieee80211_bss_conf *link_conf; 235 __le32 *rxd = (__le32 *)skb->data; 236 struct ieee80211_sta *sta; 237 struct ieee80211_vif *vif; 238 struct ieee80211_hdr hdr; 239 u16 frame_control; 240 241 if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) != 242 MT_RXD3_NORMAL_U2M) 243 return -EINVAL; 244 245 if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4)) 246 return -EINVAL; 247 248 if (!msta || !msta->vif) 249 return -EINVAL; 250 251 sta = wcid_to_sta(status->wcid); 252 vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv); 253 link_conf = rcu_dereference(vif->link_conf[msta_link->wcid.link_id]); 254 if (!link_conf) 255 return -EINVAL; 256 257 /* store the info from RXD and ethhdr to avoid being overridden */ 258 frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL); 259 hdr.frame_control = cpu_to_le16(frame_control); 260 hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_SEQ_CTRL)); 261 hdr.duration_id = 0; 262 263 ether_addr_copy(hdr.addr1, vif->addr); 264 ether_addr_copy(hdr.addr2, sta->addr); 265 switch (frame_control & (IEEE80211_FCTL_TODS | 266 IEEE80211_FCTL_FROMDS)) { 267 case 0: 268 ether_addr_copy(hdr.addr3, link_conf->bssid); 269 break; 270 case IEEE80211_FCTL_FROMDS: 271 ether_addr_copy(hdr.addr3, eth_hdr->h_source); 272 break; 273 case IEEE80211_FCTL_TODS: 274 ether_addr_copy(hdr.addr3, eth_hdr->h_dest); 275 break; 276 case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS: 277 ether_addr_copy(hdr.addr3, eth_hdr->h_dest); 278 ether_addr_copy(hdr.addr4, eth_hdr->h_source); 279 break; 280 default: 281 return -EINVAL; 282 } 283 284 skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2); 285 if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) || 286 eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX)) 287 ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header); 288 else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN) 289 ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header); 290 else 291 skb_pull(skb, 2); 292 293 if (ieee80211_has_order(hdr.frame_control)) 294 memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[11], 295 IEEE80211_HT_CTL_LEN); 296 if (ieee80211_is_data_qos(hdr.frame_control)) { 297 __le16 qos_ctrl; 298 299 qos_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_QOS_CTL)); 300 memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl, 301 IEEE80211_QOS_CTL_LEN); 302 } 303 304 if (ieee80211_has_a4(hdr.frame_control)) 305 memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr)); 306 else 307 memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6); 308 309 return 0; 310 } 311 312 static int 313 mt7996_mac_fill_rx_rate(struct mt7996_dev *dev, 314 struct mt76_rx_status *status, 315 struct ieee80211_supported_band *sband, 316 __le32 *rxv, u8 *mode) 317 { 318 u32 v0, v2; 319 u8 stbc, gi, bw, dcm, nss; 320 int i, idx; 321 bool cck = false; 322 323 v0 = le32_to_cpu(rxv[0]); 324 v2 = le32_to_cpu(rxv[2]); 325 326 idx = FIELD_GET(MT_PRXV_TX_RATE, v0); 327 i = idx; 328 nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1; 329 330 stbc = FIELD_GET(MT_PRXV_HT_STBC, v2); 331 gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v2); 332 *mode = FIELD_GET(MT_PRXV_TX_MODE, v2); 333 dcm = FIELD_GET(MT_PRXV_DCM, v2); 334 bw = FIELD_GET(MT_PRXV_FRAME_MODE, v2); 335 336 switch (*mode) { 337 case MT_PHY_TYPE_CCK: 338 cck = true; 339 fallthrough; 340 case MT_PHY_TYPE_OFDM: 341 i = mt76_get_rate(&dev->mt76, sband, i, cck); 342 break; 343 case MT_PHY_TYPE_HT_GF: 344 case MT_PHY_TYPE_HT: 345 status->encoding = RX_ENC_HT; 346 if (gi) 347 status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 348 if (i > 31) 349 return -EINVAL; 350 break; 351 case MT_PHY_TYPE_VHT: 352 status->nss = nss; 353 status->encoding = RX_ENC_VHT; 354 if (gi) 355 status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 356 if (i > 11) 357 return -EINVAL; 358 break; 359 case MT_PHY_TYPE_HE_MU: 360 case MT_PHY_TYPE_HE_SU: 361 case MT_PHY_TYPE_HE_EXT_SU: 362 case MT_PHY_TYPE_HE_TB: 363 status->nss = nss; 364 status->encoding = RX_ENC_HE; 365 i &= GENMASK(3, 0); 366 367 if (gi <= NL80211_RATE_INFO_HE_GI_3_2) 368 status->he_gi = gi; 369 370 status->he_dcm = dcm; 371 break; 372 case MT_PHY_TYPE_EHT_SU: 373 case MT_PHY_TYPE_EHT_TRIG: 374 case MT_PHY_TYPE_EHT_MU: 375 status->nss = nss; 376 status->encoding = RX_ENC_EHT; 377 i &= GENMASK(3, 0); 378 379 if (gi <= NL80211_RATE_INFO_EHT_GI_3_2) 380 status->eht.gi = gi; 381 break; 382 default: 383 return -EINVAL; 384 } 385 status->rate_idx = i; 386 387 switch (bw) { 388 case IEEE80211_STA_RX_BW_20: 389 break; 390 case IEEE80211_STA_RX_BW_40: 391 if (*mode & MT_PHY_TYPE_HE_EXT_SU && 392 (idx & MT_PRXV_TX_ER_SU_106T)) { 393 status->bw = RATE_INFO_BW_HE_RU; 394 status->he_ru = 395 NL80211_RATE_INFO_HE_RU_ALLOC_106; 396 } else { 397 status->bw = RATE_INFO_BW_40; 398 } 399 break; 400 case IEEE80211_STA_RX_BW_80: 401 status->bw = RATE_INFO_BW_80; 402 break; 403 case IEEE80211_STA_RX_BW_160: 404 status->bw = RATE_INFO_BW_160; 405 break; 406 /* rxv reports bw 320-1 and 320-2 separately */ 407 case IEEE80211_STA_RX_BW_320: 408 case IEEE80211_STA_RX_BW_320 + 1: 409 status->bw = RATE_INFO_BW_320; 410 break; 411 default: 412 return -EINVAL; 413 } 414 415 status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc; 416 if (*mode < MT_PHY_TYPE_HE_SU && gi) 417 status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 418 419 return 0; 420 } 421 422 static void 423 mt7996_wed_check_ppe(struct mt7996_dev *dev, struct mt76_queue *q, 424 struct mt7996_sta *msta, struct sk_buff *skb, 425 u32 info) 426 { 427 struct ieee80211_vif *vif; 428 struct wireless_dev *wdev; 429 430 if (!msta || !msta->vif) 431 return; 432 433 if (!mt76_queue_is_wed_rx(q)) 434 return; 435 436 if (!(info & MT_DMA_INFO_PPE_VLD)) 437 return; 438 439 vif = container_of((void *)msta->vif, struct ieee80211_vif, 440 drv_priv); 441 wdev = ieee80211_vif_to_wdev(vif); 442 skb->dev = wdev->netdev; 443 444 mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb, 445 FIELD_GET(MT_DMA_PPE_CPU_REASON, info), 446 FIELD_GET(MT_DMA_PPE_ENTRY, info)); 447 } 448 449 static int 450 mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q, 451 struct sk_buff *skb, u32 *info) 452 { 453 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; 454 struct mt76_phy *mphy = &dev->mt76.phy; 455 struct mt7996_phy *phy = &dev->phy; 456 struct ieee80211_supported_band *sband; 457 __le32 *rxd = (__le32 *)skb->data; 458 __le32 *rxv = NULL; 459 u32 rxd0 = le32_to_cpu(rxd[0]); 460 u32 rxd1 = le32_to_cpu(rxd[1]); 461 u32 rxd2 = le32_to_cpu(rxd[2]); 462 u32 rxd3 = le32_to_cpu(rxd[3]); 463 u32 rxd4 = le32_to_cpu(rxd[4]); 464 u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM; 465 u32 csum_status = *(u32 *)skb->cb; 466 u32 mesh_mask = MT_RXD0_MESH | MT_RXD0_MHCP; 467 bool is_mesh = (rxd0 & mesh_mask) == mesh_mask; 468 bool unicast, insert_ccmp_hdr = false; 469 u8 remove_pad, amsdu_info, band_idx; 470 u8 mode = 0, qos_ctl = 0; 471 bool hdr_trans; 472 u16 hdr_gap; 473 u16 seq_ctrl = 0; 474 __le16 fc = 0; 475 int idx; 476 u8 hw_aggr = false; 477 struct mt7996_sta *msta = NULL; 478 479 hw_aggr = status->aggr; 480 memset(status, 0, sizeof(*status)); 481 482 band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1); 483 mphy = dev->mt76.phys[band_idx]; 484 phy = mphy->priv; 485 status->phy_idx = mphy->band_idx; 486 487 if (!test_bit(MT76_STATE_RUNNING, &mphy->state)) 488 return -EINVAL; 489 490 if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR) 491 return -EINVAL; 492 493 hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS; 494 if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM)) 495 return -EINVAL; 496 497 /* ICV error or CCMP/BIP/WPI MIC error */ 498 if (rxd1 & MT_RXD1_NORMAL_ICV_ERR) 499 status->flag |= RX_FLAG_ONLY_MONITOR; 500 501 unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M; 502 idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1); 503 status->wcid = mt7996_rx_get_wcid(dev, idx, band_idx); 504 505 if (status->wcid) { 506 struct mt7996_sta_link *msta_link; 507 508 msta_link = container_of(status->wcid, struct mt7996_sta_link, 509 wcid); 510 msta = msta_link->sta; 511 mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid); 512 } 513 514 status->freq = mphy->chandef.chan->center_freq; 515 status->band = mphy->chandef.chan->band; 516 if (status->band == NL80211_BAND_5GHZ) 517 sband = &mphy->sband_5g.sband; 518 else if (status->band == NL80211_BAND_6GHZ) 519 sband = &mphy->sband_6g.sband; 520 else 521 sband = &mphy->sband_2g.sband; 522 523 if (!sband->channels) 524 return -EINVAL; 525 526 if ((rxd3 & csum_mask) == csum_mask && 527 !(csum_status & (BIT(0) | BIT(2) | BIT(3)))) 528 skb->ip_summed = CHECKSUM_UNNECESSARY; 529 530 if (rxd1 & MT_RXD3_NORMAL_FCS_ERR) 531 status->flag |= RX_FLAG_FAILED_FCS_CRC; 532 533 if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR) 534 status->flag |= RX_FLAG_MMIC_ERROR; 535 536 if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 && 537 !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) { 538 status->flag |= RX_FLAG_DECRYPTED; 539 status->flag |= RX_FLAG_IV_STRIPPED; 540 status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED; 541 } 542 543 remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2); 544 545 if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR) 546 return -EINVAL; 547 548 rxd += 8; 549 if (rxd1 & MT_RXD1_NORMAL_GROUP_4) { 550 u32 v0 = le32_to_cpu(rxd[0]); 551 u32 v2 = le32_to_cpu(rxd[2]); 552 553 fc = cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL, v0)); 554 qos_ctl = FIELD_GET(MT_RXD10_QOS_CTL, v2); 555 seq_ctrl = FIELD_GET(MT_RXD10_SEQ_CTRL, v2); 556 557 rxd += 4; 558 if ((u8 *)rxd - skb->data >= skb->len) 559 return -EINVAL; 560 } 561 562 if (rxd1 & MT_RXD1_NORMAL_GROUP_1) { 563 u8 *data = (u8 *)rxd; 564 565 if (status->flag & RX_FLAG_DECRYPTED) { 566 switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) { 567 case MT_CIPHER_AES_CCMP: 568 case MT_CIPHER_CCMP_CCX: 569 case MT_CIPHER_CCMP_256: 570 insert_ccmp_hdr = 571 FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2); 572 fallthrough; 573 case MT_CIPHER_TKIP: 574 case MT_CIPHER_TKIP_NO_MIC: 575 case MT_CIPHER_GCMP: 576 case MT_CIPHER_GCMP_256: 577 status->iv[0] = data[5]; 578 status->iv[1] = data[4]; 579 status->iv[2] = data[3]; 580 status->iv[3] = data[2]; 581 status->iv[4] = data[1]; 582 status->iv[5] = data[0]; 583 break; 584 default: 585 break; 586 } 587 } 588 rxd += 4; 589 if ((u8 *)rxd - skb->data >= skb->len) 590 return -EINVAL; 591 } 592 593 if (rxd1 & MT_RXD1_NORMAL_GROUP_2) { 594 status->timestamp = le32_to_cpu(rxd[0]); 595 status->flag |= RX_FLAG_MACTIME_START; 596 597 if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) { 598 status->flag |= RX_FLAG_AMPDU_DETAILS; 599 600 /* all subframes of an A-MPDU have the same timestamp */ 601 if (phy->rx_ampdu_ts != status->timestamp) { 602 if (!++phy->ampdu_ref) 603 phy->ampdu_ref++; 604 } 605 phy->rx_ampdu_ts = status->timestamp; 606 607 status->ampdu_ref = phy->ampdu_ref; 608 } 609 610 rxd += 4; 611 if ((u8 *)rxd - skb->data >= skb->len) 612 return -EINVAL; 613 } 614 615 /* RXD Group 3 - P-RXV */ 616 if (rxd1 & MT_RXD1_NORMAL_GROUP_3) { 617 u32 v3; 618 int ret; 619 620 rxv = rxd; 621 rxd += 4; 622 if ((u8 *)rxd - skb->data >= skb->len) 623 return -EINVAL; 624 625 v3 = le32_to_cpu(rxv[3]); 626 627 status->chains = mphy->antenna_mask; 628 status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v3); 629 status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v3); 630 status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v3); 631 status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v3); 632 633 /* RXD Group 5 - C-RXV */ 634 if (rxd1 & MT_RXD1_NORMAL_GROUP_5) { 635 rxd += 24; 636 if ((u8 *)rxd - skb->data >= skb->len) 637 return -EINVAL; 638 } 639 640 ret = mt7996_mac_fill_rx_rate(dev, status, sband, rxv, &mode); 641 if (ret < 0) 642 return ret; 643 } 644 645 amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4); 646 status->amsdu = !!amsdu_info; 647 if (status->amsdu) { 648 status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME; 649 status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME; 650 } 651 652 /* IEEE 802.11 fragmentation can only be applied to unicast frames. 653 * Hence, drop fragments with multicast/broadcast RA. 654 * This check fixes vulnerabilities, like CVE-2020-26145. 655 */ 656 if ((ieee80211_has_morefrags(fc) || seq_ctrl & IEEE80211_SCTL_FRAG) && 657 FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) != MT_RXD3_NORMAL_U2M) 658 return -EINVAL; 659 660 hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad; 661 if (hdr_trans && ieee80211_has_morefrags(fc)) { 662 if (mt7996_reverse_frag0_hdr_trans(skb, hdr_gap)) 663 return -EINVAL; 664 hdr_trans = false; 665 } else { 666 int pad_start = 0; 667 668 skb_pull(skb, hdr_gap); 669 if (!hdr_trans && status->amsdu && !(ieee80211_has_a4(fc) && is_mesh)) { 670 pad_start = ieee80211_get_hdrlen_from_skb(skb); 671 } else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) { 672 /* When header translation failure is indicated, 673 * the hardware will insert an extra 2-byte field 674 * containing the data length after the protocol 675 * type field. This happens either when the LLC-SNAP 676 * pattern did not match, or if a VLAN header was 677 * detected. 678 */ 679 pad_start = 12; 680 if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q) 681 pad_start += 4; 682 else 683 pad_start = 0; 684 } 685 686 if (pad_start) { 687 memmove(skb->data + 2, skb->data, pad_start); 688 skb_pull(skb, 2); 689 } 690 } 691 692 if (!hdr_trans) { 693 struct ieee80211_hdr *hdr; 694 695 if (insert_ccmp_hdr) { 696 u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1); 697 698 mt76_insert_ccmp_hdr(skb, key_id); 699 } 700 701 hdr = mt76_skb_get_hdr(skb); 702 fc = hdr->frame_control; 703 if (ieee80211_is_data_qos(fc)) { 704 u8 *qos = ieee80211_get_qos_ctl(hdr); 705 706 seq_ctrl = le16_to_cpu(hdr->seq_ctrl); 707 qos_ctl = *qos; 708 709 /* Mesh DA/SA/Length will be stripped after hardware 710 * de-amsdu, so here needs to clear amsdu present bit 711 * to mark it as a normal mesh frame. 712 */ 713 if (ieee80211_has_a4(fc) && is_mesh && status->amsdu) 714 *qos &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; 715 } 716 skb_set_mac_header(skb, (unsigned char *)hdr - skb->data); 717 } else { 718 status->flag |= RX_FLAG_8023; 719 mt7996_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb, 720 *info); 721 mt76_npu_check_ppe(&dev->mt76, skb, *info); 722 } 723 724 if (rxv && !(status->flag & RX_FLAG_8023)) { 725 switch (status->encoding) { 726 case RX_ENC_EHT: 727 mt76_connac3_mac_decode_eht_radiotap(skb, rxv, mode); 728 break; 729 case RX_ENC_HE: 730 mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode); 731 break; 732 default: 733 break; 734 } 735 } 736 737 if (!status->wcid || !ieee80211_is_data_qos(fc) || hw_aggr) 738 return 0; 739 740 status->aggr = unicast && 741 !ieee80211_is_qos_nullfunc(fc); 742 status->qos_ctl = qos_ctl; 743 status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl); 744 745 return 0; 746 } 747 748 static void 749 mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi, 750 struct sk_buff *skb, struct mt76_wcid *wcid) 751 { 752 u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; 753 u8 fc_type, fc_stype; 754 u16 ethertype; 755 bool wmm = false; 756 u32 val; 757 758 if (wcid->sta) { 759 struct ieee80211_sta *sta = wcid_to_sta(wcid); 760 761 wmm = sta->wme; 762 } 763 764 val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) | 765 FIELD_PREP(MT_TXD1_TID, tid); 766 767 ethertype = get_unaligned_be16(&skb->data[12]); 768 if (ethertype >= ETH_P_802_3_MIN) 769 val |= MT_TXD1_ETH_802_3; 770 771 txwi[1] |= cpu_to_le32(val); 772 773 fc_type = IEEE80211_FTYPE_DATA >> 2; 774 fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0; 775 776 val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) | 777 FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype); 778 779 txwi[2] |= cpu_to_le32(val); 780 781 if (wcid->amsdu) 782 txwi[3] |= cpu_to_le32(MT_TXD3_HW_AMSDU); 783 } 784 785 static void 786 mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi, 787 struct sk_buff *skb, 788 struct ieee80211_key_conf *key, 789 struct mt76_wcid *wcid) 790 { 791 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 792 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; 793 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 794 bool multicast = is_multicast_ether_addr(hdr->addr1); 795 u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; 796 __le16 fc = hdr->frame_control, sc = hdr->seq_ctrl; 797 u16 seqno = le16_to_cpu(sc); 798 bool hw_bigtk = false; 799 u8 fc_type, fc_stype; 800 u32 val; 801 802 if (ieee80211_is_action(fc) && 803 mgmt->u.action.category == WLAN_CATEGORY_BACK && 804 mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) { 805 if (is_mt7990(&dev->mt76)) 806 txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TID_ADDBA, tid)); 807 else 808 txwi[7] |= cpu_to_le32(MT_TXD7_MAC_TXD); 809 810 tid = MT_TX_ADDBA; 811 } else if (ieee80211_is_mgmt(hdr->frame_control)) { 812 tid = MT_TX_NORMAL; 813 } 814 815 val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) | 816 FIELD_PREP(MT_TXD1_HDR_INFO, 817 ieee80211_get_hdrlen_from_skb(skb) / 2) | 818 FIELD_PREP(MT_TXD1_TID, tid); 819 820 if (!ieee80211_is_data(fc) || multicast || 821 info->flags & IEEE80211_TX_CTL_USE_MINRATE) 822 val |= MT_TXD1_FIXED_RATE; 823 824 if (is_mt7990(&dev->mt76) && ieee80211_is_beacon(fc) && 825 (wcid->hw_key_idx2 == 6 || wcid->hw_key_idx2 == 7)) 826 hw_bigtk = true; 827 828 if ((key && multicast && ieee80211_is_robust_mgmt_frame(skb)) || hw_bigtk) { 829 val |= MT_TXD1_BIP; 830 txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME); 831 } 832 833 txwi[1] |= cpu_to_le32(val); 834 835 fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2; 836 fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4; 837 838 val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) | 839 FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype); 840 841 if (ieee80211_has_morefrags(fc) && ieee80211_is_first_frag(sc)) 842 val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_FIRST); 843 else if (ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc)) 844 val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_MID); 845 else if (!ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc)) 846 val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_LAST); 847 else 848 val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_NONE); 849 850 txwi[2] |= cpu_to_le32(val); 851 852 txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast)); 853 if (ieee80211_is_beacon(fc)) { 854 txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT); 855 txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT); 856 } 857 858 if (multicast && ieee80211_vif_is_mld(info->control.vif)) { 859 val = MT_TXD3_SN_VALID | 860 FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno)); 861 txwi[3] |= cpu_to_le32(val); 862 } 863 864 if (info->flags & IEEE80211_TX_CTL_INJECTED) { 865 if (ieee80211_is_back_req(hdr->frame_control)) { 866 struct ieee80211_bar *bar; 867 868 bar = (struct ieee80211_bar *)skb->data; 869 seqno = le16_to_cpu(bar->start_seq_num); 870 } 871 872 val = MT_TXD3_SN_VALID | 873 FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno)); 874 txwi[3] |= cpu_to_le32(val); 875 txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU); 876 } 877 878 if (ieee80211_vif_is_mld(info->control.vif) && 879 (multicast || unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))) 880 txwi[5] |= cpu_to_le32(MT_TXD5_FL); 881 882 if (ieee80211_is_nullfunc(fc) && ieee80211_has_a4(fc) && 883 ieee80211_vif_is_mld(info->control.vif)) { 884 txwi[5] |= cpu_to_le32(MT_TXD5_FL); 885 txwi[6] |= cpu_to_le32(MT_TXD6_DIS_MAT); 886 } 887 888 if (!wcid->sta && ieee80211_is_mgmt(fc)) 889 txwi[6] |= cpu_to_le32(MT_TXD6_DIS_MAT); 890 } 891 892 void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi, 893 struct sk_buff *skb, struct mt76_wcid *wcid, 894 struct ieee80211_key_conf *key, int pid, 895 enum mt76_txq_id qid, u32 changed) 896 { 897 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 898 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 899 struct ieee80211_vif *vif = info->control.vif; 900 u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2; 901 u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0; 902 bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP; 903 struct mt76_vif_link *mlink = NULL; 904 struct mt7996_vif *mvif; 905 unsigned int link_id; 906 u16 tx_count = 15; 907 u32 val; 908 bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP | 909 BSS_CHANGED_FILS_DISCOVERY)); 910 bool beacon = !!(changed & (BSS_CHANGED_BEACON | 911 BSS_CHANGED_BEACON_ENABLED)) && (!inband_disc); 912 913 if (wcid != &dev->mt76.global_wcid) 914 link_id = wcid->link_id; 915 else 916 link_id = u32_get_bits(info->control.flags, 917 IEEE80211_TX_CTRL_MLO_LINK); 918 919 mvif = vif ? (struct mt7996_vif *)vif->drv_priv : NULL; 920 if (mvif) { 921 if (wcid->offchannel) 922 mlink = rcu_dereference(mvif->mt76.offchannel_link); 923 if (!mlink) 924 mlink = rcu_dereference(mvif->mt76.link[link_id]); 925 } 926 927 if (mlink) { 928 omac_idx = mlink->omac_idx; 929 wmm_idx = mlink->wmm_idx; 930 band_idx = mlink->band_idx; 931 } 932 933 if (inband_disc) { 934 p_fmt = MT_TX_TYPE_FW; 935 q_idx = MT_LMAC_ALTX0; 936 } else if (beacon) { 937 p_fmt = MT_TX_TYPE_FW; 938 q_idx = MT_LMAC_BCN0; 939 } else if (qid >= MT_TXQ_PSD) { 940 p_fmt = MT_TX_TYPE_CT; 941 q_idx = MT_LMAC_ALTX0; 942 } else { 943 p_fmt = MT_TX_TYPE_CT; 944 q_idx = wmm_idx * MT7996_MAX_WMM_SETS + 945 mt76_connac_lmac_mapping(skb_get_queue_mapping(skb)); 946 } 947 948 val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) | 949 FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) | 950 FIELD_PREP(MT_TXD0_Q_IDX, q_idx); 951 txwi[0] = cpu_to_le32(val); 952 953 val = FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) | 954 FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx); 955 956 if (band_idx) 957 val |= FIELD_PREP(MT_TXD1_TGID, band_idx); 958 959 txwi[1] = cpu_to_le32(val); 960 txwi[2] = 0; 961 962 val = MT_TXD3_SW_POWER_MGMT | 963 FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count); 964 if (key) 965 val |= MT_TXD3_PROTECT_FRAME; 966 if (info->flags & IEEE80211_TX_CTL_NO_ACK) 967 val |= MT_TXD3_NO_ACK; 968 969 txwi[3] = cpu_to_le32(val); 970 txwi[4] = 0; 971 972 val = FIELD_PREP(MT_TXD5_PID, pid); 973 if (pid >= MT_PACKET_ID_FIRST) 974 val |= MT_TXD5_TX_STATUS_HOST; 975 txwi[5] = cpu_to_le32(val); 976 977 val = MT_TXD6_DAS | MT_TXD6_VTA; 978 if ((q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0) || 979 skb->protocol == cpu_to_be16(ETH_P_PAE)) 980 val |= MT_TXD6_DIS_MAT; 981 982 if (is_mt7996(&dev->mt76)) 983 val |= FIELD_PREP(MT_TXD6_MSDU_CNT, 1); 984 else if (is_8023 || !ieee80211_is_mgmt(hdr->frame_control)) 985 val |= FIELD_PREP(MT_TXD6_MSDU_CNT_V2, 1); 986 987 txwi[6] = cpu_to_le32(val); 988 txwi[7] = 0; 989 990 if (is_8023) 991 mt7996_mac_write_txwi_8023(dev, txwi, skb, wcid); 992 else 993 mt7996_mac_write_txwi_80211(dev, txwi, skb, key, wcid); 994 995 if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) { 996 bool mcast = ieee80211_is_data(hdr->frame_control) && 997 is_multicast_ether_addr(hdr->addr1); 998 u8 idx = MT7996_BASIC_RATES_TBL; 999 1000 if (mlink) { 1001 if (mcast && mlink->mcast_rates_idx) 1002 idx = mlink->mcast_rates_idx; 1003 else if (beacon && mlink->beacon_rates_idx) 1004 idx = mlink->beacon_rates_idx; 1005 else 1006 idx = mlink->basic_rates_idx; 1007 } 1008 1009 val = FIELD_PREP(MT_TXD6_TX_RATE, idx) | MT_TXD6_FIXED_BW; 1010 if (mcast) 1011 val |= MT_TXD6_DIS_MAT; 1012 txwi[6] |= cpu_to_le32(val); 1013 txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE); 1014 } 1015 } 1016 1017 static bool 1018 mt7996_tx_use_mgmt(struct mt7996_dev *dev, struct sk_buff *skb) 1019 { 1020 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1021 1022 if (ieee80211_is_mgmt(hdr->frame_control)) 1023 return true; 1024 1025 /* for SDO to bypass specific data frame */ 1026 if (!mt7996_has_wa(dev)) { 1027 if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))) 1028 return true; 1029 1030 if (ieee80211_has_a4(hdr->frame_control) && 1031 !ieee80211_is_data_present(hdr->frame_control)) 1032 return true; 1033 } 1034 1035 return false; 1036 } 1037 1038 int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, 1039 enum mt76_txq_id qid, struct mt76_wcid *wcid, 1040 struct ieee80211_sta *sta, 1041 struct mt76_tx_info *tx_info) 1042 { 1043 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data; 1044 struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76); 1045 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); 1046 struct ieee80211_key_conf *key = info->control.hw_key; 1047 struct ieee80211_vif *vif = info->control.vif; 1048 struct mt7996_vif *mvif = vif ? (struct mt7996_vif *)vif->drv_priv : NULL; 1049 struct mt7996_sta *msta = sta ? (struct mt7996_sta *)sta->drv_priv : NULL; 1050 struct mt76_vif_link *mlink = NULL; 1051 struct mt76_txwi_cache *t; 1052 int id, i, pid, nbuf = tx_info->nbuf - 1; 1053 bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP; 1054 __le32 *ptr = (__le32 *)txwi_ptr; 1055 u8 *txwi = (u8 *)txwi_ptr; 1056 u8 link_id; 1057 1058 if (unlikely(tx_info->skb->len <= ETH_HLEN)) 1059 return -EINVAL; 1060 1061 if (!wcid) 1062 wcid = &dev->mt76.global_wcid; 1063 1064 if ((is_8023 || ieee80211_is_data_qos(hdr->frame_control)) && sta->mlo && 1065 likely(tx_info->skb->protocol != cpu_to_be16(ETH_P_PAE))) { 1066 u8 tid = tx_info->skb->priority & IEEE80211_QOS_CTL_TID_MASK; 1067 1068 link_id = (tid % 2) ? msta->seclink_id : msta->deflink_id; 1069 } else { 1070 link_id = u32_get_bits(info->control.flags, 1071 IEEE80211_TX_CTRL_MLO_LINK); 1072 } 1073 1074 if (link_id != wcid->link_id && link_id != IEEE80211_LINK_UNSPECIFIED) { 1075 if (msta) { 1076 struct mt7996_sta_link *msta_link = 1077 rcu_dereference(msta->link[link_id]); 1078 1079 if (msta_link) 1080 wcid = &msta_link->wcid; 1081 } else if (mvif) { 1082 mlink = rcu_dereference(mvif->mt76.link[link_id]); 1083 if (mlink && mlink->wcid) 1084 wcid = mlink->wcid; 1085 } 1086 } 1087 1088 t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); 1089 t->skb = tx_info->skb; 1090 1091 id = mt76_token_consume(mdev, &t); 1092 if (id < 0) 1093 return id; 1094 1095 /* Since the rules of HW MLD address translation are not fully 1096 * compatible with 802.11 EAPOL frame, we do the translation by 1097 * software 1098 */ 1099 if (tx_info->skb->protocol == cpu_to_be16(ETH_P_PAE) && sta->mlo) { 1100 struct ieee80211_hdr *hdr = (void *)tx_info->skb->data; 1101 struct ieee80211_bss_conf *link_conf; 1102 struct ieee80211_link_sta *link_sta; 1103 1104 link_conf = rcu_dereference(vif->link_conf[wcid->link_id]); 1105 if (!link_conf) 1106 return -EINVAL; 1107 1108 link_sta = rcu_dereference(sta->link[wcid->link_id]); 1109 if (!link_sta) 1110 return -EINVAL; 1111 1112 dma_sync_single_for_cpu(mdev->dma_dev, tx_info->buf[1].addr, 1113 tx_info->buf[1].len, DMA_TO_DEVICE); 1114 1115 memcpy(hdr->addr1, link_sta->addr, ETH_ALEN); 1116 memcpy(hdr->addr2, link_conf->addr, ETH_ALEN); 1117 if (ieee80211_has_a4(hdr->frame_control)) { 1118 memcpy(hdr->addr3, sta->addr, ETH_ALEN); 1119 memcpy(hdr->addr4, vif->addr, ETH_ALEN); 1120 } else if (ieee80211_has_tods(hdr->frame_control)) { 1121 memcpy(hdr->addr3, sta->addr, ETH_ALEN); 1122 } else if (ieee80211_has_fromds(hdr->frame_control)) { 1123 memcpy(hdr->addr3, vif->addr, ETH_ALEN); 1124 } 1125 1126 dma_sync_single_for_device(mdev->dma_dev, tx_info->buf[1].addr, 1127 tx_info->buf[1].len, DMA_TO_DEVICE); 1128 } 1129 1130 pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb); 1131 memset(txwi_ptr, 0, MT_TXD_SIZE); 1132 /* Transmit non qos data by 802.11 header and need to fill txd by host*/ 1133 if (!is_8023 || pid >= MT_PACKET_ID_FIRST) 1134 mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key, 1135 pid, qid, 0); 1136 1137 /* MT7996 and MT7992 require driver to provide the MAC TXP for AddBA 1138 * req 1139 */ 1140 if (le32_to_cpu(ptr[7]) & MT_TXD7_MAC_TXD) { 1141 u32 val; 1142 1143 ptr = (__le32 *)(txwi + MT_TXD_SIZE); 1144 memset((void *)ptr, 0, sizeof(struct mt76_connac_fw_txp)); 1145 1146 val = FIELD_PREP(MT_TXP0_TOKEN_ID0, id) | 1147 MT_TXP0_TOKEN_ID0_VALID_MASK; 1148 ptr[0] = cpu_to_le32(val); 1149 1150 val = FIELD_PREP(MT_TXP1_TID_ADDBA, 1151 tx_info->skb->priority & 1152 IEEE80211_QOS_CTL_TID_MASK); 1153 ptr[1] = cpu_to_le32(val); 1154 ptr[2] = cpu_to_le32(tx_info->buf[1].addr & 0xFFFFFFFF); 1155 1156 val = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[1].len) | 1157 MT_TXP3_ML0_MASK; 1158 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1159 val |= FIELD_PREP(MT_TXP3_DMA_ADDR_H, 1160 tx_info->buf[1].addr >> 32); 1161 #endif 1162 ptr[3] = cpu_to_le32(val); 1163 } else { 1164 struct mt76_connac_txp_common *txp; 1165 1166 txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE); 1167 for (i = 0; i < nbuf; i++) { 1168 u16 len; 1169 1170 len = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[i + 1].len); 1171 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1172 len |= FIELD_PREP(MT_TXP_DMA_ADDR_H, 1173 tx_info->buf[i + 1].addr >> 32); 1174 #endif 1175 1176 txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr); 1177 txp->fw.len[i] = cpu_to_le16(len); 1178 } 1179 txp->fw.nbuf = nbuf; 1180 1181 txp->fw.flags = cpu_to_le16(MT_CT_INFO_FROM_HOST); 1182 1183 if (!is_8023 || pid >= MT_PACKET_ID_FIRST) 1184 txp->fw.flags |= cpu_to_le16(MT_CT_INFO_APPLY_TXD); 1185 1186 if (!key) 1187 txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME); 1188 1189 if (!is_8023 && mt7996_tx_use_mgmt(dev, tx_info->skb)) 1190 txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME); 1191 1192 if (mvif) { 1193 if (wcid->offchannel) 1194 mlink = rcu_dereference(mvif->mt76.offchannel_link); 1195 if (!mlink) 1196 mlink = rcu_dereference(mvif->mt76.link[wcid->link_id]); 1197 1198 txp->fw.bss_idx = mlink ? mlink->idx : mvif->deflink.mt76.idx; 1199 } 1200 1201 txp->fw.token = cpu_to_le16(id); 1202 txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff); 1203 } 1204 1205 tx_info->skb = NULL; 1206 1207 /* pass partial skb header to fw */ 1208 tx_info->buf[1].len = MT_CT_PARSE_LEN; 1209 tx_info->buf[1].skip_unmap = true; 1210 tx_info->nbuf = MT_CT_DMA_BUF_NUM; 1211 1212 return 0; 1213 } 1214 1215 u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id) 1216 { 1217 struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE; 1218 __le32 *txwi = ptr; 1219 u32 val; 1220 1221 memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp)); 1222 1223 val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) | 1224 FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT); 1225 txwi[0] = cpu_to_le32(val); 1226 1227 val = BIT(31) | 1228 FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3); 1229 txwi[1] = cpu_to_le32(val); 1230 1231 txp->token = cpu_to_le16(token_id); 1232 txp->nbuf = 1; 1233 txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp)); 1234 1235 return MT_TXD_SIZE + sizeof(*txp); 1236 } 1237 1238 static void 1239 mt7996_tx_check_aggr(struct ieee80211_link_sta *link_sta, 1240 struct mt76_wcid *wcid, struct sk_buff *skb) 1241 { 1242 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1243 bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP; 1244 u16 fc, tid; 1245 1246 if (!(link_sta->ht_cap.ht_supported || link_sta->he_cap.has_he)) 1247 return; 1248 1249 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; 1250 if (tid >= 6) /* skip VO queue */ 1251 return; 1252 1253 if (is_8023) { 1254 fc = IEEE80211_FTYPE_DATA | 1255 (link_sta->sta->wme ? IEEE80211_STYPE_QOS_DATA 1256 : IEEE80211_STYPE_DATA); 1257 } else { 1258 /* No need to get precise TID for Action/Management Frame, 1259 * since it will not meet the following Frame Control 1260 * condition anyway. 1261 */ 1262 1263 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1264 1265 fc = le16_to_cpu(hdr->frame_control) & 1266 (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE); 1267 } 1268 1269 if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA))) 1270 return; 1271 1272 if (!test_and_set_bit(tid, &wcid->ampdu_state)) 1273 ieee80211_start_tx_ba_session(link_sta->sta, tid, 0); 1274 } 1275 1276 static void 1277 mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t, 1278 struct ieee80211_link_sta *link_sta, 1279 struct mt76_wcid *wcid, struct list_head *free_list) 1280 { 1281 struct mt76_dev *mdev = &dev->mt76; 1282 __le32 *txwi; 1283 u16 wcid_idx; 1284 1285 mt76_connac_txp_skb_unmap(mdev, t); 1286 if (!t->skb) 1287 goto out; 1288 1289 txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t); 1290 if (link_sta) { 1291 wcid_idx = wcid->idx; 1292 if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE))) { 1293 struct mt7996_sta *msta; 1294 1295 /* AMPDU state is stored in the primary link */ 1296 msta = (void *)link_sta->sta->drv_priv; 1297 mt7996_tx_check_aggr(link_sta, &msta->deflink.wcid, 1298 t->skb); 1299 } 1300 } else { 1301 wcid_idx = le32_get_bits(txwi[9], MT_TXD9_WLAN_IDX); 1302 } 1303 1304 __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list); 1305 1306 out: 1307 t->skb = NULL; 1308 mt76_put_txwi(mdev, t); 1309 } 1310 1311 static void 1312 mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len) 1313 { 1314 __le32 *tx_free = (__le32 *)data, *cur_info; 1315 struct mt76_dev *mdev = &dev->mt76; 1316 struct mt76_phy *phy2 = mdev->phys[MT_BAND1]; 1317 struct mt76_phy *phy3 = mdev->phys[MT_BAND2]; 1318 struct ieee80211_link_sta *link_sta = NULL; 1319 struct mt76_txwi_cache *txwi; 1320 struct mt76_wcid *wcid = NULL; 1321 LIST_HEAD(free_list); 1322 struct sk_buff *skb, *tmp; 1323 void *end = data + len; 1324 bool wake = false; 1325 u16 total, count = 0; 1326 u8 ver; 1327 1328 /* clean DMA queues and unmap buffers first */ 1329 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false); 1330 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false); 1331 if (phy2) { 1332 mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_PSD], false); 1333 mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_BE], false); 1334 } 1335 if (phy3) { 1336 mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_PSD], false); 1337 mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_BE], false); 1338 } 1339 1340 ver = le32_get_bits(tx_free[1], MT_TXFREE1_VER); 1341 if (WARN_ON_ONCE(ver < 5)) 1342 return; 1343 1344 total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT); 1345 for (cur_info = &tx_free[2]; count < total; cur_info++) { 1346 u32 msdu, info; 1347 u8 i; 1348 1349 if (WARN_ON_ONCE((void *)cur_info >= end)) 1350 return; 1351 /* 1'b1: new wcid pair. 1352 * 1'b0: msdu_id with the same 'wcid pair' as above. 1353 */ 1354 info = le32_to_cpu(*cur_info); 1355 if (info & MT_TXFREE_INFO_PAIR) { 1356 struct ieee80211_sta *sta; 1357 unsigned long valid_links; 1358 struct mt7996_sta *msta; 1359 unsigned int id; 1360 u16 idx; 1361 1362 idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info); 1363 wcid = mt76_wcid_ptr(dev, idx); 1364 sta = wcid_to_sta(wcid); 1365 if (!sta) { 1366 link_sta = NULL; 1367 goto next; 1368 } 1369 1370 link_sta = rcu_dereference(sta->link[wcid->link_id]); 1371 if (!link_sta) 1372 goto next; 1373 1374 msta = (struct mt7996_sta *)sta->drv_priv; 1375 valid_links = sta->valid_links ?: BIT(0); 1376 1377 /* For MLD STA, add all link's wcid to sta_poll_list */ 1378 for_each_set_bit(id, &valid_links, 1379 IEEE80211_MLD_MAX_NUM_LINKS) { 1380 struct mt7996_sta_link *msta_link; 1381 1382 msta_link = rcu_dereference(msta->link[id]); 1383 if (!msta_link) 1384 continue; 1385 1386 mt76_wcid_add_poll(&dev->mt76, 1387 &msta_link->wcid); 1388 } 1389 next: 1390 /* ver 7 has a new DW with pair = 1, skip it */ 1391 if (ver == 7 && ((void *)(cur_info + 1) < end) && 1392 (le32_to_cpu(*(cur_info + 1)) & MT_TXFREE_INFO_PAIR)) 1393 cur_info++; 1394 continue; 1395 } else if (info & MT_TXFREE_INFO_HEADER) { 1396 u32 tx_retries = 0, tx_failed = 0; 1397 1398 if (!wcid) 1399 continue; 1400 1401 tx_retries = 1402 FIELD_GET(MT_TXFREE_INFO_COUNT, info) - 1; 1403 tx_failed = tx_retries + 1404 !!FIELD_GET(MT_TXFREE_INFO_STAT, info); 1405 1406 wcid->stats.tx_retries += tx_retries; 1407 wcid->stats.tx_failed += tx_failed; 1408 continue; 1409 } 1410 1411 for (i = 0; i < 2; i++) { 1412 msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID; 1413 if (msdu == MT_TXFREE_INFO_MSDU_ID) 1414 continue; 1415 1416 count++; 1417 txwi = mt76_token_release(mdev, msdu, &wake); 1418 if (!txwi) 1419 continue; 1420 1421 mt7996_txwi_free(dev, txwi, link_sta, wcid, 1422 &free_list); 1423 } 1424 } 1425 1426 mt7996_mac_sta_poll(dev); 1427 1428 if (wake) 1429 mt76_set_tx_blocked(&dev->mt76, false); 1430 1431 mt76_worker_schedule(&dev->mt76.tx_worker); 1432 1433 list_for_each_entry_safe(skb, tmp, &free_list, list) { 1434 skb_list_del_init(skb); 1435 napi_consume_skb(skb, 1); 1436 } 1437 } 1438 1439 static bool 1440 mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid, 1441 int pid, __le32 *txs_data) 1442 { 1443 struct mt76_sta_stats *stats = &wcid->stats; 1444 struct ieee80211_supported_band *sband; 1445 struct mt76_dev *mdev = &dev->mt76; 1446 struct mt76_phy *mphy; 1447 struct ieee80211_tx_info *info; 1448 struct sk_buff_head list; 1449 struct rate_info rate = {}; 1450 struct sk_buff *skb = NULL; 1451 bool cck = false; 1452 u32 txrate, txs, mode, stbc; 1453 1454 txs = le32_to_cpu(txs_data[0]); 1455 1456 mt76_tx_status_lock(mdev, &list); 1457 1458 /* only report MPDU TXS */ 1459 if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) == 0) { 1460 skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list); 1461 if (skb) { 1462 info = IEEE80211_SKB_CB(skb); 1463 if (!(txs & MT_TXS0_ACK_ERROR_MASK)) 1464 info->flags |= IEEE80211_TX_STAT_ACK; 1465 1466 info->status.ampdu_len = 1; 1467 info->status.ampdu_ack_len = 1468 !!(info->flags & IEEE80211_TX_STAT_ACK); 1469 1470 info->status.rates[0].idx = -1; 1471 } 1472 } 1473 1474 if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wcid->sta) { 1475 struct ieee80211_sta *sta; 1476 u8 tid; 1477 1478 sta = wcid_to_sta(wcid); 1479 tid = FIELD_GET(MT_TXS0_TID, txs); 1480 ieee80211_refresh_tx_agg_session_timer(sta, tid); 1481 } 1482 1483 txrate = FIELD_GET(MT_TXS0_TX_RATE, txs); 1484 1485 rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate); 1486 rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1; 1487 stbc = le32_get_bits(txs_data[3], MT_TXS3_RATE_STBC); 1488 1489 if (stbc && rate.nss > 1) 1490 rate.nss >>= 1; 1491 1492 if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss)) 1493 stats->tx_nss[rate.nss - 1]++; 1494 if (rate.mcs < ARRAY_SIZE(stats->tx_mcs)) 1495 stats->tx_mcs[rate.mcs]++; 1496 1497 mode = FIELD_GET(MT_TX_RATE_MODE, txrate); 1498 switch (mode) { 1499 case MT_PHY_TYPE_CCK: 1500 cck = true; 1501 fallthrough; 1502 case MT_PHY_TYPE_OFDM: 1503 mphy = mt76_dev_phy(mdev, wcid->phy_idx); 1504 1505 if (mphy->chandef.chan->band == NL80211_BAND_5GHZ) 1506 sband = &mphy->sband_5g.sband; 1507 else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ) 1508 sband = &mphy->sband_6g.sband; 1509 else 1510 sband = &mphy->sband_2g.sband; 1511 1512 rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck); 1513 rate.legacy = sband->bitrates[rate.mcs].bitrate; 1514 break; 1515 case MT_PHY_TYPE_HT: 1516 case MT_PHY_TYPE_HT_GF: 1517 if (rate.mcs > 31) 1518 goto out; 1519 1520 rate.flags = RATE_INFO_FLAGS_MCS; 1521 if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI) 1522 rate.flags |= RATE_INFO_FLAGS_SHORT_GI; 1523 break; 1524 case MT_PHY_TYPE_VHT: 1525 if (rate.mcs > 9) 1526 goto out; 1527 1528 rate.flags = RATE_INFO_FLAGS_VHT_MCS; 1529 if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI) 1530 rate.flags |= RATE_INFO_FLAGS_SHORT_GI; 1531 break; 1532 case MT_PHY_TYPE_HE_SU: 1533 case MT_PHY_TYPE_HE_EXT_SU: 1534 case MT_PHY_TYPE_HE_TB: 1535 case MT_PHY_TYPE_HE_MU: 1536 if (rate.mcs > 11) 1537 goto out; 1538 1539 rate.he_gi = wcid->rate.he_gi; 1540 rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate); 1541 rate.flags = RATE_INFO_FLAGS_HE_MCS; 1542 break; 1543 case MT_PHY_TYPE_EHT_SU: 1544 case MT_PHY_TYPE_EHT_TRIG: 1545 case MT_PHY_TYPE_EHT_MU: 1546 if (rate.mcs > 13) 1547 goto out; 1548 1549 rate.eht_gi = wcid->rate.eht_gi; 1550 rate.flags = RATE_INFO_FLAGS_EHT_MCS; 1551 break; 1552 default: 1553 goto out; 1554 } 1555 1556 stats->tx_mode[mode]++; 1557 1558 switch (FIELD_GET(MT_TXS0_BW, txs)) { 1559 case IEEE80211_STA_RX_BW_320: 1560 rate.bw = RATE_INFO_BW_320; 1561 stats->tx_bw[4]++; 1562 break; 1563 case IEEE80211_STA_RX_BW_160: 1564 rate.bw = RATE_INFO_BW_160; 1565 stats->tx_bw[3]++; 1566 break; 1567 case IEEE80211_STA_RX_BW_80: 1568 rate.bw = RATE_INFO_BW_80; 1569 stats->tx_bw[2]++; 1570 break; 1571 case IEEE80211_STA_RX_BW_40: 1572 rate.bw = RATE_INFO_BW_40; 1573 stats->tx_bw[1]++; 1574 break; 1575 default: 1576 rate.bw = RATE_INFO_BW_20; 1577 stats->tx_bw[0]++; 1578 break; 1579 } 1580 wcid->rate = rate; 1581 1582 out: 1583 if (skb) 1584 mt76_tx_status_skb_done(mdev, skb, &list); 1585 mt76_tx_status_unlock(mdev, &list); 1586 1587 return !!skb; 1588 } 1589 1590 static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data) 1591 { 1592 struct mt7996_sta_link *msta_link; 1593 struct mt76_wcid *wcid; 1594 __le32 *txs_data = data; 1595 u16 wcidx; 1596 u8 pid; 1597 1598 wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID); 1599 pid = le32_get_bits(txs_data[3], MT_TXS3_PID); 1600 1601 if (pid < MT_PACKET_ID_NO_SKB) 1602 return; 1603 1604 rcu_read_lock(); 1605 1606 wcid = mt76_wcid_ptr(dev, wcidx); 1607 if (!wcid) 1608 goto out; 1609 1610 mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data); 1611 1612 if (!wcid->sta) 1613 goto out; 1614 1615 msta_link = container_of(wcid, struct mt7996_sta_link, wcid); 1616 mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid); 1617 1618 out: 1619 rcu_read_unlock(); 1620 } 1621 1622 bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len) 1623 { 1624 struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76); 1625 __le32 *rxd = (__le32 *)data; 1626 __le32 *end = (__le32 *)&rxd[len / 4]; 1627 enum rx_pkt_type type; 1628 1629 type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE); 1630 if (type != PKT_TYPE_NORMAL) { 1631 u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK); 1632 1633 if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) == 1634 MT_RXD0_SW_PKT_TYPE_FRAME)) 1635 return true; 1636 } 1637 1638 switch (type) { 1639 case PKT_TYPE_TXRX_NOTIFY: 1640 mt7996_mac_tx_free(dev, data, len); 1641 return false; 1642 case PKT_TYPE_TXS: 1643 for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE) 1644 mt7996_mac_add_txs(dev, rxd); 1645 return false; 1646 case PKT_TYPE_RX_FW_MONITOR: 1647 mt7996_debugfs_rx_fw_monitor(dev, data, len); 1648 return false; 1649 default: 1650 return true; 1651 } 1652 } 1653 1654 void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, 1655 struct sk_buff *skb, u32 *info) 1656 { 1657 struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76); 1658 __le32 *rxd = (__le32 *)skb->data; 1659 __le32 *end = (__le32 *)&skb->data[skb->len]; 1660 enum rx_pkt_type type; 1661 1662 type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE); 1663 if (type != PKT_TYPE_NORMAL) { 1664 u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK); 1665 1666 if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) == 1667 MT_RXD0_SW_PKT_TYPE_FRAME)) 1668 type = PKT_TYPE_NORMAL; 1669 } 1670 1671 switch (type) { 1672 case PKT_TYPE_TXRX_NOTIFY: 1673 if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2) && 1674 q == MT_RXQ_TXFREE_BAND2) { 1675 dev_kfree_skb(skb); 1676 break; 1677 } 1678 1679 mt7996_mac_tx_free(dev, skb->data, skb->len); 1680 napi_consume_skb(skb, 1); 1681 break; 1682 case PKT_TYPE_RX_EVENT: 1683 mt7996_mcu_rx_event(dev, skb); 1684 break; 1685 case PKT_TYPE_TXS: 1686 for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE) 1687 mt7996_mac_add_txs(dev, rxd); 1688 dev_kfree_skb(skb); 1689 break; 1690 case PKT_TYPE_RX_FW_MONITOR: 1691 mt7996_debugfs_rx_fw_monitor(dev, skb->data, skb->len); 1692 dev_kfree_skb(skb); 1693 break; 1694 case PKT_TYPE_NORMAL: 1695 if (!mt7996_mac_fill_rx(dev, q, skb, info)) { 1696 mt76_rx(&dev->mt76, q, skb); 1697 return; 1698 } 1699 fallthrough; 1700 default: 1701 dev_kfree_skb(skb); 1702 break; 1703 } 1704 } 1705 1706 static struct mt7996_msdu_page * 1707 mt7996_msdu_page_get_from_cache(struct mt7996_dev *dev) 1708 { 1709 struct mt7996_msdu_page *p = NULL; 1710 1711 spin_lock(&dev->wed_rro.lock); 1712 1713 if (!list_empty(&dev->wed_rro.page_cache)) { 1714 p = list_first_entry(&dev->wed_rro.page_cache, 1715 struct mt7996_msdu_page, list); 1716 list_del(&p->list); 1717 } 1718 1719 spin_unlock(&dev->wed_rro.lock); 1720 1721 return p; 1722 } 1723 1724 static struct mt7996_msdu_page *mt7996_msdu_page_get(struct mt7996_dev *dev) 1725 { 1726 struct mt7996_msdu_page *p; 1727 1728 p = mt7996_msdu_page_get_from_cache(dev); 1729 if (!p) { 1730 p = kzalloc(L1_CACHE_ALIGN(sizeof(*p)), GFP_ATOMIC); 1731 if (p) 1732 INIT_LIST_HEAD(&p->list); 1733 } 1734 1735 return p; 1736 } 1737 1738 static void mt7996_msdu_page_put_to_cache(struct mt7996_dev *dev, 1739 struct mt7996_msdu_page *p) 1740 { 1741 if (p->buf) { 1742 mt76_put_page_pool_buf(p->buf, false); 1743 p->buf = NULL; 1744 } 1745 1746 spin_lock(&dev->wed_rro.lock); 1747 list_add(&p->list, &dev->wed_rro.page_cache); 1748 spin_unlock(&dev->wed_rro.lock); 1749 } 1750 1751 static void mt7996_msdu_page_free_cache(struct mt7996_dev *dev) 1752 { 1753 while (true) { 1754 struct mt7996_msdu_page *p; 1755 1756 p = mt7996_msdu_page_get_from_cache(dev); 1757 if (!p) 1758 break; 1759 1760 if (p->buf) 1761 mt76_put_page_pool_buf(p->buf, false); 1762 1763 kfree(p); 1764 } 1765 } 1766 1767 static u32 mt7996_msdu_page_hash_from_addr(dma_addr_t dma_addr) 1768 { 1769 u32 val = 0; 1770 int i = 0; 1771 1772 while (dma_addr) { 1773 val += (u32)((dma_addr & 0xff) + i) % MT7996_RRO_MSDU_PG_HASH_SIZE; 1774 dma_addr >>= 8; 1775 i += 13; 1776 } 1777 1778 return val % MT7996_RRO_MSDU_PG_HASH_SIZE; 1779 } 1780 1781 static struct mt7996_msdu_page * 1782 mt7996_rro_msdu_page_get(struct mt7996_dev *dev, dma_addr_t dma_addr) 1783 { 1784 u32 hash = mt7996_msdu_page_hash_from_addr(dma_addr); 1785 struct mt7996_msdu_page *p, *tmp, *addr = NULL; 1786 1787 spin_lock(&dev->wed_rro.lock); 1788 1789 list_for_each_entry_safe(p, tmp, &dev->wed_rro.page_map[hash], 1790 list) { 1791 if (p->dma_addr == dma_addr) { 1792 list_del(&p->list); 1793 addr = p; 1794 break; 1795 } 1796 } 1797 1798 spin_unlock(&dev->wed_rro.lock); 1799 1800 return addr; 1801 } 1802 1803 static void mt7996_rx_token_put(struct mt7996_dev *dev) 1804 { 1805 int i; 1806 1807 for (i = 0; i < dev->mt76.rx_token_size; i++) { 1808 struct mt76_txwi_cache *t; 1809 1810 t = mt76_rx_token_release(&dev->mt76, i); 1811 if (!t || !t->ptr) 1812 continue; 1813 1814 mt76_put_page_pool_buf(t->ptr, false); 1815 t->dma_addr = 0; 1816 t->ptr = NULL; 1817 1818 mt76_put_rxwi(&dev->mt76, t); 1819 } 1820 } 1821 1822 void mt7996_rro_msdu_page_map_free(struct mt7996_dev *dev) 1823 { 1824 struct mt7996_msdu_page *p, *tmp; 1825 int i; 1826 1827 local_bh_disable(); 1828 1829 for (i = 0; i < ARRAY_SIZE(dev->wed_rro.page_map); i++) { 1830 list_for_each_entry_safe(p, tmp, &dev->wed_rro.page_map[i], 1831 list) { 1832 list_del_init(&p->list); 1833 if (p->buf) 1834 mt76_put_page_pool_buf(p->buf, false); 1835 kfree(p); 1836 } 1837 } 1838 mt7996_msdu_page_free_cache(dev); 1839 1840 local_bh_enable(); 1841 1842 mt7996_rx_token_put(dev); 1843 } 1844 1845 int mt7996_rro_msdu_page_add(struct mt76_dev *mdev, struct mt76_queue *q, 1846 dma_addr_t dma_addr, void *data) 1847 { 1848 struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76); 1849 struct mt7996_msdu_page_info *pinfo = data; 1850 struct mt7996_msdu_page *p; 1851 u32 hash; 1852 1853 pinfo->data |= cpu_to_le32(FIELD_PREP(MSDU_PAGE_INFO_OWNER_MASK, 1)); 1854 p = mt7996_msdu_page_get(dev); 1855 if (!p) 1856 return -ENOMEM; 1857 1858 p->buf = data; 1859 p->dma_addr = dma_addr; 1860 p->q = q; 1861 1862 hash = mt7996_msdu_page_hash_from_addr(dma_addr); 1863 1864 spin_lock(&dev->wed_rro.lock); 1865 list_add_tail(&p->list, &dev->wed_rro.page_map[hash]); 1866 spin_unlock(&dev->wed_rro.lock); 1867 1868 return 0; 1869 } 1870 1871 static struct mt7996_wed_rro_addr * 1872 mt7996_rro_addr_elem_get(struct mt7996_dev *dev, u16 session_id, u16 seq_num) 1873 { 1874 u32 idx = 0; 1875 void *addr; 1876 1877 if (session_id == MT7996_RRO_MAX_SESSION) { 1878 addr = dev->wed_rro.session.ptr; 1879 } else { 1880 idx = session_id / MT7996_RRO_BA_BITMAP_SESSION_SIZE; 1881 addr = dev->wed_rro.addr_elem[idx].ptr; 1882 1883 idx = session_id % MT7996_RRO_BA_BITMAP_SESSION_SIZE; 1884 idx = idx * MT7996_RRO_WINDOW_MAX_LEN; 1885 } 1886 idx += seq_num % MT7996_RRO_WINDOW_MAX_LEN; 1887 1888 return addr + idx * sizeof(struct mt7996_wed_rro_addr); 1889 } 1890 1891 #define MT996_RRO_SN_MASK GENMASK(11, 0) 1892 1893 void mt7996_rro_rx_process(struct mt76_dev *mdev, void *data) 1894 { 1895 struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76); 1896 struct mt76_wed_rro_ind *cmd = (struct mt76_wed_rro_ind *)data; 1897 u32 cmd_data0 = le32_to_cpu(cmd->data0); 1898 u32 cmd_data1 = le32_to_cpu(cmd->data1); 1899 u8 ind_reason = FIELD_GET(RRO_IND_DATA0_IND_REASON_MASK, cmd_data0); 1900 u16 start_seq = FIELD_GET(RRO_IND_DATA0_START_SEQ_MASK, cmd_data0); 1901 u16 seq_id = FIELD_GET(RRO_IND_DATA0_SEQ_ID_MASK, cmd_data0); 1902 u16 ind_count = FIELD_GET(RRO_IND_DATA1_IND_COUNT_MASK, cmd_data1); 1903 struct mt7996_msdu_page_info *pinfo = NULL; 1904 struct mt7996_msdu_page *p = NULL; 1905 int i, seq_num = 0; 1906 1907 for (i = 0; i < ind_count; i++) { 1908 struct mt7996_wed_rro_addr *e; 1909 struct mt76_rx_status *status; 1910 struct mt7996_rro_hif *rxd; 1911 int j, len, qid, data_len; 1912 struct mt76_txwi_cache *t; 1913 dma_addr_t dma_addr = 0; 1914 u16 rx_token_id, count; 1915 struct mt76_queue *q; 1916 struct sk_buff *skb; 1917 u32 info = 0, data; 1918 u8 signature; 1919 void *buf; 1920 bool ls; 1921 1922 seq_num = FIELD_GET(MT996_RRO_SN_MASK, start_seq + i); 1923 e = mt7996_rro_addr_elem_get(dev, seq_id, seq_num); 1924 data = le32_to_cpu(e->data); 1925 signature = FIELD_GET(WED_RRO_ADDR_SIGNATURE_MASK, data); 1926 if (signature != (seq_num / MT7996_RRO_WINDOW_MAX_LEN)) { 1927 u32 val = FIELD_PREP(WED_RRO_ADDR_SIGNATURE_MASK, 1928 0xff); 1929 1930 e->data |= cpu_to_le32(val); 1931 goto update_ack_seq_num; 1932 } 1933 1934 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1935 dma_addr = FIELD_GET(WED_RRO_ADDR_HEAD_HIGH_MASK, data); 1936 dma_addr <<= 32; 1937 #endif 1938 dma_addr |= le32_to_cpu(e->head_low); 1939 1940 count = FIELD_GET(WED_RRO_ADDR_COUNT_MASK, data); 1941 for (j = 0; j < count; j++) { 1942 if (!p) { 1943 p = mt7996_rro_msdu_page_get(dev, dma_addr); 1944 if (!p) 1945 continue; 1946 1947 dma_sync_single_for_cpu(mdev->dma_dev, p->dma_addr, 1948 SKB_WITH_OVERHEAD(p->q->buf_size), 1949 page_pool_get_dma_dir(p->q->page_pool)); 1950 pinfo = (struct mt7996_msdu_page_info *)p->buf; 1951 } 1952 1953 rxd = &pinfo->rxd[j % MT7996_MAX_HIF_RXD_IN_PG]; 1954 len = FIELD_GET(RRO_HIF_DATA1_SDL_MASK, 1955 le32_to_cpu(rxd->data1)); 1956 1957 rx_token_id = FIELD_GET(RRO_HIF_DATA4_RX_TOKEN_ID_MASK, 1958 le32_to_cpu(rxd->data4)); 1959 t = mt76_rx_token_release(mdev, rx_token_id); 1960 if (!t) 1961 goto next_page; 1962 1963 qid = t->qid; 1964 buf = t->ptr; 1965 q = &mdev->q_rx[qid]; 1966 dma_sync_single_for_cpu(mdev->dma_dev, t->dma_addr, 1967 SKB_WITH_OVERHEAD(q->buf_size), 1968 page_pool_get_dma_dir(q->page_pool)); 1969 1970 t->dma_addr = 0; 1971 t->ptr = NULL; 1972 mt76_put_rxwi(mdev, t); 1973 if (!buf) 1974 goto next_page; 1975 1976 if (q->rx_head) 1977 data_len = q->buf_size; 1978 else 1979 data_len = SKB_WITH_OVERHEAD(q->buf_size); 1980 1981 if (data_len < len + q->buf_offset) { 1982 dev_kfree_skb(q->rx_head); 1983 mt76_put_page_pool_buf(buf, false); 1984 q->rx_head = NULL; 1985 goto next_page; 1986 } 1987 1988 ls = FIELD_GET(RRO_HIF_DATA1_LS_MASK, 1989 le32_to_cpu(rxd->data1)); 1990 if (q->rx_head) { 1991 /* TODO: Take into account non-linear skb. */ 1992 mt76_put_page_pool_buf(buf, false); 1993 if (ls) { 1994 dev_kfree_skb(q->rx_head); 1995 q->rx_head = NULL; 1996 } 1997 goto next_page; 1998 } 1999 2000 if (ls && !mt7996_rx_check(mdev, buf, len)) 2001 goto next_page; 2002 2003 skb = build_skb(buf, q->buf_size); 2004 if (!skb) 2005 goto next_page; 2006 2007 skb_reserve(skb, q->buf_offset); 2008 skb_mark_for_recycle(skb); 2009 __skb_put(skb, len); 2010 2011 if (ind_reason == 1 || ind_reason == 2) { 2012 dev_kfree_skb(skb); 2013 goto next_page; 2014 } 2015 2016 if (!ls) { 2017 q->rx_head = skb; 2018 goto next_page; 2019 } 2020 2021 status = (struct mt76_rx_status *)skb->cb; 2022 if (seq_id != MT7996_RRO_MAX_SESSION) 2023 status->aggr = true; 2024 2025 mt7996_queue_rx_skb(mdev, qid, skb, &info); 2026 next_page: 2027 if ((j + 1) % MT7996_MAX_HIF_RXD_IN_PG == 0) { 2028 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2029 dma_addr = 2030 FIELD_GET(MSDU_PAGE_INFO_PG_HIGH_MASK, 2031 le32_to_cpu(pinfo->data)); 2032 dma_addr <<= 32; 2033 dma_addr |= le32_to_cpu(pinfo->pg_low); 2034 #else 2035 dma_addr = le32_to_cpu(pinfo->pg_low); 2036 #endif 2037 mt7996_msdu_page_put_to_cache(dev, p); 2038 p = NULL; 2039 } 2040 } 2041 2042 update_ack_seq_num: 2043 if ((i + 1) % 4 == 0) 2044 mt76_wr(dev, MT_RRO_ACK_SN_CTRL, 2045 FIELD_PREP(MT_RRO_ACK_SN_CTRL_SESSION_MASK, 2046 seq_id) | 2047 FIELD_PREP(MT_RRO_ACK_SN_CTRL_SN_MASK, 2048 seq_num)); 2049 if (p) { 2050 mt7996_msdu_page_put_to_cache(dev, p); 2051 p = NULL; 2052 } 2053 } 2054 2055 /* Update ack_seq_num for remaining addr_elem */ 2056 if (i % 4) 2057 mt76_wr(dev, MT_RRO_ACK_SN_CTRL, 2058 FIELD_PREP(MT_RRO_ACK_SN_CTRL_SESSION_MASK, seq_id) | 2059 FIELD_PREP(MT_RRO_ACK_SN_CTRL_SN_MASK, seq_num)); 2060 } 2061 2062 void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy) 2063 { 2064 struct mt7996_dev *dev = phy->dev; 2065 u32 reg = MT_WF_PHYRX_BAND_RX_CTRL1(phy->mt76->band_idx); 2066 2067 mt76_clear(dev, reg, MT_WF_PHYRX_BAND_RX_CTRL1_STSCNT_EN); 2068 mt76_set(dev, reg, BIT(11) | BIT(9)); 2069 } 2070 2071 void mt7996_mac_reset_counters(struct mt7996_phy *phy) 2072 { 2073 struct mt7996_dev *dev = phy->dev; 2074 u8 band_idx = phy->mt76->band_idx; 2075 int i; 2076 2077 for (i = 0; i < 16; i++) 2078 mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i)); 2079 2080 phy->mt76->survey_time = ktime_get_boottime(); 2081 2082 memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats)); 2083 2084 /* reset airtime counters */ 2085 mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band_idx), 2086 MT_WF_RMAC_MIB_RXTIME_CLR); 2087 2088 mt7996_mcu_get_chan_mib_info(phy, true); 2089 } 2090 2091 void mt7996_mac_set_coverage_class(struct mt7996_phy *phy) 2092 { 2093 s16 coverage_class = phy->coverage_class; 2094 struct mt7996_dev *dev = phy->dev; 2095 struct mt7996_phy *phy2 = mt7996_phy2(dev); 2096 struct mt7996_phy *phy3 = mt7996_phy3(dev); 2097 u32 reg_offset; 2098 u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) | 2099 FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48); 2100 u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) | 2101 FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28); 2102 u8 band_idx = phy->mt76->band_idx; 2103 int offset; 2104 2105 if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state)) 2106 return; 2107 2108 if (phy2) 2109 coverage_class = max_t(s16, dev->phy.coverage_class, 2110 phy2->coverage_class); 2111 2112 if (phy3) 2113 coverage_class = max_t(s16, coverage_class, 2114 phy3->coverage_class); 2115 2116 offset = 3 * coverage_class; 2117 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) | 2118 FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset); 2119 2120 mt76_wr(dev, MT_TMAC_CDTR(band_idx), cck + reg_offset); 2121 mt76_wr(dev, MT_TMAC_ODTR(band_idx), ofdm + reg_offset); 2122 } 2123 2124 void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band) 2125 { 2126 mt76_set(dev, MT_WF_PHYRX_CSD_BAND_RXTD12(band), 2127 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR_ONLY | 2128 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR); 2129 2130 mt76_set(dev, MT_WF_PHYRX_BAND_RX_CTRL1(band), 2131 FIELD_PREP(MT_WF_PHYRX_BAND_RX_CTRL1_IPI_EN, 0x5)); 2132 } 2133 2134 static u8 2135 mt7996_phy_get_nf(struct mt7996_phy *phy, u8 band_idx) 2136 { 2137 static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 }; 2138 struct mt7996_dev *dev = phy->dev; 2139 u32 val, sum = 0, n = 0; 2140 int ant, i; 2141 2142 for (ant = 0; ant < hweight8(phy->mt76->antenna_mask); ant++) { 2143 u32 reg = MT_WF_PHYRX_CSD_IRPI(band_idx, ant); 2144 2145 for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) { 2146 val = mt76_rr(dev, reg); 2147 sum += val * nf_power[i]; 2148 n += val; 2149 } 2150 } 2151 2152 return n ? sum / n : 0; 2153 } 2154 2155 void mt7996_update_channel(struct mt76_phy *mphy) 2156 { 2157 struct mt7996_phy *phy = mphy->priv; 2158 struct mt76_channel_state *state = mphy->chan_state; 2159 int nf; 2160 2161 mt7996_mcu_get_chan_mib_info(phy, false); 2162 2163 nf = mt7996_phy_get_nf(phy, mphy->band_idx); 2164 if (!phy->noise) 2165 phy->noise = nf << 4; 2166 else if (nf) 2167 phy->noise += nf - (phy->noise >> 4); 2168 2169 state->noise = -(phy->noise >> 4); 2170 } 2171 2172 static bool 2173 mt7996_wait_reset_state(struct mt7996_dev *dev, u32 state) 2174 { 2175 bool ret; 2176 2177 ret = wait_event_timeout(dev->reset_wait, 2178 (READ_ONCE(dev->recovery.state) & state), 2179 MT7996_RESET_TIMEOUT); 2180 2181 WARN(!ret, "Timeout waiting for MCU reset state %x\n", state); 2182 return ret; 2183 } 2184 2185 static void 2186 mt7996_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif) 2187 { 2188 struct ieee80211_bss_conf *link_conf; 2189 struct mt7996_phy *phy = priv; 2190 struct mt7996_dev *dev = phy->dev; 2191 unsigned int link_id; 2192 2193 2194 switch (vif->type) { 2195 case NL80211_IFTYPE_MESH_POINT: 2196 case NL80211_IFTYPE_ADHOC: 2197 case NL80211_IFTYPE_AP: 2198 break; 2199 default: 2200 return; 2201 } 2202 2203 for_each_vif_active_link(vif, link_conf, link_id) { 2204 struct mt7996_vif_link *link; 2205 2206 link = mt7996_vif_link(dev, vif, link_id); 2207 if (!link || link->phy != phy) 2208 continue; 2209 2210 mt7996_mcu_add_beacon(dev->mt76.hw, vif, link_conf, 2211 link_conf->enable_beacon); 2212 } 2213 } 2214 2215 void mt7996_mac_update_beacons(struct mt7996_phy *phy) 2216 { 2217 ieee80211_iterate_active_interfaces(phy->mt76->hw, 2218 IEEE80211_IFACE_ITER_RESUME_ALL, 2219 mt7996_update_vif_beacon, phy); 2220 } 2221 2222 static void 2223 mt7996_update_beacons(struct mt7996_dev *dev) 2224 { 2225 struct mt76_phy *phy2, *phy3; 2226 2227 mt7996_mac_update_beacons(&dev->phy); 2228 2229 phy2 = dev->mt76.phys[MT_BAND1]; 2230 if (phy2) 2231 mt7996_mac_update_beacons(phy2->priv); 2232 2233 phy3 = dev->mt76.phys[MT_BAND2]; 2234 if (phy3) 2235 mt7996_mac_update_beacons(phy3->priv); 2236 } 2237 2238 void mt7996_tx_token_put(struct mt7996_dev *dev) 2239 { 2240 struct mt76_txwi_cache *txwi; 2241 int id; 2242 2243 spin_lock_bh(&dev->mt76.token_lock); 2244 idr_for_each_entry(&dev->mt76.token, txwi, id) { 2245 mt7996_txwi_free(dev, txwi, NULL, NULL, NULL); 2246 dev->mt76.token_count--; 2247 } 2248 spin_unlock_bh(&dev->mt76.token_lock); 2249 idr_destroy(&dev->mt76.token); 2250 } 2251 2252 static int 2253 mt7996_mac_restart(struct mt7996_dev *dev) 2254 { 2255 struct mt76_dev *mdev = &dev->mt76; 2256 struct mt7996_phy *phy; 2257 int i, ret; 2258 2259 if (dev->hif2) { 2260 mt76_wr(dev, MT_INT1_MASK_CSR, 0x0); 2261 mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0); 2262 } 2263 2264 if (dev_is_pci(mdev->dev)) { 2265 mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0); 2266 if (dev->hif2) 2267 mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0); 2268 } 2269 2270 set_bit(MT76_MCU_RESET, &dev->mphy.state); 2271 mt7996_for_each_phy(dev, phy) 2272 set_bit(MT76_RESET, &phy->mt76->state); 2273 wake_up(&dev->mt76.mcu.wait); 2274 2275 /* lock/unlock all queues to ensure that no tx is pending */ 2276 mt7996_for_each_phy(dev, phy) 2277 mt76_txq_schedule_all(phy->mt76); 2278 2279 /* disable all tx/rx napi */ 2280 mt76_worker_disable(&dev->mt76.tx_worker); 2281 mt76_for_each_q_rx(mdev, i) { 2282 if (mtk_wed_device_active(&dev->mt76.mmio.wed) && 2283 mt76_queue_is_wed_rro(&mdev->q_rx[i])) 2284 continue; 2285 2286 if (mdev->q_rx[i].ndesc) 2287 napi_disable(&dev->mt76.napi[i]); 2288 } 2289 napi_disable(&dev->mt76.tx_napi); 2290 2291 /* token reinit */ 2292 mt7996_tx_token_put(dev); 2293 idr_init(&dev->mt76.token); 2294 2295 mt7996_dma_reset(dev, true); 2296 2297 mt76_for_each_q_rx(mdev, i) { 2298 if (mtk_wed_device_active(&dev->mt76.mmio.wed) && 2299 mt76_queue_is_wed_rro(&mdev->q_rx[i])) 2300 continue; 2301 2302 if (mdev->q_rx[i].ndesc) { 2303 napi_enable(&dev->mt76.napi[i]); 2304 local_bh_disable(); 2305 napi_schedule(&dev->mt76.napi[i]); 2306 local_bh_enable(); 2307 } 2308 } 2309 clear_bit(MT76_MCU_RESET, &dev->mphy.state); 2310 clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); 2311 2312 mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask); 2313 mt76_wr(dev, MT_INT_SOURCE_CSR, ~0); 2314 if (dev->hif2) { 2315 mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask); 2316 mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0); 2317 } 2318 if (dev_is_pci(mdev->dev)) { 2319 mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); 2320 if (dev->hif2) 2321 mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff); 2322 } 2323 2324 /* load firmware */ 2325 ret = mt7996_mcu_init_firmware(dev); 2326 if (ret) 2327 goto out; 2328 2329 if (mtk_wed_device_active(&dev->mt76.mmio.wed) && 2330 mt7996_has_hwrro(dev)) { 2331 u32 wed_irq_mask = dev->mt76.mmio.irqmask | 2332 MT_INT_TX_DONE_BAND2; 2333 2334 mt7996_rro_hw_init(dev); 2335 mt76_for_each_q_rx(&dev->mt76, i) { 2336 if (mt76_queue_is_wed_rro_ind(&dev->mt76.q_rx[i]) || 2337 mt76_queue_is_wed_rro_msdu_pg(&dev->mt76.q_rx[i])) 2338 mt76_queue_rx_reset(dev, i); 2339 } 2340 2341 mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask); 2342 mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask, 2343 false); 2344 mt7996_irq_enable(dev, wed_irq_mask); 2345 mt7996_irq_disable(dev, 0); 2346 } 2347 2348 if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) { 2349 mt76_wr(dev, MT_INT_PCIE1_MASK_CSR, 2350 MT_INT_TX_RX_DONE_EXT); 2351 mtk_wed_device_start(&dev->mt76.mmio.wed_hif2, 2352 MT_INT_TX_RX_DONE_EXT); 2353 } 2354 2355 /* set the necessary init items */ 2356 ret = mt7996_mcu_set_eeprom(dev); 2357 if (ret) 2358 goto out; 2359 2360 mt7996_mac_init(dev); 2361 mt7996_for_each_phy(dev, phy) 2362 mt7996_init_txpower(phy); 2363 ret = mt7996_txbf_init(dev); 2364 if (ret) 2365 goto out; 2366 2367 mt7996_for_each_phy(dev, phy) { 2368 if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state)) 2369 continue; 2370 2371 ret = mt7996_run(phy); 2372 if (ret) 2373 goto out; 2374 } 2375 2376 out: 2377 /* reset done */ 2378 mt7996_for_each_phy(dev, phy) 2379 clear_bit(MT76_RESET, &phy->mt76->state); 2380 2381 napi_enable(&dev->mt76.tx_napi); 2382 local_bh_disable(); 2383 napi_schedule(&dev->mt76.tx_napi); 2384 local_bh_enable(); 2385 2386 mt76_worker_enable(&dev->mt76.tx_worker); 2387 return ret; 2388 } 2389 2390 static void 2391 mt7996_mac_reset_sta_iter(void *data, struct ieee80211_sta *sta) 2392 { 2393 struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; 2394 struct mt7996_dev *dev = data; 2395 int i; 2396 2397 for (i = 0; i < ARRAY_SIZE(msta->link); i++) { 2398 struct mt7996_sta_link *msta_link = NULL; 2399 2400 msta_link = rcu_replace_pointer(msta->link[i], msta_link, 2401 lockdep_is_held(&dev->mt76.mutex)); 2402 if (!msta_link) 2403 continue; 2404 2405 mt7996_mac_sta_deinit_link(dev, msta_link); 2406 2407 if (msta->deflink_id == i) { 2408 msta->deflink_id = IEEE80211_LINK_UNSPECIFIED; 2409 continue; 2410 } 2411 2412 kfree_rcu(msta_link, rcu_head); 2413 } 2414 } 2415 2416 static void 2417 mt7996_mac_reset_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) 2418 { 2419 struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv; 2420 struct mt76_vif_data *mvif = mlink->mvif; 2421 struct mt7996_dev *dev = data; 2422 int i; 2423 2424 rcu_read_lock(); 2425 for (i = 0; i < ARRAY_SIZE(mvif->link); i++) { 2426 2427 mlink = mt76_dereference(mvif->link[i], &dev->mt76); 2428 if (!mlink || mlink == (struct mt76_vif_link *)vif->drv_priv) 2429 continue; 2430 2431 rcu_assign_pointer(mvif->link[i], NULL); 2432 kfree_rcu(mlink, rcu_head); 2433 } 2434 rcu_read_unlock(); 2435 } 2436 2437 static void 2438 mt7996_mac_full_reset(struct mt7996_dev *dev) 2439 { 2440 struct ieee80211_hw *hw = mt76_hw(dev); 2441 struct mt7996_phy *phy; 2442 LIST_HEAD(list); 2443 int i; 2444 2445 dev->recovery.hw_full_reset = true; 2446 2447 wake_up(&dev->mt76.mcu.wait); 2448 ieee80211_stop_queues(hw); 2449 2450 cancel_work_sync(&dev->wed_rro.work); 2451 mt7996_for_each_phy(dev, phy) 2452 cancel_delayed_work_sync(&phy->mt76->mac_work); 2453 2454 mt76_abort_scan(&dev->mt76); 2455 2456 mutex_lock(&dev->mt76.mutex); 2457 for (i = 0; i < 10; i++) { 2458 if (!mt7996_mac_restart(dev)) 2459 break; 2460 } 2461 2462 if (i == 10) 2463 dev_err(dev->mt76.dev, "chip full reset failed\n"); 2464 2465 mt7996_for_each_phy(dev, phy) 2466 phy->omac_mask = 0; 2467 2468 ieee80211_iterate_stations_atomic(hw, mt7996_mac_reset_sta_iter, dev); 2469 ieee80211_iterate_active_interfaces_atomic(hw, 2470 IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER, 2471 mt7996_mac_reset_vif_iter, dev); 2472 mt76_reset_device(&dev->mt76); 2473 2474 INIT_LIST_HEAD(&dev->sta_rc_list); 2475 INIT_LIST_HEAD(&dev->twt_list); 2476 2477 spin_lock_bh(&dev->wed_rro.lock); 2478 list_splice_init(&dev->wed_rro.poll_list, &list); 2479 spin_unlock_bh(&dev->wed_rro.lock); 2480 2481 while (!list_empty(&list)) { 2482 struct mt7996_wed_rro_session_id *e; 2483 2484 e = list_first_entry(&list, struct mt7996_wed_rro_session_id, 2485 list); 2486 list_del_init(&e->list); 2487 kfree(e); 2488 } 2489 2490 i = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7996_WTBL_STA); 2491 dev->mt76.global_wcid.idx = i; 2492 dev->recovery.hw_full_reset = false; 2493 2494 mutex_unlock(&dev->mt76.mutex); 2495 2496 ieee80211_restart_hw(mt76_hw(dev)); 2497 } 2498 2499 void mt7996_mac_reset_work(struct work_struct *work) 2500 { 2501 struct ieee80211_hw *hw; 2502 struct mt7996_dev *dev; 2503 struct mt7996_phy *phy; 2504 int i; 2505 2506 dev = container_of(work, struct mt7996_dev, reset_work); 2507 hw = mt76_hw(dev); 2508 2509 /* chip full reset */ 2510 if (dev->recovery.restart) { 2511 /* disable WA/WM WDT */ 2512 mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA, 2513 MT_MCU_CMD_WDT_MASK); 2514 2515 if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT) 2516 dev->recovery.wa_reset_count++; 2517 else 2518 dev->recovery.wm_reset_count++; 2519 2520 mt7996_mac_full_reset(dev); 2521 2522 /* enable mcu irq */ 2523 mt7996_irq_enable(dev, MT_INT_MCU_CMD); 2524 mt7996_irq_disable(dev, 0); 2525 2526 /* enable WA/WM WDT */ 2527 mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK); 2528 2529 dev->recovery.state = MT_MCU_CMD_NORMAL_STATE; 2530 dev->recovery.restart = false; 2531 return; 2532 } 2533 2534 if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA)) 2535 return; 2536 2537 dev_info(dev->mt76.dev,"\n%s L1 SER recovery start.", 2538 wiphy_name(hw->wiphy)); 2539 2540 if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) 2541 mtk_wed_device_stop(&dev->mt76.mmio.wed_hif2); 2542 2543 if (mtk_wed_device_active(&dev->mt76.mmio.wed)) 2544 mtk_wed_device_stop(&dev->mt76.mmio.wed); 2545 2546 ieee80211_stop_queues(mt76_hw(dev)); 2547 2548 set_bit(MT76_RESET, &dev->mphy.state); 2549 set_bit(MT76_MCU_RESET, &dev->mphy.state); 2550 mt76_abort_scan(&dev->mt76); 2551 wake_up(&dev->mt76.mcu.wait); 2552 2553 cancel_work_sync(&dev->wed_rro.work); 2554 mt7996_for_each_phy(dev, phy) { 2555 mt76_abort_roc(phy->mt76); 2556 set_bit(MT76_RESET, &phy->mt76->state); 2557 cancel_delayed_work_sync(&phy->mt76->mac_work); 2558 } 2559 2560 mt76_worker_disable(&dev->mt76.tx_worker); 2561 mt76_for_each_q_rx(&dev->mt76, i) { 2562 if (mtk_wed_device_active(&dev->mt76.mmio.wed) && 2563 mt76_queue_is_wed_rro(&dev->mt76.q_rx[i])) 2564 continue; 2565 2566 napi_disable(&dev->mt76.napi[i]); 2567 } 2568 napi_disable(&dev->mt76.tx_napi); 2569 2570 mutex_lock(&dev->mt76.mutex); 2571 2572 mt7996_npu_hw_stop(dev); 2573 2574 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED); 2575 2576 if (mt7996_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) { 2577 mt7996_dma_reset(dev, false); 2578 2579 mt7996_tx_token_put(dev); 2580 idr_init(&dev->mt76.token); 2581 2582 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT); 2583 mt7996_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE); 2584 } 2585 2586 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE); 2587 mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE); 2588 2589 /* enable DMA Rx/Tx and interrupt */ 2590 mt7996_dma_start(dev, false, false); 2591 2592 if (!is_mt7996(&dev->mt76) && dev->mt76.hwrro_mode == MT76_HWRRO_V3) 2593 mt76_wr(dev, MT_RRO_3_0_EMU_CONF, MT_RRO_3_0_EMU_CONF_EN_MASK); 2594 2595 if (mtk_wed_device_active(&dev->mt76.mmio.wed)) { 2596 u32 wed_irq_mask = MT_INT_TX_DONE_BAND2 | 2597 dev->mt76.mmio.irqmask; 2598 2599 mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask); 2600 mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask, 2601 true); 2602 mt7996_irq_enable(dev, wed_irq_mask); 2603 mt7996_irq_disable(dev, 0); 2604 } 2605 2606 if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) { 2607 mt76_wr(dev, MT_INT_PCIE1_MASK_CSR, MT_INT_TX_RX_DONE_EXT); 2608 mtk_wed_device_start(&dev->mt76.mmio.wed_hif2, 2609 MT_INT_TX_RX_DONE_EXT); 2610 } 2611 2612 clear_bit(MT76_MCU_RESET, &dev->mphy.state); 2613 mt7996_for_each_phy(dev, phy) 2614 clear_bit(MT76_RESET, &phy->mt76->state); 2615 2616 mt76_for_each_q_rx(&dev->mt76, i) { 2617 if (mtk_wed_device_active(&dev->mt76.mmio.wed) && 2618 mt76_queue_is_wed_rro(&dev->mt76.q_rx[i])) 2619 continue; 2620 2621 napi_enable(&dev->mt76.napi[i]); 2622 local_bh_disable(); 2623 napi_schedule(&dev->mt76.napi[i]); 2624 local_bh_enable(); 2625 } 2626 2627 tasklet_schedule(&dev->mt76.irq_tasklet); 2628 2629 mt76_worker_enable(&dev->mt76.tx_worker); 2630 2631 napi_enable(&dev->mt76.tx_napi); 2632 local_bh_disable(); 2633 napi_schedule(&dev->mt76.tx_napi); 2634 local_bh_enable(); 2635 2636 ieee80211_wake_queues(hw); 2637 mt7996_update_beacons(dev); 2638 2639 mutex_unlock(&dev->mt76.mutex); 2640 2641 mt7996_npu_hw_init(dev); 2642 2643 mt7996_for_each_phy(dev, phy) 2644 ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work, 2645 MT7996_WATCHDOG_TIME); 2646 dev_info(dev->mt76.dev,"\n%s L1 SER recovery completed.", 2647 wiphy_name(dev->mt76.hw->wiphy)); 2648 } 2649 2650 /* firmware coredump */ 2651 void mt7996_mac_dump_work(struct work_struct *work) 2652 { 2653 const struct mt7996_mem_region *mem_region; 2654 struct mt7996_crash_data *crash_data; 2655 struct mt7996_dev *dev; 2656 struct mt7996_mem_hdr *hdr; 2657 size_t buf_len; 2658 int i; 2659 u32 num; 2660 u8 *buf; 2661 2662 dev = container_of(work, struct mt7996_dev, dump_work); 2663 2664 mutex_lock(&dev->dump_mutex); 2665 2666 crash_data = mt7996_coredump_new(dev); 2667 if (!crash_data) { 2668 mutex_unlock(&dev->dump_mutex); 2669 goto skip_coredump; 2670 } 2671 2672 mem_region = mt7996_coredump_get_mem_layout(dev, &num); 2673 if (!mem_region || !crash_data->memdump_buf_len) { 2674 mutex_unlock(&dev->dump_mutex); 2675 goto skip_memdump; 2676 } 2677 2678 buf = crash_data->memdump_buf; 2679 buf_len = crash_data->memdump_buf_len; 2680 2681 /* dumping memory content... */ 2682 memset(buf, 0, buf_len); 2683 for (i = 0; i < num; i++) { 2684 if (mem_region->len > buf_len) { 2685 dev_warn(dev->mt76.dev, "%s len %zu is too large\n", 2686 mem_region->name, mem_region->len); 2687 break; 2688 } 2689 2690 /* reserve space for the header */ 2691 hdr = (void *)buf; 2692 buf += sizeof(*hdr); 2693 buf_len -= sizeof(*hdr); 2694 2695 mt7996_memcpy_fromio(dev, buf, mem_region->start, 2696 mem_region->len); 2697 2698 hdr->start = mem_region->start; 2699 hdr->len = mem_region->len; 2700 2701 if (!mem_region->len) 2702 /* note: the header remains, just with zero length */ 2703 break; 2704 2705 buf += mem_region->len; 2706 buf_len -= mem_region->len; 2707 2708 mem_region++; 2709 } 2710 2711 mutex_unlock(&dev->dump_mutex); 2712 2713 skip_memdump: 2714 mt7996_coredump_submit(dev); 2715 skip_coredump: 2716 queue_work(dev->mt76.wq, &dev->reset_work); 2717 } 2718 2719 void mt7996_reset(struct mt7996_dev *dev) 2720 { 2721 if (!dev->recovery.hw_init_done) 2722 return; 2723 2724 if (dev->recovery.hw_full_reset) 2725 return; 2726 2727 /* wm/wa exception: do full recovery */ 2728 if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) { 2729 dev->recovery.restart = true; 2730 dev_info(dev->mt76.dev, 2731 "%s indicated firmware crash, attempting recovery\n", 2732 wiphy_name(dev->mt76.hw->wiphy)); 2733 2734 mt7996_irq_disable(dev, MT_INT_MCU_CMD); 2735 queue_work(dev->mt76.wq, &dev->dump_work); 2736 return; 2737 } 2738 2739 queue_work(dev->mt76.wq, &dev->reset_work); 2740 wake_up(&dev->reset_wait); 2741 } 2742 2743 void mt7996_mac_update_stats(struct mt7996_phy *phy) 2744 { 2745 struct mt76_mib_stats *mib = &phy->mib; 2746 struct mt7996_dev *dev = phy->dev; 2747 u8 band_idx = phy->mt76->band_idx; 2748 u32 cnt; 2749 int i; 2750 2751 cnt = mt76_rr(dev, MT_MIB_RSCR1(band_idx)); 2752 mib->fcs_err_cnt += cnt; 2753 2754 cnt = mt76_rr(dev, MT_MIB_RSCR33(band_idx)); 2755 mib->rx_fifo_full_cnt += cnt; 2756 2757 cnt = mt76_rr(dev, MT_MIB_RSCR31(band_idx)); 2758 mib->rx_mpdu_cnt += cnt; 2759 2760 cnt = mt76_rr(dev, MT_MIB_SDR6(band_idx)); 2761 mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt); 2762 2763 cnt = mt76_rr(dev, MT_MIB_RVSR0(band_idx)); 2764 mib->rx_vector_mismatch_cnt += cnt; 2765 2766 cnt = mt76_rr(dev, MT_MIB_RSCR35(band_idx)); 2767 mib->rx_delimiter_fail_cnt += cnt; 2768 2769 cnt = mt76_rr(dev, MT_MIB_RSCR36(band_idx)); 2770 mib->rx_len_mismatch_cnt += cnt; 2771 2772 cnt = mt76_rr(dev, MT_MIB_TSCR0(band_idx)); 2773 mib->tx_ampdu_cnt += cnt; 2774 2775 cnt = mt76_rr(dev, MT_MIB_TSCR2(band_idx)); 2776 mib->tx_stop_q_empty_cnt += cnt; 2777 2778 cnt = mt76_rr(dev, MT_MIB_TSCR3(band_idx)); 2779 mib->tx_mpdu_attempts_cnt += cnt; 2780 2781 cnt = mt76_rr(dev, MT_MIB_TSCR4(band_idx)); 2782 mib->tx_mpdu_success_cnt += cnt; 2783 2784 cnt = mt76_rr(dev, MT_MIB_RSCR27(band_idx)); 2785 mib->rx_ampdu_cnt += cnt; 2786 2787 cnt = mt76_rr(dev, MT_MIB_RSCR28(band_idx)); 2788 mib->rx_ampdu_bytes_cnt += cnt; 2789 2790 cnt = mt76_rr(dev, MT_MIB_RSCR29(band_idx)); 2791 mib->rx_ampdu_valid_subframe_cnt += cnt; 2792 2793 cnt = mt76_rr(dev, MT_MIB_RSCR30(band_idx)); 2794 mib->rx_ampdu_valid_subframe_bytes_cnt += cnt; 2795 2796 cnt = mt76_rr(dev, MT_MIB_SDR27(band_idx)); 2797 mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT, cnt); 2798 2799 cnt = mt76_rr(dev, MT_MIB_SDR28(band_idx)); 2800 mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT, cnt); 2801 2802 cnt = mt76_rr(dev, MT_UMIB_RPDCR(band_idx)); 2803 mib->rx_pfdrop_cnt += cnt; 2804 2805 cnt = mt76_rr(dev, MT_MIB_RVSR1(band_idx)); 2806 mib->rx_vec_queue_overflow_drop_cnt += cnt; 2807 2808 cnt = mt76_rr(dev, MT_MIB_TSCR1(band_idx)); 2809 mib->rx_ba_cnt += cnt; 2810 2811 cnt = mt76_rr(dev, MT_MIB_BSCR0(band_idx)); 2812 mib->tx_bf_ebf_ppdu_cnt += cnt; 2813 2814 cnt = mt76_rr(dev, MT_MIB_BSCR1(band_idx)); 2815 mib->tx_bf_ibf_ppdu_cnt += cnt; 2816 2817 cnt = mt76_rr(dev, MT_MIB_BSCR2(band_idx)); 2818 mib->tx_mu_bf_cnt += cnt; 2819 2820 cnt = mt76_rr(dev, MT_MIB_TSCR5(band_idx)); 2821 mib->tx_mu_mpdu_cnt += cnt; 2822 2823 cnt = mt76_rr(dev, MT_MIB_TSCR6(band_idx)); 2824 mib->tx_mu_acked_mpdu_cnt += cnt; 2825 2826 cnt = mt76_rr(dev, MT_MIB_TSCR7(band_idx)); 2827 mib->tx_su_acked_mpdu_cnt += cnt; 2828 2829 cnt = mt76_rr(dev, MT_MIB_BSCR3(band_idx)); 2830 mib->tx_bf_rx_fb_ht_cnt += cnt; 2831 mib->tx_bf_rx_fb_all_cnt += cnt; 2832 2833 cnt = mt76_rr(dev, MT_MIB_BSCR4(band_idx)); 2834 mib->tx_bf_rx_fb_vht_cnt += cnt; 2835 mib->tx_bf_rx_fb_all_cnt += cnt; 2836 2837 cnt = mt76_rr(dev, MT_MIB_BSCR5(band_idx)); 2838 mib->tx_bf_rx_fb_he_cnt += cnt; 2839 mib->tx_bf_rx_fb_all_cnt += cnt; 2840 2841 cnt = mt76_rr(dev, MT_MIB_BSCR6(band_idx)); 2842 mib->tx_bf_rx_fb_eht_cnt += cnt; 2843 mib->tx_bf_rx_fb_all_cnt += cnt; 2844 2845 cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(band_idx)); 2846 mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt); 2847 mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt); 2848 mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt); 2849 2850 cnt = mt76_rr(dev, MT_MIB_BSCR7(band_idx)); 2851 mib->tx_bf_fb_trig_cnt += cnt; 2852 2853 cnt = mt76_rr(dev, MT_MIB_BSCR17(band_idx)); 2854 mib->tx_bf_fb_cpl_cnt += cnt; 2855 2856 for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) { 2857 cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i)); 2858 mib->tx_amsdu[i] += cnt; 2859 mib->tx_amsdu_cnt += cnt; 2860 } 2861 2862 /* rts count */ 2863 cnt = mt76_rr(dev, MT_MIB_BTSCR5(band_idx)); 2864 mib->rts_cnt += cnt; 2865 2866 /* rts retry count */ 2867 cnt = mt76_rr(dev, MT_MIB_BTSCR6(band_idx)); 2868 mib->rts_retries_cnt += cnt; 2869 2870 /* ba miss count */ 2871 cnt = mt76_rr(dev, MT_MIB_BTSCR0(band_idx)); 2872 mib->ba_miss_cnt += cnt; 2873 2874 /* ack fail count */ 2875 cnt = mt76_rr(dev, MT_MIB_BFTFCR(band_idx)); 2876 mib->ack_fail_cnt += cnt; 2877 2878 for (i = 0; i < 16; i++) { 2879 cnt = mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i)); 2880 phy->mt76->aggr_stats[i] += cnt; 2881 } 2882 } 2883 2884 void mt7996_mac_sta_rc_work(struct work_struct *work) 2885 { 2886 struct mt7996_dev *dev = container_of(work, struct mt7996_dev, rc_work); 2887 struct mt7996_sta_link *msta_link; 2888 struct ieee80211_vif *vif; 2889 struct mt7996_vif *mvif; 2890 LIST_HEAD(list); 2891 u32 changed; 2892 2893 mutex_lock(&dev->mt76.mutex); 2894 2895 spin_lock_bh(&dev->mt76.sta_poll_lock); 2896 list_splice_init(&dev->sta_rc_list, &list); 2897 2898 while (!list_empty(&list)) { 2899 msta_link = list_first_entry(&list, struct mt7996_sta_link, 2900 rc_list); 2901 list_del_init(&msta_link->rc_list); 2902 2903 changed = msta_link->changed; 2904 msta_link->changed = 0; 2905 mvif = msta_link->sta->vif; 2906 vif = container_of((void *)mvif, struct ieee80211_vif, 2907 drv_priv); 2908 2909 spin_unlock_bh(&dev->mt76.sta_poll_lock); 2910 2911 if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED | 2912 IEEE80211_RC_NSS_CHANGED | 2913 IEEE80211_RC_BW_CHANGED)) 2914 mt7996_mcu_add_rate_ctrl(dev, msta_link->sta, vif, 2915 msta_link->wcid.link_id, 2916 true); 2917 2918 if (changed & IEEE80211_RC_SMPS_CHANGED) 2919 mt7996_mcu_set_fixed_field(dev, msta_link->sta, NULL, 2920 msta_link->wcid.link_id, 2921 RATE_PARAM_MMPS_UPDATE); 2922 2923 spin_lock_bh(&dev->mt76.sta_poll_lock); 2924 } 2925 2926 spin_unlock_bh(&dev->mt76.sta_poll_lock); 2927 2928 mutex_unlock(&dev->mt76.mutex); 2929 } 2930 2931 void mt7996_mac_work(struct work_struct *work) 2932 { 2933 struct mt7996_phy *phy; 2934 struct mt76_phy *mphy; 2935 2936 mphy = (struct mt76_phy *)container_of(work, struct mt76_phy, 2937 mac_work.work); 2938 phy = mphy->priv; 2939 2940 mutex_lock(&mphy->dev->mutex); 2941 2942 mt76_update_survey(mphy); 2943 if (++mphy->mac_work_count == 5) { 2944 mphy->mac_work_count = 0; 2945 2946 mt7996_mac_update_stats(phy); 2947 2948 mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_RATE); 2949 if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) { 2950 mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_ADM_STAT); 2951 mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_MSDU_COUNT); 2952 } 2953 } 2954 2955 mutex_unlock(&mphy->dev->mutex); 2956 2957 mt76_tx_status_check(mphy->dev, false); 2958 2959 ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, 2960 MT7996_WATCHDOG_TIME); 2961 } 2962 2963 static void mt7996_dfs_stop_radar_detector(struct mt7996_phy *phy) 2964 { 2965 struct mt7996_dev *dev = phy->dev; 2966 int rdd_idx = mt7996_get_rdd_idx(phy, false); 2967 2968 if (rdd_idx < 0) 2969 return; 2970 2971 mt7996_mcu_rdd_cmd(dev, RDD_STOP, rdd_idx, 0); 2972 } 2973 2974 static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int rdd_idx) 2975 { 2976 int err, region; 2977 2978 switch (dev->mt76.region) { 2979 case NL80211_DFS_ETSI: 2980 region = 0; 2981 break; 2982 case NL80211_DFS_JP: 2983 region = 2; 2984 break; 2985 case NL80211_DFS_FCC: 2986 default: 2987 region = 1; 2988 break; 2989 } 2990 2991 err = mt7996_mcu_rdd_cmd(dev, RDD_START, rdd_idx, region); 2992 if (err < 0) 2993 return err; 2994 2995 return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, rdd_idx, 1); 2996 } 2997 2998 static int mt7996_dfs_start_radar_detector(struct mt7996_phy *phy) 2999 { 3000 struct mt7996_dev *dev = phy->dev; 3001 int err, rdd_idx; 3002 3003 rdd_idx = mt7996_get_rdd_idx(phy, false); 3004 if (rdd_idx < 0) 3005 return -EINVAL; 3006 3007 /* start CAC */ 3008 err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, rdd_idx, 0); 3009 if (err < 0) 3010 return err; 3011 3012 err = mt7996_dfs_start_rdd(dev, rdd_idx); 3013 3014 return err; 3015 } 3016 3017 static int 3018 mt7996_dfs_init_radar_specs(struct mt7996_phy *phy) 3019 { 3020 const struct mt7996_dfs_radar_spec *radar_specs; 3021 struct mt7996_dev *dev = phy->dev; 3022 int err, i; 3023 3024 switch (dev->mt76.region) { 3025 case NL80211_DFS_FCC: 3026 radar_specs = &fcc_radar_specs; 3027 err = mt7996_mcu_set_fcc5_lpn(dev, 8); 3028 if (err < 0) 3029 return err; 3030 break; 3031 case NL80211_DFS_ETSI: 3032 radar_specs = &etsi_radar_specs; 3033 break; 3034 case NL80211_DFS_JP: 3035 radar_specs = &jp_radar_specs; 3036 break; 3037 default: 3038 return -EINVAL; 3039 } 3040 3041 for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) { 3042 err = mt7996_mcu_set_radar_th(dev, i, 3043 &radar_specs->radar_pattern[i]); 3044 if (err < 0) 3045 return err; 3046 } 3047 3048 return mt7996_mcu_set_pulse_th(dev, &radar_specs->pulse_th); 3049 } 3050 3051 int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy) 3052 { 3053 struct mt7996_dev *dev = phy->dev; 3054 enum mt76_dfs_state dfs_state, prev_state; 3055 int err, rdd_idx = mt7996_get_rdd_idx(phy, false); 3056 3057 prev_state = phy->mt76->dfs_state; 3058 dfs_state = mt76_phy_dfs_state(phy->mt76); 3059 3060 if (prev_state == dfs_state || rdd_idx < 0) 3061 return 0; 3062 3063 if (prev_state == MT_DFS_STATE_UNKNOWN) 3064 mt7996_dfs_stop_radar_detector(phy); 3065 3066 if (dfs_state == MT_DFS_STATE_DISABLED) 3067 goto stop; 3068 3069 if (prev_state <= MT_DFS_STATE_DISABLED) { 3070 err = mt7996_dfs_init_radar_specs(phy); 3071 if (err < 0) 3072 return err; 3073 3074 err = mt7996_dfs_start_radar_detector(phy); 3075 if (err < 0) 3076 return err; 3077 3078 phy->mt76->dfs_state = MT_DFS_STATE_CAC; 3079 } 3080 3081 if (dfs_state == MT_DFS_STATE_CAC) 3082 return 0; 3083 3084 err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END, rdd_idx, 0); 3085 if (err < 0) { 3086 phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN; 3087 return err; 3088 } 3089 3090 phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE; 3091 return 0; 3092 3093 stop: 3094 err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START, rdd_idx, 0); 3095 if (err < 0) 3096 return err; 3097 3098 mt7996_dfs_stop_radar_detector(phy); 3099 phy->mt76->dfs_state = MT_DFS_STATE_DISABLED; 3100 3101 return 0; 3102 } 3103 3104 static int 3105 mt7996_mac_twt_duration_align(int duration) 3106 { 3107 return duration << 8; 3108 } 3109 3110 static u64 3111 mt7996_mac_twt_sched_list_add(struct mt7996_dev *dev, 3112 struct mt7996_twt_flow *flow) 3113 { 3114 struct mt7996_twt_flow *iter, *iter_next; 3115 u32 duration = flow->duration << 8; 3116 u64 start_tsf; 3117 3118 iter = list_first_entry_or_null(&dev->twt_list, 3119 struct mt7996_twt_flow, list); 3120 if (!iter || !iter->sched || iter->start_tsf > duration) { 3121 /* add flow as first entry in the list */ 3122 list_add(&flow->list, &dev->twt_list); 3123 return 0; 3124 } 3125 3126 list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) { 3127 start_tsf = iter->start_tsf + 3128 mt7996_mac_twt_duration_align(iter->duration); 3129 if (list_is_last(&iter->list, &dev->twt_list)) 3130 break; 3131 3132 if (!iter_next->sched || 3133 iter_next->start_tsf > start_tsf + duration) { 3134 list_add(&flow->list, &iter->list); 3135 goto out; 3136 } 3137 } 3138 3139 /* add flow as last entry in the list */ 3140 list_add_tail(&flow->list, &dev->twt_list); 3141 out: 3142 return start_tsf; 3143 } 3144 3145 static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt) 3146 { 3147 struct ieee80211_twt_params *twt_agrt; 3148 u64 interval, duration; 3149 u16 mantissa; 3150 u8 exp; 3151 3152 /* only individual agreement supported */ 3153 if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST) 3154 return -EOPNOTSUPP; 3155 3156 /* only 256us unit supported */ 3157 if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) 3158 return -EOPNOTSUPP; 3159 3160 twt_agrt = (struct ieee80211_twt_params *)twt->params; 3161 3162 /* explicit agreement not supported */ 3163 if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT))) 3164 return -EOPNOTSUPP; 3165 3166 exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, 3167 le16_to_cpu(twt_agrt->req_type)); 3168 mantissa = le16_to_cpu(twt_agrt->mantissa); 3169 duration = twt_agrt->min_twt_dur << 8; 3170 3171 interval = (u64)mantissa << exp; 3172 if (interval < duration) 3173 return -EOPNOTSUPP; 3174 3175 return 0; 3176 } 3177 3178 static bool 3179 mt7996_mac_twt_param_equal(struct mt7996_sta_link *msta_link, 3180 struct ieee80211_twt_params *twt_agrt) 3181 { 3182 u16 type = le16_to_cpu(twt_agrt->req_type); 3183 u8 exp; 3184 int i; 3185 3186 exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type); 3187 for (i = 0; i < MT7996_MAX_STA_TWT_AGRT; i++) { 3188 struct mt7996_twt_flow *f; 3189 3190 if (!(msta_link->twt.flowid_mask & BIT(i))) 3191 continue; 3192 3193 f = &msta_link->twt.flow[i]; 3194 if (f->duration == twt_agrt->min_twt_dur && 3195 f->mantissa == twt_agrt->mantissa && 3196 f->exp == exp && 3197 f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) && 3198 f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) && 3199 f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER)) 3200 return true; 3201 } 3202 3203 return false; 3204 } 3205 3206 void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw, 3207 struct ieee80211_sta *sta, 3208 struct ieee80211_twt_setup *twt) 3209 { 3210 enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT; 3211 struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; 3212 struct ieee80211_twt_params *twt_agrt = (void *)twt->params; 3213 struct mt7996_sta_link *msta_link = &msta->deflink; 3214 u16 req_type = le16_to_cpu(twt_agrt->req_type); 3215 enum ieee80211_twt_setup_cmd sta_setup_cmd; 3216 struct mt7996_dev *dev = mt7996_hw_dev(hw); 3217 struct mt7996_twt_flow *flow; 3218 u8 flowid, table_id, exp; 3219 3220 if (mt7996_mac_check_twt_req(twt)) 3221 goto out; 3222 3223 mutex_lock(&dev->mt76.mutex); 3224 3225 if (dev->twt.n_agrt == MT7996_MAX_TWT_AGRT) 3226 goto unlock; 3227 3228 if (hweight8(msta_link->twt.flowid_mask) == 3229 ARRAY_SIZE(msta_link->twt.flow)) 3230 goto unlock; 3231 3232 if (twt_agrt->min_twt_dur < MT7996_MIN_TWT_DUR) { 3233 setup_cmd = TWT_SETUP_CMD_DICTATE; 3234 twt_agrt->min_twt_dur = MT7996_MIN_TWT_DUR; 3235 goto unlock; 3236 } 3237 3238 if (mt7996_mac_twt_param_equal(msta_link, twt_agrt)) 3239 goto unlock; 3240 3241 flowid = ffs(~msta_link->twt.flowid_mask) - 1; 3242 twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID); 3243 twt_agrt->req_type |= le16_encode_bits(flowid, 3244 IEEE80211_TWT_REQTYPE_FLOWID); 3245 3246 table_id = ffs(~dev->twt.table_mask) - 1; 3247 exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type); 3248 sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type); 3249 3250 flow = &msta_link->twt.flow[flowid]; 3251 memset(flow, 0, sizeof(*flow)); 3252 INIT_LIST_HEAD(&flow->list); 3253 flow->wcid = msta_link->wcid.idx; 3254 flow->table_id = table_id; 3255 flow->id = flowid; 3256 flow->duration = twt_agrt->min_twt_dur; 3257 flow->mantissa = twt_agrt->mantissa; 3258 flow->exp = exp; 3259 flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION); 3260 flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE); 3261 flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER); 3262 3263 if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST || 3264 sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) { 3265 u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp; 3266 u64 flow_tsf, curr_tsf; 3267 u32 rem; 3268 3269 flow->sched = true; 3270 flow->start_tsf = mt7996_mac_twt_sched_list_add(dev, flow); 3271 curr_tsf = __mt7996_get_tsf(hw, &msta->vif->deflink); 3272 div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem); 3273 flow_tsf = curr_tsf + interval - rem; 3274 twt_agrt->twt = cpu_to_le64(flow_tsf); 3275 } else { 3276 list_add_tail(&flow->list, &dev->twt_list); 3277 } 3278 flow->tsf = le64_to_cpu(twt_agrt->twt); 3279 3280 if (mt7996_mcu_twt_agrt_update(dev, &msta->vif->deflink, flow, 3281 MCU_TWT_AGRT_ADD)) 3282 goto unlock; 3283 3284 setup_cmd = TWT_SETUP_CMD_ACCEPT; 3285 dev->twt.table_mask |= BIT(table_id); 3286 msta_link->twt.flowid_mask |= BIT(flowid); 3287 dev->twt.n_agrt++; 3288 3289 unlock: 3290 mutex_unlock(&dev->mt76.mutex); 3291 out: 3292 twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD); 3293 twt_agrt->req_type |= 3294 le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD); 3295 twt->control = twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED; 3296 } 3297 3298 void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev, 3299 struct mt7996_vif_link *link, 3300 struct mt7996_sta_link *msta_link, 3301 u8 flowid) 3302 { 3303 struct mt7996_twt_flow *flow; 3304 3305 lockdep_assert_held(&dev->mt76.mutex); 3306 3307 if (flowid >= ARRAY_SIZE(msta_link->twt.flow)) 3308 return; 3309 3310 if (!(msta_link->twt.flowid_mask & BIT(flowid))) 3311 return; 3312 3313 flow = &msta_link->twt.flow[flowid]; 3314 if (mt7996_mcu_twt_agrt_update(dev, link, flow, MCU_TWT_AGRT_DELETE)) 3315 return; 3316 3317 list_del_init(&flow->list); 3318 msta_link->twt.flowid_mask &= ~BIT(flowid); 3319 dev->twt.table_mask &= ~BIT(flow->table_id); 3320 dev->twt.n_agrt--; 3321 } 3322