// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7915.h"
#include "../dma.h"
#include "mac.h"
#include "mcu.h"

/* Convert an RCPI field from the RX vector to dBm: RCPI is in 0.5 dB
 * steps with a +220 offset.
 */
#define to_rssi(field, rxv)	((FIELD_GET(field, rxv) - 220) / 2)

/* DFS radar detection parameters per regulatory domain.
 * Each table entry is indexed by the hardware radar-pattern type; the
 * field layout follows struct mt7915_dfs_radar_spec (see mt7915.h).
 * Values are passed verbatim to firmware — do not "normalize" them.
 */
static const struct mt7915_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0,  6, 32, 28, 0,  990, 5010, 17, 1,  1 },
		[6] =  { 1, 0,  9, 32, 28, 0,  615, 5010, 27, 1,  1 },
		[7] =  { 1, 0, 15, 32, 28, 0,  240,  445, 27, 1,  1 },
		[8] =  { 1, 0, 12, 32, 28, 0,  240,  510, 42, 1,  1 },
		[9] =  { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
		[10] = { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
		[11] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 },
		[12] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 },
	},
};

static const struct mt7915_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0,  8,  32, 28, 0, 508, 3076, 13, 1,  1 },
		[1] = { 1, 0, 12,  32, 28, 0, 140,  240, 17, 1,  1 },
		[2] = { 1, 0,  8,  32, 28, 0, 190,  510, 22, 1,  1 },
		[3] = { 1, 0,  6,  32, 28, 0, 190,  510, 32, 1,  1 },
		[4] = { 1, 0,  9, 255, 28, 0, 323,  343, 13, 1, 32 },
	},
};

/* Japan reuses the FCC short-pulse patterns and adds three extra types. */
static const struct mt7915_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] =  { 1, 0,  8,  32, 28, 0,  508, 3076,  13, 1,  1 },
		[1] =  { 1, 0, 12,  32, 28, 0,  140,  240,  17, 1,  1 },
		[2] =  { 1, 0,  8,  32, 28, 0,  190,  510,  22, 1,  1 },
		[3] =  { 1, 0,  6,  32, 28, 0,  190,  510,  32, 1,  1 },
		[4] =  { 1, 0,  9, 255, 28, 0,  323,  343,  13, 1, 32 },
		[13] = { 1, 0,  7,  32, 28, 0, 3836, 3856,  14, 1,  1 },
		[14] = { 1, 0,  6,  32, 28, 0,  615, 5010, 110, 1,  1 },
		[15] = { 1, 1,  0,   0,  0, 0,   15, 5010, 110, 0,  0, 12, 32, 28 },
	},
};

/* Look up the wcid for an RX frame.
 * For multicast/broadcast frames received on a station interface the
 * per-peer wcid is replaced by the vif's own sta wcid, so the frame is
 * accounted to the interface rather than the transmitting peer.
 * Must be called under rcu_read_lock() (rcu_dereference below).
 */
static struct mt76_wcid *mt7915_rx_get_wcid(struct mt7915_dev *dev,
					    u16 idx, bool unicast)
{
	struct mt7915_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7915_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}

/* mac80211 sta_ps callback: powersave transitions are handled entirely
 * by the firmware on this chip, so there is nothing to do here.
 */
void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
}

/* Kick a WTBL (wireless table) update for entry @idx and wait for the
 * hardware to clear the busy bit. Returns false on timeout (5 ms poll).
 */
bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}

/* Select the WTBL group window for @wcid and return the register offset
 * of dword @dw within that entry. NOTE(review): the WDUCR write selects a
 * 128-entry group (wcid >> 7); callers must not interleave accesses to
 * different groups without re-calling this helper.
 */
u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid, u8 dw)
{
	mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
		FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));

	return MT_WTBL_LMAC_OFFS(wcid, dw);
}

/* Poll per-station hardware counters (airtime and last TX rate/GI) for
 * every station queued on dev->sta_poll_list, and report airtime to
 * mac80211. Entries are consumed one at a time under sta_poll_lock so
 * new stations can be queued concurrently.
 */
static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
{
	/* TID used when reporting airtime for each AC */
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct ieee80211_sta *sta;
	struct mt7915_sta *msta;
	struct rate_info *rate;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	int i;

	/* take the whole pending list in one go */
	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&dev->sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

	rcu_read_lock();

	while (true) {
		bool clear = false;
		u32 addr, val;
		u16 idx;
		u8 bw;

		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&sta_poll_list)) {
			spin_unlock_bh(&dev->sta_poll_lock);
			break;
		}
		msta = list_first_entry(&sta_poll_list,
					struct mt7915_sta, poll_list);
		list_del_init(&msta->poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);

		idx = msta->wcid.idx;
		/* WTBL dword 20 onwards: per-AC TX/RX admitted airtime */
		addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 20);

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = msta->airtime_ac[i];
			u32 rx_last = msta->airtime_ac[i + 4];

			msta->airtime_ac[i] = mt76_rr(dev, addr);
			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			/* delta since the previous poll */
			tx_time[i] = msta->airtime_ac[i] - tx_last;
			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;

			/* counter is about to wrap: ask hw to clear it */
			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

		if (clear) {
			mt7915_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			/* hw counters are in LMAC queue order, remap to AC */
			u8 q = mt76_connac_lmac_mapping(i);
			u32 tx_cur = tx_time[q];
			u32 rx_cur = rx_time[q];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur,
						       rx_cur);
		}

		/*
		 * We don't support reading GI info from txs packets.
		 * For accurate tx status reporting and AQL improvement,
		 * we need to make sure that flags match so polling GI
		 * from per-sta counters directly.
181 */ 182 rate = &msta->wcid.rate; 183 addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 7); 184 val = mt76_rr(dev, addr); 185 186 switch (rate->bw) { 187 case RATE_INFO_BW_160: 188 bw = IEEE80211_STA_RX_BW_160; 189 break; 190 case RATE_INFO_BW_80: 191 bw = IEEE80211_STA_RX_BW_80; 192 break; 193 case RATE_INFO_BW_40: 194 bw = IEEE80211_STA_RX_BW_40; 195 break; 196 default: 197 bw = IEEE80211_STA_RX_BW_20; 198 break; 199 } 200 201 if (rate->flags & RATE_INFO_FLAGS_HE_MCS) { 202 u8 offs = 24 + 2 * bw; 203 204 rate->he_gi = (val & (0x3 << offs)) >> offs; 205 } else if (rate->flags & 206 (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) { 207 if (val & BIT(12 + bw)) 208 rate->flags |= RATE_INFO_FLAGS_SHORT_GI; 209 else 210 rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI; 211 } 212 } 213 214 rcu_read_unlock(); 215 } 216 217 static int 218 mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) 219 { 220 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; 221 struct mt76_phy *mphy = &dev->mt76.phy; 222 struct mt7915_phy *phy = &dev->phy; 223 struct ieee80211_supported_band *sband; 224 __le32 *rxd = (__le32 *)skb->data; 225 __le32 *rxv = NULL; 226 u32 rxd0 = le32_to_cpu(rxd[0]); 227 u32 rxd1 = le32_to_cpu(rxd[1]); 228 u32 rxd2 = le32_to_cpu(rxd[2]); 229 u32 rxd3 = le32_to_cpu(rxd[3]); 230 u32 rxd4 = le32_to_cpu(rxd[4]); 231 u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM; 232 bool unicast, insert_ccmp_hdr = false; 233 u8 remove_pad, amsdu_info; 234 u8 mode = 0, qos_ctl = 0; 235 struct mt7915_sta *msta; 236 bool hdr_trans; 237 u16 hdr_gap; 238 u16 seq_ctrl = 0; 239 __le16 fc = 0; 240 int idx; 241 242 memset(status, 0, sizeof(*status)); 243 244 if ((rxd1 & MT_RXD1_NORMAL_BAND_IDX) && !phy->band_idx) { 245 mphy = dev->mt76.phys[MT_BAND1]; 246 if (!mphy) 247 return -EINVAL; 248 249 phy = mphy->priv; 250 status->phy_idx = 1; 251 } 252 253 if (!test_bit(MT76_STATE_RUNNING, &mphy->state)) 254 return -EINVAL; 255 256 if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR) 
		return -EINVAL;

	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
		return -EINVAL;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	status->wcid = mt7915_rx_get_wcid(dev, idx, unicast);

	if (status->wcid) {
		msta = container_of(status->wcid, struct mt7915_sta, wcid);
		/* schedule this station for counter polling */
		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&msta->poll_list))
			list_add_tail(&msta->poll_list, &dev->sta_poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);
	}

	status->freq = mphy->chandef.chan->center_freq;
	status->band = mphy->chandef.chan->band;
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else if (status->band == NL80211_BAND_6GHZ)
		sband = &mphy->sband_6g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!sband->channels)
		return -EINVAL;

	/* hardware checksum offload verified both IP and TCP/UDP sums */
	if ((rxd0 & csum_mask) == csum_mask)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* frame was decrypted by hardware and the IV/MIC stripped */
	if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	/* skip the fixed part of the RX descriptor (6 dwords), then walk
	 * the optional groups in the order the hardware appends them
	 */
	rxd += 6;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		fc = cpu_to_le16(FIELD_GET(MT_RXD6_FRAME_CONTROL, v0));
		qos_ctl = FIELD_GET(MT_RXD8_QOS_CTL,
				    v2);
		seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				/* fragments need the CCMP header restored */
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				/* PN/IV is stored byte-reversed in the rxd */
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				/* new A-MPDU: bump ref, skipping 0 on wrap */
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* RXD Group 3 - P-RXV */
	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		u32 v0, v1;
		int ret;

		rxv = rxd;
		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;

		v0 = le32_to_cpu(rxv[0]);
		v1 = le32_to_cpu(rxv[1]);

		if (v0 & MT_PRXV_HT_AD_CODE)
			status->enc_flags |= RX_ENC_FLAG_LDPC;

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);

		/* RXD Group 5 - C-RXV */
		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			rxd += 18;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;
		}

		/* mt7915 needs the C-RXV to decode the rate; newer chips
		 * carry enough information in the P-RXV alone
		 */
		if (!is_mt7915(&dev->mt76) || (rxd1 & MT_RXD1_NORMAL_GROUP_5)) {
			ret = mt76_connac2_mac_fill_rx_rate(&dev->mt76, status,
							    sband, rxv, &mode);
			if (ret < 0)
				return ret;
		}
	}

	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
	}

	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
	if (hdr_trans && ieee80211_has_morefrags(fc)) {
		struct ieee80211_vif *vif;
		int err;

		if (!msta || !msta->vif)
			return -EINVAL;

		vif = container_of((void *)msta->vif, struct ieee80211_vif,
				   drv_priv);
		/* undo the hardware 802.3 conversion for fragmented frames */
		err = mt76_connac2_reverse_frag0_hdr_trans(vif, skb, hdr_gap);
		if (err)
			return err;

		hdr_trans = false;
	} else {
		int pad_start = 0;

		skb_pull(skb, hdr_gap);
		if (!hdr_trans && status->amsdu) {
			pad_start = ieee80211_get_hdrlen_from_skb(skb);
		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
			/*
			 * When header translation failure is indicated,
			 * the hardware will insert an extra 2-byte field
			 * containing the data length after the protocol
			 * type field.
			 */
			pad_start = 12;
			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
				pad_start += 4;

			/* only strip if the inserted field matches the
			 * remaining payload length
			 */
			if (get_unaligned_be16(skb->data + pad_start) !=
			    skb->len - pad_start - 2)
				pad_start = 0;
		}

		if (pad_start) {
			memmove(skb->data + 2, skb->data, pad_start);
			skb_pull(skb, 2);
		}
	}

	if (!hdr_trans) {
		struct ieee80211_hdr *hdr;

		if (insert_ccmp_hdr) {
			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

			mt76_insert_ccmp_hdr(skb, key_id);
		}

		hdr = mt76_skb_get_hdr(skb);
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *ieee80211_get_qos_ctl(hdr);
		}
	} else {
		/* frame was converted to 802.3 by hardware */
		status->flag |= RX_FLAG_8023;
	}

	if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
		mt76_connac2_mac_decode_he_radiotap(&dev->mt76, skb, rxv, mode);

	if (!status->wcid || !ieee80211_is_data_qos(fc))
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(fc);
	status->qos_ctl = qos_ctl;
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);

	return 0;
}

/* Consume a standalone RX vector report (testmode only): record
 * per-chain RCPI/RSSI, frequency offset and SNR in phy->test, then free
 * the skb. Outside CONFIG_NL80211_TESTMODE this just frees the skb.
 */
static void
mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
{
#ifdef CONFIG_NL80211_TESTMODE
	struct mt7915_phy *phy = &dev->phy;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv_hdr = rxd + 2;
	__le32 *rxv = rxd + 4;
	u32 rcpi, ib_rssi, wb_rssi, v20, v21;
	u8 band_idx;
	s32 foe;
	u8 snr;
	int i;

	band_idx = le32_get_bits(rxv_hdr[1], MT_RXV_HDR_BAND_IDX);
	if (band_idx && !phy->band_idx) {
		phy = mt7915_ext_phy(dev);
		if (!phy)
			goto out;
	}

	rcpi = le32_to_cpu(rxv[6]);
	ib_rssi = le32_to_cpu(rxv[7]);
	wb_rssi = le32_to_cpu(rxv[8]) >> 5;

	/* one byte per chain; chain 3 wideband RSSI lives in rxv[9] */
	for (i = 0; i < 4; i++, rcpi >>= 8, ib_rssi >>= 8, wb_rssi >>= 9) {
		if (i == 3)
			wb_rssi = le32_to_cpu(rxv[9]);

		phy->test.last_rcpi[i] = rcpi & 0xff;
		phy->test.last_ib_rssi[i] = ib_rssi & 0xff;
		phy->test.last_wb_rssi[i] = wb_rssi & 0xff;
	}

	v20 = le32_to_cpu(rxv[20]);
	v21 = le32_to_cpu(rxv[21]);

	/* frequency offset estimate is split across two dwords */
	foe = FIELD_GET(MT_CRXV_FOE_LO, v20) |
	      (FIELD_GET(MT_CRXV_FOE_HI, v21) << MT_CRXV_FOE_SHIFT);

	snr = FIELD_GET(MT_CRXV_SNR, v20) - 16;

	phy->test.last_freq_offset = foe;
	phy->test.last_snr = snr;
out:
#endif
	dev_kfree_skb(skb);
}

/* Testmode TX: patch the txwi of the testmode frame with the fixed
 * rate/bandwidth/GI settings requested via nl80211 testmode. Only acts
 * on the dedicated testmode tx_skb; a no-op for all other frames and
 * when CONFIG_NL80211_TESTMODE is disabled.
 */
static void
mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
			 struct sk_buff *skb)
{
#ifdef CONFIG_NL80211_TESTMODE
	struct mt76_testmode_data *td = &phy->mt76->test;
	const struct ieee80211_rate *r;
	u8 bw, mode, nss = td->tx_rate_nss;
	u8 rate_idx = td->tx_rate_idx;
	u16 rateval = 0;
	u32 val;
	bool cck = false;
	int band;

	if (skb != phy->mt76->test.tx_skb)
		return;

	switch (td->tx_rate_mode) {
	case MT76_TM_TX_MODE_HT:
		/* HT MCS encodes the stream count in the upper bits */
		nss = 1 + (rate_idx >> 3);
		mode = MT_PHY_TYPE_HT;
		break;
	case MT76_TM_TX_MODE_VHT:
		mode = MT_PHY_TYPE_VHT;
		break;
	case MT76_TM_TX_MODE_HE_SU:
		mode = MT_PHY_TYPE_HE_SU;
		break;
	case MT76_TM_TX_MODE_HE_EXT_SU:
		mode = MT_PHY_TYPE_HE_EXT_SU;
		break;
	case MT76_TM_TX_MODE_HE_TB:
		mode = MT_PHY_TYPE_HE_TB;
		break;
	case MT76_TM_TX_MODE_HE_MU:
		mode = MT_PHY_TYPE_HE_MU;
		break;
	case MT76_TM_TX_MODE_CCK:
		cck = true;
		fallthrough;
	case MT76_TM_TX_MODE_OFDM:
		band = phy->mt76->chandef.chan->band;
		/* on 2.4 GHz the first 4 bitrates are CCK: skip them */
		if (band == NL80211_BAND_2GHZ && !cck)
			rate_idx += 4;

		r = &phy->mt76->hw->wiphy->bands[band]->bitrates[rate_idx];
		val = cck ?
		      r->hw_value_short : r->hw_value;

		/* hw_value packs phy mode (high byte) and rate index */
		mode = val >> 8;
		rate_idx = val & 0xff;
		break;
	default:
		mode = MT_PHY_TYPE_OFDM;
		break;
	}

	switch (phy->mt76->chandef.width) {
	case NL80211_CHAN_WIDTH_40:
		bw = 1;
		break;
	case NL80211_CHAN_WIDTH_80:
		bw = 2;
		break;
	case NL80211_CHAN_WIDTH_80P80:
	case NL80211_CHAN_WIDTH_160:
		bw = 3;
		break;
	default:
		bw = 0;
		break;
	}

	if (td->tx_rate_stbc && nss == 1) {
		nss++;
		rateval |= MT_TX_RATE_STBC;
	}

	rateval |= FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
		   FIELD_PREP(MT_TX_RATE_MODE, mode) |
		   FIELD_PREP(MT_TX_RATE_NSS, nss - 1);

	txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);

	/* single transmission attempt, no BA for legacy rates */
	le32p_replace_bits(&txwi[3], 1, MT_TXD3_REM_TX_COUNT);
	if (td->tx_rate_mode < MT76_TM_TX_MODE_HT)
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);

	val = MT_TXD6_FIXED_BW |
	      FIELD_PREP(MT_TXD6_BW, bw) |
	      FIELD_PREP(MT_TXD6_TX_RATE, rateval) |
	      FIELD_PREP(MT_TXD6_SGI, td->tx_rate_sgi);

	/* for HE_SU/HE_EXT_SU PPDU
	 * - 1x, 2x, 4x LTF + 0.8us GI
	 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI
	 * for HE_MU PPDU
	 * - 2x, 4x LTF + 0.8us GI
	 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI
	 * for HE_TB PPDU
	 * - 1x, 2x LTF + 1.6us GI
	 * - 4x LTF + 3.2us GI
	 */
	if (mode >= MT_PHY_TYPE_HE_SU)
		val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf);

	if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
		val |= MT_TXD6_LDPC;

	txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
	txwi[6] |= cpu_to_le32(val);
	txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
					  phy->test.spe_idx));
#endif
}

/* Fill the TX descriptor (txwi) for a frame: delegate to the common
 * connac2 helper, then apply testmode overrides when testmode is active
 * on the owning phy.
 */
void mt7915_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
			   struct ieee80211_key_conf *key,
			   enum mt76_txq_id qid, u32 changed)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 phy_idx = (info->hw_queue &
		      MT_TX_HW_QUEUE_PHY) >> 2;
	struct mt76_phy *mphy = &dev->phy;

	if (phy_idx && dev->phys[MT_BAND1])
		mphy = dev->phys[MT_BAND1];

	mt76_connac2_mac_write_txwi(dev, txwi, skb, wcid, key, pid, qid, changed);

	if (mt76_testmode_enabled(mphy))
		mt7915_mac_write_txwi_tm(mphy->priv, txwi, skb);
}

/* Prepare a frame for DMA transmission: allocate a token, write the
 * txwi and the firmware TXP (buffer scatter list + control flags).
 * Returns 0 on success or a negative error (frame is dropped).
 */
int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_connac_fw_txp *txp;
	struct mt76_txwi_cache *t;
	int id, i, nbuf = tx_info->nbuf - 1;
	u8 *txwi = (u8 *)txwi_ptr;
	int pid;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	if (sta) {
		struct mt7915_sta *msta;

		msta = (struct mt7915_sta *)sta->drv_priv;

		/* request a TX status report at most every HZ/4 to keep
		 * rate statistics fresh without flooding the status path
		 */
		if (time_after(jiffies, msta->jiffies + HZ / 4)) {
			info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
			msta->jiffies = jiffies;
		}
	}

	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
	mt7915_mac_write_txwi(mdev, txwi_ptr, tx_info->skb, wcid, pid, key,
			      qid, 0);

	/* TXP follows the txwi; buf[0] is reserved for the header part */
	txp = (struct mt76_connac_fw_txp *)(txwi + MT_TXD_SIZE);
	for (i = 0; i < nbuf; i++) {
		txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
		txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
	}
	txp->nbuf = nbuf;

	txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD |
				 MT_CT_INFO_FROM_HOST);

	if (!key)
		txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    ieee80211_is_mgmt(hdr->frame_control))
		txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;

		txp->bss_idx = mvif->mt76.idx;
	}

	txp->token = cpu_to_le16(id);
	/* 0x3ff marks "no WDS repeater wcid" */
	if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
		txp->rept_wds_wcid = cpu_to_le16(wcid->idx);
	else
		txp->rept_wds_wcid = cpu_to_le16(0x3ff);
	tx_info->skb = DMA_DUMMY_DATA;

	/* pass partial skb header to fw */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	return 0;
}

/* Initialize a WED (wireless ethernet dispatch) TX buffer: write a
 * minimal 802.3 cut-through txwi + TXP pointing at the payload right
 * behind them. Returns the offset of the payload within the buffer.
 */
u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
{
	struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
	__le32 *txwi = ptr;
	u32 val;

	memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));

	val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
	txwi[1] = cpu_to_le32(val);

	txp->token = cpu_to_le16(token_id);
	txp->nbuf = 1;
	txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));

	return MT_TXD_SIZE + sizeof(*txp);
}

/* On TX completion of a QoS data frame, opportunistically start a BlockAck
 * session for its TID if the peer supports aggregation and none is
 * established yet.
 */
static void
mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
{
	struct mt7915_sta *msta;
	u16 fc, tid;
	u32 val;

	if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
		return;

	tid = le32_get_bits(txwi[1], MT_TXD1_TID);
	if (tid >= 6) /* skip VO queue */
		return;

	/* reconstruct the frame_control type/subtype from the txwi */
	val = le32_to_cpu(txwi[2]);
	fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
	     FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
	if (unlikely(fc !=
		     (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
		return;

	msta = (struct mt7915_sta *)sta->drv_priv;
	if (!test_and_set_bit(tid, &msta->ampdu_state))
		ieee80211_start_tx_ba_session(sta, tid, 0);
}

/* Complete a transmitted frame: unmap its DMA buffers, queue the owning
 * station for counter polling, trigger BA-session setup and hand the
 * skb to free_list for deferred release. Always returns the txwi cache
 * entry to the pool. NOTE(review): the rcu_dereference below relies on
 * the caller holding rcu_read_lock (tx-free path runs in napi context).
 */
static void
mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
		 struct ieee80211_sta *sta, struct list_head *free_list)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt7915_sta *msta;
	struct mt76_wcid *wcid;
	__le32 *txwi;
	u16 wcid_idx;

	mt76_connac_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
	if (sta) {
		wcid = (struct mt76_wcid *)sta->drv_priv;
		wcid_idx = wcid->idx;
	} else {
		/* no sta given: recover the wcid from the txwi itself */
		wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
		wcid = rcu_dereference(dev->mt76.wcid[wcid_idx]);

		if (wcid && wcid->sta) {
			msta = container_of(wcid, struct mt7915_sta, wcid);
			sta = container_of((void *)msta, struct ieee80211_sta,
					   drv_priv);
			spin_lock_bh(&dev->sta_poll_lock);
			if (list_empty(&msta->poll_list))
				list_add_tail(&msta->poll_list, &dev->sta_poll_list);
			spin_unlock_bh(&dev->sta_poll_lock);
		}
	}

	/* don't aggregate EAPOL frames */
	if (sta && likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
		mt7915_tx_check_aggr(sta, txwi);

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);

out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}

/* Common prologue for TX-free event handling: flush the PSD and BE TX
 * queues on both bands so their buffers are unmapped before tokens are
 * released.
 */
static void
mt7915_mac_tx_free_prepare(struct mt7915_dev *dev)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy_ext = mdev->phys[MT_BAND1];

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
	if (mphy_ext) {
		mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false);
	}
}

static void
/* Common epilogue for TX-free event handling: poll station counters,
 * unblock/kick the TX worker and release all completed skbs in one
 * batch.
 */
mt7915_mac_tx_free_done(struct mt7915_dev *dev,
			struct list_head *free_list, bool wake)
{
	struct sk_buff *skb, *tmp;

	mt7915_mac_sta_poll(dev);

	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	list_for_each_entry_safe(skb, tmp, free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}

/* Handle a TXRX_NOTIFY (TX-free) event: walk the MSDU-id list reported
 * by firmware, release the matching tokens and complete their skbs.
 * Supports both the v2 (one id per dword) and v3 (two 15-bit ids per
 * dword) event layouts.
 */
static void
mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
{
	struct mt76_connac_tx_free *free = data;
	__le32 *tx_info = (__le32 *)(data + sizeof(*free));
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	LIST_HEAD(free_list);
	void *end = data + len;
	bool v3, wake = false;
	u16 total, count = 0;
	u32 txd = le32_to_cpu(free->txd);
	__le32 *cur_info;

	mt7915_mac_tx_free_prepare(dev);

	total = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
	v3 = (FIELD_GET(MT_TX_FREE_VER, txd) == 0x4);
	/* bail out if the reported count would overrun the event buffer */
	if (WARN_ON_ONCE((void *)&tx_info[total >> v3] > end))
		return;

	for (cur_info = tx_info; count < total; cur_info++) {
		u32 msdu, info = le32_to_cpu(*cur_info);
		u8 i;

		/*
		 * 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		if (info & MT_TX_FREE_PAIR) {
			struct mt7915_sta *msta;
			struct mt76_wcid *wcid;
			u16 idx;

			/* switch accounting to the newly announced station */
			idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			msta = container_of(wcid, struct mt7915_sta, wcid);
			spin_lock_bh(&dev->sta_poll_lock);
			if (list_empty(&msta->poll_list))
				list_add_tail(&msta->poll_list, &dev->sta_poll_list);
			spin_unlock_bh(&dev->sta_poll_lock);
			continue;
		}

		if (v3 && (info & MT_TX_FREE_MPDU_HEADER))
			continue;

		/* v3 packs two msdu ids per dword, v2 only one */
		for (i = 0; i < 1 + v3; i++) {
			if (v3) {
				msdu = (info >> (15 * i)) & MT_TX_FREE_MSDU_ID_V3;
				/* all-ones means the slot is unused */
				if (msdu == MT_TX_FREE_MSDU_ID_V3)
					continue;
			} else {
				msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
			}
			count++;
			txwi = mt76_token_release(mdev, msdu, &wake);
			if (!txwi)
				continue;

			mt7915_txwi_free(dev, txwi, sta, &free_list);
		}
	}

	mt7915_mac_tx_free_done(dev, &free_list, wake);
}

/* Handle the legacy v0 TX-free event layout: a flat array of 16-bit
 * MSDU ids with no wcid pairing information.
 */
static void
mt7915_mac_tx_free_v0(struct mt7915_dev *dev, void *data, int len)
{
	struct mt76_connac_tx_free *free = data;
	__le16 *info = (__le16 *)(data + sizeof(*free));
	struct mt76_dev *mdev = &dev->mt76;
	void *end = data + len;
	LIST_HEAD(free_list);
	bool wake = false;
	u8 i, count;

	mt7915_mac_tx_free_prepare(dev);

	count = FIELD_GET(MT_TX_FREE_MSDU_CNT_V0, le16_to_cpu(free->ctrl));
	if (WARN_ON_ONCE((void *)&info[count] > end))
		return;

	for (i = 0; i < count; i++) {
		struct mt76_txwi_cache *txwi;
		u16 msdu = le16_to_cpu(info[i]);

		txwi = mt76_token_release(mdev, msdu, &wake);
		if (!txwi)
			continue;

		mt7915_txwi_free(dev, txwi, NULL, &free_list);
	}

	mt7915_mac_tx_free_done(dev, &free_list, wake);
}

/* Process a single TX status (TXS) record: feed it into the common
 * status machinery and queue the station for counter polling.
 */
static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
{
	struct mt7915_sta *msta = NULL;
	struct mt76_wcid
			 *wcid;
	__le32 *txs_data = data;
	u16 wcidx;
	u8 pid;

	if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
		return;

	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);

	/* pids below MT_PACKET_ID_FIRST were not requested by the driver */
	if (pid < MT_PACKET_ID_FIRST)
		return;

	if (wcidx >= mt7915_wtbl_size(dev))
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7915_sta, wcid);

	mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data,
				     &msta->stats);
	if (!wcid->sta)
		goto out;

	spin_lock_bh(&dev->sta_poll_lock);
	if (list_empty(&msta->poll_list))
		list_add_tail(&msta->poll_list, &dev->sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

out:
	rcu_read_unlock();
}

/* Fast-path classifier for RX buffers (used by the WED/offload path):
 * consume driver-internal events in place. Returns true if the buffer
 * still needs full RX processing, false if it was consumed here.
 */
bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
{
	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
	__le32 *rxd = (__le32 *)data;
	__le32 *end = (__le32 *)&rxd[len / 4];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7915_mac_tx_free(dev, data, len);
		return false;
	case PKT_TYPE_TXRX_NOTIFY_V0:
		mt7915_mac_tx_free_v0(dev, data, len);
		return false;
	case PKT_TYPE_TXS:
		/* TXS records are 8 dwords each, after a 2-dword header */
		for (rxd += 2; rxd + 8 <= end; rxd += 8)
			mt7915_mac_add_txs(dev, rxd);
		return false;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7915_debugfs_rx_fw_monitor(dev, data, len);
		return false;
	default:
		return true;
	}
}

/* Main RX dispatch: route a received DMA buffer to the matching handler
 * based on its packet type, or hand normal frames to mac80211 via
 * mt76_rx(). Takes ownership of the skb in all cases.
 */
void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb)
{
	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum
	     rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7915_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_TXRX_NOTIFY_V0:
		mt7915_mac_tx_free_v0(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7915_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXRXV:
		mt7915_mac_fill_rx_vector(dev, skb);
		break;
	case PKT_TYPE_TXS:
		/* TXS records are 8 dwords each, after a 2-dword header */
		for (rxd += 2; rxd + 8 <= end; rxd += 8)
			mt7915_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7915_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL:
		if (!mt7915_mac_fill_rx(dev, skb)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}

/* Restart the PHY CCA statistics counters for this band.
 * NOTE(review): BIT(11) | BIT(9) are undocumented enable bits in
 * RX_CTRL1 — confirm against the datasheet before changing.
 */
void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	u32 reg = MT_WF_PHY_RX_CTRL1(phy->band_idx);

	mt76_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN);
	mt76_set(dev, reg, BIT(11) | BIT(9));
}

/* Clear all per-band statistics: aggregation counters (read-to-clear
 * registers), the software aggregate stats array, airtime counters and
 * the survey timestamp.
 */
void mt7915_mac_reset_counters(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	int i;

	/* reading the hw aggregation counters clears them */
	for (i = 0; i < 4; i++) {
		mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
		mt76_rr(dev, MT_TX_AGG_CNT2(phy->band_idx, i));
	}

	i = 0;
	phy->mt76->survey_time = ktime_get_boottime();
	/* the second band owns the upper half of aggr_stats */
	if (phy->band_idx)
		i = ARRAY_SIZE(dev->mt76.aggr_stats) / 2;

	memset(&dev->mt76.aggr_stats[i], 0, sizeof(dev->mt76.aggr_stats) / 2);

	/* reset airtime counters */
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(phy->band_idx),
		 MT_WF_RMAC_MIB_RXTIME_CLR);

	mt7915_mcu_get_chan_mib_info(phy, true);
}

void mt7915_mac_set_timing(struct
			   mt7915_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7915_dev *dev = phy->dev;
	struct mt7915_phy *ext_phy = mt7915_ext_phy(dev);
	u32 val, reg_offset;
	/* base CCK/OFDM PLCP and CCA timeout values (microsecond units
	 * per the register definition — TODO confirm)
	 */
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	int offset;
	bool a_band = !(phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ);

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	/* both bands share timing hardware: use the larger coverage class */
	if (ext_phy)
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       ext_phy->coverage_class);

	/* quiesce TX/RX while the timing registers are rewritten */
	mt76_set(dev, MT_ARB_SCR(phy->band_idx),
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	udelay(1);

	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(phy->band_idx), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(phy->band_idx), ofdm + reg_offset);
	mt76_wr(dev, MT_TMAC_ICR0(phy->band_idx),
		FIELD_PREP(MT_IFS_EIFS_OFDM, a_band ? 84 : 78) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, 10) |
		FIELD_PREP(MT_IFS_SLOT, phy->slottime));

	mt76_wr(dev, MT_TMAC_ICR1(phy->band_idx),
		FIELD_PREP(MT_IFS_EIFS_CCK, 314));

	/* short slot or 5/6 GHz: use OFDM CF-End rate, else CCK 11b */
	if (phy->slottime < 20 || a_band)
		val = MT7915_CFEND_RATE_DEFAULT;
	else
		val = MT7915_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_ACR0(phy->band_idx), MT_AGG_ACR_CFEND_RATE, val);
	mt76_clear(dev, MT_ARB_SCR(phy->band_idx),
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}

/* Enable noise-floor (IPI/IRPI) measurement on the given band; the
 * register layout differs between mt7915 and mt7916-class chips.
 */
void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy)
{
	u32 reg;

	reg = is_mt7915(&dev->mt76) ?
MT_WF_PHY_RXTD12(ext_phy) : 1193 MT_WF_PHY_RXTD12_MT7916(ext_phy); 1194 mt76_set(dev, reg, 1195 MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY | 1196 MT_WF_PHY_RXTD12_IRPI_SW_CLR); 1197 1198 reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RX_CTRL1(ext_phy) : 1199 MT_WF_PHY_RX_CTRL1_MT7916(ext_phy); 1200 mt76_set(dev, reg, FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5)); 1201 } 1202 1203 static u8 1204 mt7915_phy_get_nf(struct mt7915_phy *phy, int idx) 1205 { 1206 static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 }; 1207 struct mt7915_dev *dev = phy->dev; 1208 u32 val, sum = 0, n = 0; 1209 int nss, i; 1210 1211 for (nss = 0; nss < hweight8(phy->mt76->chainmask); nss++) { 1212 u32 reg = is_mt7915(&dev->mt76) ? 1213 MT_WF_IRPI_NSS(0, nss + (idx << dev->dbdc_support)) : 1214 MT_WF_IRPI_NSS_MT7916(idx, nss); 1215 1216 for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) { 1217 val = mt76_rr(dev, reg); 1218 sum += val * nf_power[i]; 1219 n += val; 1220 } 1221 } 1222 1223 if (!n) 1224 return 0; 1225 1226 return sum / n; 1227 } 1228 1229 void mt7915_update_channel(struct mt76_phy *mphy) 1230 { 1231 struct mt7915_phy *phy = (struct mt7915_phy *)mphy->priv; 1232 struct mt76_channel_state *state = mphy->chan_state; 1233 int nf; 1234 1235 mt7915_mcu_get_chan_mib_info(phy, false); 1236 1237 nf = mt7915_phy_get_nf(phy, phy->band_idx); 1238 if (!phy->noise) 1239 phy->noise = nf << 4; 1240 else if (nf) 1241 phy->noise += nf - (phy->noise >> 4); 1242 1243 state->noise = -(phy->noise >> 4); 1244 } 1245 1246 static bool 1247 mt7915_wait_reset_state(struct mt7915_dev *dev, u32 state) 1248 { 1249 bool ret; 1250 1251 ret = wait_event_timeout(dev->reset_wait, 1252 (READ_ONCE(dev->reset_state) & state), 1253 MT7915_RESET_TIMEOUT); 1254 1255 WARN(!ret, "Timeout waiting for MCU reset state %x\n", state); 1256 return ret; 1257 } 1258 1259 static void 1260 mt7915_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif) 1261 { 1262 struct ieee80211_hw *hw = priv; 1263 1264 switch 
(vif->type) { 1265 case NL80211_IFTYPE_MESH_POINT: 1266 case NL80211_IFTYPE_ADHOC: 1267 case NL80211_IFTYPE_AP: 1268 mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon, 1269 BSS_CHANGED_BEACON_ENABLED); 1270 break; 1271 default: 1272 break; 1273 } 1274 } 1275 1276 static void 1277 mt7915_update_beacons(struct mt7915_dev *dev) 1278 { 1279 struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1]; 1280 1281 ieee80211_iterate_active_interfaces(dev->mt76.hw, 1282 IEEE80211_IFACE_ITER_RESUME_ALL, 1283 mt7915_update_vif_beacon, dev->mt76.hw); 1284 1285 if (!mphy_ext) 1286 return; 1287 1288 ieee80211_iterate_active_interfaces(mphy_ext->hw, 1289 IEEE80211_IFACE_ITER_RESUME_ALL, 1290 mt7915_update_vif_beacon, mphy_ext->hw); 1291 } 1292 1293 static void 1294 mt7915_dma_reset(struct mt7915_dev *dev) 1295 { 1296 struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1]; 1297 u32 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0); 1298 int i; 1299 1300 mt76_clear(dev, MT_WFDMA0_GLO_CFG, 1301 MT_WFDMA0_GLO_CFG_TX_DMA_EN | 1302 MT_WFDMA0_GLO_CFG_RX_DMA_EN); 1303 1304 if (is_mt7915(&dev->mt76)) 1305 mt76_clear(dev, MT_WFDMA1_GLO_CFG, 1306 MT_WFDMA1_GLO_CFG_TX_DMA_EN | 1307 MT_WFDMA1_GLO_CFG_RX_DMA_EN); 1308 if (dev->hif2) { 1309 mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs, 1310 MT_WFDMA0_GLO_CFG_TX_DMA_EN | 1311 MT_WFDMA0_GLO_CFG_RX_DMA_EN); 1312 1313 if (is_mt7915(&dev->mt76)) 1314 mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs, 1315 MT_WFDMA1_GLO_CFG_TX_DMA_EN | 1316 MT_WFDMA1_GLO_CFG_RX_DMA_EN); 1317 } 1318 1319 usleep_range(1000, 2000); 1320 1321 for (i = 0; i < __MT_TXQ_MAX; i++) { 1322 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true); 1323 if (mphy_ext) 1324 mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[i], true); 1325 } 1326 1327 for (i = 0; i < __MT_MCUQ_MAX; i++) 1328 mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true); 1329 1330 mt76_for_each_q_rx(&dev->mt76, i) 1331 mt76_queue_rx_reset(dev, i); 1332 1333 mt76_tx_status_check(&dev->mt76, true); 1334 1335 /* re-init prefetch 
settings after reset */ 1336 mt7915_dma_prefetch(dev); 1337 1338 mt76_set(dev, MT_WFDMA0_GLO_CFG, 1339 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN); 1340 if (is_mt7915(&dev->mt76)) 1341 mt76_set(dev, MT_WFDMA1_GLO_CFG, 1342 MT_WFDMA1_GLO_CFG_TX_DMA_EN | 1343 MT_WFDMA1_GLO_CFG_RX_DMA_EN | 1344 MT_WFDMA1_GLO_CFG_OMIT_TX_INFO | 1345 MT_WFDMA1_GLO_CFG_OMIT_RX_INFO); 1346 if (dev->hif2) { 1347 mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs, 1348 MT_WFDMA0_GLO_CFG_TX_DMA_EN | 1349 MT_WFDMA0_GLO_CFG_RX_DMA_EN); 1350 1351 if (is_mt7915(&dev->mt76)) 1352 mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs, 1353 MT_WFDMA1_GLO_CFG_TX_DMA_EN | 1354 MT_WFDMA1_GLO_CFG_RX_DMA_EN | 1355 MT_WFDMA1_GLO_CFG_OMIT_TX_INFO | 1356 MT_WFDMA1_GLO_CFG_OMIT_RX_INFO); 1357 } 1358 } 1359 1360 void mt7915_tx_token_put(struct mt7915_dev *dev) 1361 { 1362 struct mt76_txwi_cache *txwi; 1363 int id; 1364 1365 spin_lock_bh(&dev->mt76.token_lock); 1366 idr_for_each_entry(&dev->mt76.token, txwi, id) { 1367 mt7915_txwi_free(dev, txwi, NULL, NULL); 1368 dev->mt76.token_count--; 1369 } 1370 spin_unlock_bh(&dev->mt76.token_lock); 1371 idr_destroy(&dev->mt76.token); 1372 } 1373 1374 /* system error recovery */ 1375 void mt7915_mac_reset_work(struct work_struct *work) 1376 { 1377 struct mt7915_phy *phy2; 1378 struct mt76_phy *ext_phy; 1379 struct mt7915_dev *dev; 1380 int i; 1381 1382 dev = container_of(work, struct mt7915_dev, reset_work); 1383 ext_phy = dev->mt76.phys[MT_BAND1]; 1384 phy2 = ext_phy ? 
ext_phy->priv : NULL; 1385 1386 if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA)) 1387 return; 1388 1389 ieee80211_stop_queues(mt76_hw(dev)); 1390 if (ext_phy) 1391 ieee80211_stop_queues(ext_phy->hw); 1392 1393 set_bit(MT76_RESET, &dev->mphy.state); 1394 set_bit(MT76_MCU_RESET, &dev->mphy.state); 1395 wake_up(&dev->mt76.mcu.wait); 1396 cancel_delayed_work_sync(&dev->mphy.mac_work); 1397 if (phy2) { 1398 set_bit(MT76_RESET, &phy2->mt76->state); 1399 cancel_delayed_work_sync(&phy2->mt76->mac_work); 1400 } 1401 mt76_worker_disable(&dev->mt76.tx_worker); 1402 mt76_for_each_q_rx(&dev->mt76, i) 1403 napi_disable(&dev->mt76.napi[i]); 1404 napi_disable(&dev->mt76.tx_napi); 1405 1406 mutex_lock(&dev->mt76.mutex); 1407 1408 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED); 1409 1410 if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) { 1411 mt7915_dma_reset(dev); 1412 1413 mt7915_tx_token_put(dev); 1414 idr_init(&dev->mt76.token); 1415 1416 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT); 1417 mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE); 1418 } 1419 1420 clear_bit(MT76_MCU_RESET, &dev->mphy.state); 1421 clear_bit(MT76_RESET, &dev->mphy.state); 1422 if (phy2) 1423 clear_bit(MT76_RESET, &phy2->mt76->state); 1424 1425 local_bh_disable(); 1426 mt76_for_each_q_rx(&dev->mt76, i) { 1427 napi_enable(&dev->mt76.napi[i]); 1428 napi_schedule(&dev->mt76.napi[i]); 1429 } 1430 local_bh_enable(); 1431 1432 tasklet_schedule(&dev->irq_tasklet); 1433 1434 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE); 1435 mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE); 1436 1437 mt76_worker_enable(&dev->mt76.tx_worker); 1438 1439 local_bh_disable(); 1440 napi_enable(&dev->mt76.tx_napi); 1441 napi_schedule(&dev->mt76.tx_napi); 1442 local_bh_enable(); 1443 1444 ieee80211_wake_queues(mt76_hw(dev)); 1445 if (ext_phy) 1446 ieee80211_wake_queues(ext_phy->hw); 1447 1448 mutex_unlock(&dev->mt76.mutex); 1449 1450 mt7915_update_beacons(dev); 1451 1452 
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work, 1453 MT7915_WATCHDOG_TIME); 1454 if (phy2) 1455 ieee80211_queue_delayed_work(ext_phy->hw, 1456 &phy2->mt76->mac_work, 1457 MT7915_WATCHDOG_TIME); 1458 } 1459 1460 void mt7915_mac_update_stats(struct mt7915_phy *phy) 1461 { 1462 struct mt7915_dev *dev = phy->dev; 1463 struct mib_stats *mib = &phy->mib; 1464 int i, aggr0, aggr1, cnt; 1465 u32 val; 1466 1467 cnt = mt76_rr(dev, MT_MIB_SDR3(phy->band_idx)); 1468 mib->fcs_err_cnt += is_mt7915(&dev->mt76) ? 1469 FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) : 1470 FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt); 1471 1472 cnt = mt76_rr(dev, MT_MIB_SDR4(phy->band_idx)); 1473 mib->rx_fifo_full_cnt += FIELD_GET(MT_MIB_SDR4_RX_FIFO_FULL_MASK, cnt); 1474 1475 cnt = mt76_rr(dev, MT_MIB_SDR5(phy->band_idx)); 1476 mib->rx_mpdu_cnt += cnt; 1477 1478 cnt = mt76_rr(dev, MT_MIB_SDR6(phy->band_idx)); 1479 mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt); 1480 1481 cnt = mt76_rr(dev, MT_MIB_SDR7(phy->band_idx)); 1482 mib->rx_vector_mismatch_cnt += 1483 FIELD_GET(MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK, cnt); 1484 1485 cnt = mt76_rr(dev, MT_MIB_SDR8(phy->band_idx)); 1486 mib->rx_delimiter_fail_cnt += 1487 FIELD_GET(MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK, cnt); 1488 1489 cnt = mt76_rr(dev, MT_MIB_SDR10(phy->band_idx)); 1490 mib->rx_mrdy_cnt += is_mt7915(&dev->mt76) ? 
1491 FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK, cnt) : 1492 FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK_MT7916, cnt); 1493 1494 cnt = mt76_rr(dev, MT_MIB_SDR11(phy->band_idx)); 1495 mib->rx_len_mismatch_cnt += 1496 FIELD_GET(MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK, cnt); 1497 1498 cnt = mt76_rr(dev, MT_MIB_SDR12(phy->band_idx)); 1499 mib->tx_ampdu_cnt += cnt; 1500 1501 cnt = mt76_rr(dev, MT_MIB_SDR13(phy->band_idx)); 1502 mib->tx_stop_q_empty_cnt += 1503 FIELD_GET(MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK, cnt); 1504 1505 cnt = mt76_rr(dev, MT_MIB_SDR14(phy->band_idx)); 1506 mib->tx_mpdu_attempts_cnt += is_mt7915(&dev->mt76) ? 1507 FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt) : 1508 FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK_MT7916, cnt); 1509 1510 cnt = mt76_rr(dev, MT_MIB_SDR15(phy->band_idx)); 1511 mib->tx_mpdu_success_cnt += is_mt7915(&dev->mt76) ? 1512 FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt) : 1513 FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916, cnt); 1514 1515 cnt = mt76_rr(dev, MT_MIB_SDR16(phy->band_idx)); 1516 mib->primary_cca_busy_time += 1517 FIELD_GET(MT_MIB_SDR16_PRIMARY_CCA_BUSY_TIME_MASK, cnt); 1518 1519 cnt = mt76_rr(dev, MT_MIB_SDR17(phy->band_idx)); 1520 mib->secondary_cca_busy_time += 1521 FIELD_GET(MT_MIB_SDR17_SECONDARY_CCA_BUSY_TIME_MASK, cnt); 1522 1523 cnt = mt76_rr(dev, MT_MIB_SDR18(phy->band_idx)); 1524 mib->primary_energy_detect_time += 1525 FIELD_GET(MT_MIB_SDR18_PRIMARY_ENERGY_DETECT_TIME_MASK, cnt); 1526 1527 cnt = mt76_rr(dev, MT_MIB_SDR19(phy->band_idx)); 1528 mib->cck_mdrdy_time += FIELD_GET(MT_MIB_SDR19_CCK_MDRDY_TIME_MASK, cnt); 1529 1530 cnt = mt76_rr(dev, MT_MIB_SDR20(phy->band_idx)); 1531 mib->ofdm_mdrdy_time += 1532 FIELD_GET(MT_MIB_SDR20_OFDM_VHT_MDRDY_TIME_MASK, cnt); 1533 1534 cnt = mt76_rr(dev, MT_MIB_SDR21(phy->band_idx)); 1535 mib->green_mdrdy_time += 1536 FIELD_GET(MT_MIB_SDR21_GREEN_MDRDY_TIME_MASK, cnt); 1537 1538 cnt = mt76_rr(dev, MT_MIB_SDR22(phy->band_idx)); 1539 mib->rx_ampdu_cnt += cnt; 
1540 1541 cnt = mt76_rr(dev, MT_MIB_SDR23(phy->band_idx)); 1542 mib->rx_ampdu_bytes_cnt += cnt; 1543 1544 cnt = mt76_rr(dev, MT_MIB_SDR24(phy->band_idx)); 1545 mib->rx_ampdu_valid_subframe_cnt += is_mt7915(&dev->mt76) ? 1546 FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt) : 1547 FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK_MT7916, cnt); 1548 1549 cnt = mt76_rr(dev, MT_MIB_SDR25(phy->band_idx)); 1550 mib->rx_ampdu_valid_subframe_bytes_cnt += cnt; 1551 1552 cnt = mt76_rr(dev, MT_MIB_SDR27(phy->band_idx)); 1553 mib->tx_rwp_fail_cnt += 1554 FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK, cnt); 1555 1556 cnt = mt76_rr(dev, MT_MIB_SDR28(phy->band_idx)); 1557 mib->tx_rwp_need_cnt += 1558 FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK, cnt); 1559 1560 cnt = mt76_rr(dev, MT_MIB_SDR29(phy->band_idx)); 1561 mib->rx_pfdrop_cnt += is_mt7915(&dev->mt76) ? 1562 FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt) : 1563 FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK_MT7916, cnt); 1564 1565 cnt = mt76_rr(dev, MT_MIB_SDRVEC(phy->band_idx)); 1566 mib->rx_vec_queue_overflow_drop_cnt += is_mt7915(&dev->mt76) ? 
1567 FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt) : 1568 FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK_MT7916, cnt); 1569 1570 cnt = mt76_rr(dev, MT_MIB_SDR31(phy->band_idx)); 1571 mib->rx_ba_cnt += cnt; 1572 1573 cnt = mt76_rr(dev, MT_MIB_SDRMUBF(phy->band_idx)); 1574 mib->tx_bf_cnt += FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt); 1575 1576 cnt = mt76_rr(dev, MT_MIB_DR8(phy->band_idx)); 1577 mib->tx_mu_mpdu_cnt += cnt; 1578 1579 cnt = mt76_rr(dev, MT_MIB_DR9(phy->band_idx)); 1580 mib->tx_mu_acked_mpdu_cnt += cnt; 1581 1582 cnt = mt76_rr(dev, MT_MIB_DR11(phy->band_idx)); 1583 mib->tx_su_acked_mpdu_cnt += cnt; 1584 1585 cnt = mt76_rr(dev, MT_ETBF_PAR_RPT0(phy->band_idx)); 1586 mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_PAR_RPT0_FB_BW, cnt); 1587 mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NC, cnt); 1588 mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NR, cnt); 1589 1590 for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) { 1591 cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i)); 1592 mib->tx_amsdu[i] += cnt; 1593 mib->tx_amsdu_cnt += cnt; 1594 } 1595 1596 aggr0 = phy->band_idx ? 
ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0; 1597 if (is_mt7915(&dev->mt76)) { 1598 for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) { 1599 val = mt76_rr(dev, MT_MIB_MB_SDR1(phy->band_idx, (i << 4))); 1600 mib->ba_miss_cnt += 1601 FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val); 1602 mib->ack_fail_cnt += 1603 FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val); 1604 1605 val = mt76_rr(dev, MT_MIB_MB_SDR0(phy->band_idx, (i << 4))); 1606 mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val); 1607 mib->rts_retries_cnt += 1608 FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val); 1609 1610 val = mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i)); 1611 dev->mt76.aggr_stats[aggr0++] += val & 0xffff; 1612 dev->mt76.aggr_stats[aggr0++] += val >> 16; 1613 1614 val = mt76_rr(dev, MT_TX_AGG_CNT2(phy->band_idx, i)); 1615 dev->mt76.aggr_stats[aggr1++] += val & 0xffff; 1616 dev->mt76.aggr_stats[aggr1++] += val >> 16; 1617 } 1618 1619 cnt = mt76_rr(dev, MT_MIB_SDR32(phy->band_idx)); 1620 mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt); 1621 1622 cnt = mt76_rr(dev, MT_MIB_SDR33(phy->band_idx)); 1623 mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR33_TX_PKT_IBF_CNT, cnt); 1624 1625 cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(phy->band_idx)); 1626 mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt); 1627 mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt); 1628 1629 cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(phy->band_idx)); 1630 mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_ETBF_TX_FB_CPL, cnt); 1631 mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_ETBF_TX_FB_TRI, cnt); 1632 1633 cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(phy->band_idx)); 1634 mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, cnt); 1635 mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, cnt); 1636 mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, cnt); 1637 mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, cnt); 1638 } else { 1639 for (i = 0; i < 2; i++) { 1640 /* rts count */ 1641 val = mt76_rr(dev, 
MT_MIB_MB_SDR0(phy->band_idx, (i << 2))); 1642 mib->rts_cnt += FIELD_GET(GENMASK(15, 0), val); 1643 mib->rts_cnt += FIELD_GET(GENMASK(31, 16), val); 1644 1645 /* rts retry count */ 1646 val = mt76_rr(dev, MT_MIB_MB_SDR1(phy->band_idx, (i << 2))); 1647 mib->rts_retries_cnt += FIELD_GET(GENMASK(15, 0), val); 1648 mib->rts_retries_cnt += FIELD_GET(GENMASK(31, 16), val); 1649 1650 /* ba miss count */ 1651 val = mt76_rr(dev, MT_MIB_MB_SDR2(phy->band_idx, (i << 2))); 1652 mib->ba_miss_cnt += FIELD_GET(GENMASK(15, 0), val); 1653 mib->ba_miss_cnt += FIELD_GET(GENMASK(31, 16), val); 1654 1655 /* ack fail count */ 1656 val = mt76_rr(dev, MT_MIB_MB_BFTF(phy->band_idx, (i << 2))); 1657 mib->ack_fail_cnt += FIELD_GET(GENMASK(15, 0), val); 1658 mib->ack_fail_cnt += FIELD_GET(GENMASK(31, 16), val); 1659 } 1660 1661 for (i = 0; i < 8; i++) { 1662 val = mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i)); 1663 dev->mt76.aggr_stats[aggr0++] += FIELD_GET(GENMASK(15, 0), val); 1664 dev->mt76.aggr_stats[aggr0++] += FIELD_GET(GENMASK(31, 16), val); 1665 } 1666 1667 cnt = mt76_rr(dev, MT_MIB_SDR32(phy->band_idx)); 1668 mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt); 1669 mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt); 1670 mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt); 1671 mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt); 1672 1673 cnt = mt76_rr(dev, MT_MIB_BFCR7(phy->band_idx)); 1674 mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_MIB_BFCR7_BFEE_TX_FB_CPL, cnt); 1675 1676 cnt = mt76_rr(dev, MT_MIB_BFCR2(phy->band_idx)); 1677 mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_MIB_BFCR2_BFEE_TX_FB_TRIG, cnt); 1678 1679 cnt = mt76_rr(dev, MT_MIB_BFCR0(phy->band_idx)); 1680 mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt); 1681 mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt); 1682 mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt); 1683 mib->tx_bf_rx_fb_all_cnt += 
FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt); 1684 1685 cnt = mt76_rr(dev, MT_MIB_BFCR1(phy->band_idx)); 1686 mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt); 1687 mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt); 1688 } 1689 } 1690 1691 static void mt7915_mac_severe_check(struct mt7915_phy *phy) 1692 { 1693 struct mt7915_dev *dev = phy->dev; 1694 bool ext_phy = phy != &dev->phy; 1695 u32 trb; 1696 1697 if (!phy->omac_mask) 1698 return; 1699 1700 /* In rare cases, TRB pointers might be out of sync leads to RMAC 1701 * stopping Rx, so check status periodically to see if TRB hardware 1702 * requires minimal recovery. 1703 */ 1704 trb = mt76_rr(dev, MT_TRB_RXPSR0(phy->band_idx)); 1705 1706 if ((FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, trb) != 1707 FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, trb)) && 1708 (FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, phy->trb_ts) != 1709 FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, phy->trb_ts)) && 1710 trb == phy->trb_ts) 1711 mt7915_mcu_set_ser(dev, SER_RECOVER, SER_SET_RECOVER_L3_RX_ABORT, 1712 ext_phy); 1713 1714 phy->trb_ts = trb; 1715 } 1716 1717 void mt7915_mac_sta_rc_work(struct work_struct *work) 1718 { 1719 struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work); 1720 struct ieee80211_sta *sta; 1721 struct ieee80211_vif *vif; 1722 struct mt7915_sta *msta; 1723 u32 changed; 1724 LIST_HEAD(list); 1725 1726 spin_lock_bh(&dev->sta_poll_lock); 1727 list_splice_init(&dev->sta_rc_list, &list); 1728 1729 while (!list_empty(&list)) { 1730 msta = list_first_entry(&list, struct mt7915_sta, rc_list); 1731 list_del_init(&msta->rc_list); 1732 changed = msta->changed; 1733 msta->changed = 0; 1734 spin_unlock_bh(&dev->sta_poll_lock); 1735 1736 sta = container_of((void *)msta, struct ieee80211_sta, drv_priv); 1737 vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv); 1738 1739 if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED | 1740 IEEE80211_RC_NSS_CHANGED | 1741 IEEE80211_RC_BW_CHANGED)) 1742 
mt7915_mcu_add_rate_ctrl(dev, vif, sta, true); 1743 1744 if (changed & IEEE80211_RC_SMPS_CHANGED) 1745 mt7915_mcu_add_smps(dev, vif, sta); 1746 1747 spin_lock_bh(&dev->sta_poll_lock); 1748 } 1749 1750 spin_unlock_bh(&dev->sta_poll_lock); 1751 } 1752 1753 void mt7915_mac_work(struct work_struct *work) 1754 { 1755 struct mt7915_phy *phy; 1756 struct mt76_phy *mphy; 1757 1758 mphy = (struct mt76_phy *)container_of(work, struct mt76_phy, 1759 mac_work.work); 1760 phy = mphy->priv; 1761 1762 mutex_lock(&mphy->dev->mutex); 1763 1764 mt76_update_survey(mphy); 1765 if (++mphy->mac_work_count == 5) { 1766 mphy->mac_work_count = 0; 1767 1768 mt7915_mac_update_stats(phy); 1769 mt7915_mac_severe_check(phy); 1770 } 1771 1772 mutex_unlock(&mphy->dev->mutex); 1773 1774 mt76_tx_status_check(mphy->dev, false); 1775 1776 ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, 1777 MT7915_WATCHDOG_TIME); 1778 } 1779 1780 static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy) 1781 { 1782 struct mt7915_dev *dev = phy->dev; 1783 1784 if (phy->rdd_state & BIT(0)) 1785 mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0, 1786 MT_RX_SEL0, 0); 1787 if (phy->rdd_state & BIT(1)) 1788 mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1, 1789 MT_RX_SEL0, 0); 1790 } 1791 1792 static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain) 1793 { 1794 int err, region; 1795 1796 switch (dev->mt76.region) { 1797 case NL80211_DFS_ETSI: 1798 region = 0; 1799 break; 1800 case NL80211_DFS_JP: 1801 region = 2; 1802 break; 1803 case NL80211_DFS_FCC: 1804 default: 1805 region = 1; 1806 break; 1807 } 1808 1809 err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain, 1810 MT_RX_SEL0, region); 1811 if (err < 0) 1812 return err; 1813 1814 return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain, 1815 MT_RX_SEL0, 1); 1816 } 1817 1818 static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy) 1819 { 1820 struct cfg80211_chan_def *chandef = &phy->mt76->chandef; 1821 struct mt7915_dev *dev 
= phy->dev; 1822 int err; 1823 1824 /* start CAC */ 1825 err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, phy->band_idx, 1826 MT_RX_SEL0, 0); 1827 if (err < 0) 1828 return err; 1829 1830 err = mt7915_dfs_start_rdd(dev, phy->band_idx); 1831 if (err < 0) 1832 return err; 1833 1834 phy->rdd_state |= BIT(phy->band_idx); 1835 1836 if (!is_mt7915(&dev->mt76)) 1837 return 0; 1838 1839 if (chandef->width == NL80211_CHAN_WIDTH_160 || 1840 chandef->width == NL80211_CHAN_WIDTH_80P80) { 1841 err = mt7915_dfs_start_rdd(dev, 1); 1842 if (err < 0) 1843 return err; 1844 1845 phy->rdd_state |= BIT(1); 1846 } 1847 1848 return 0; 1849 } 1850 1851 static int 1852 mt7915_dfs_init_radar_specs(struct mt7915_phy *phy) 1853 { 1854 const struct mt7915_dfs_radar_spec *radar_specs; 1855 struct mt7915_dev *dev = phy->dev; 1856 int err, i; 1857 1858 switch (dev->mt76.region) { 1859 case NL80211_DFS_FCC: 1860 radar_specs = &fcc_radar_specs; 1861 err = mt7915_mcu_set_fcc5_lpn(dev, 8); 1862 if (err < 0) 1863 return err; 1864 break; 1865 case NL80211_DFS_ETSI: 1866 radar_specs = &etsi_radar_specs; 1867 break; 1868 case NL80211_DFS_JP: 1869 radar_specs = &jp_radar_specs; 1870 break; 1871 default: 1872 return -EINVAL; 1873 } 1874 1875 for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) { 1876 err = mt7915_mcu_set_radar_th(dev, i, 1877 &radar_specs->radar_pattern[i]); 1878 if (err < 0) 1879 return err; 1880 } 1881 1882 return mt7915_mcu_set_pulse_th(dev, &radar_specs->pulse_th); 1883 } 1884 1885 int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy) 1886 { 1887 struct mt7915_dev *dev = phy->dev; 1888 enum mt76_dfs_state dfs_state, prev_state; 1889 int err; 1890 1891 prev_state = phy->mt76->dfs_state; 1892 dfs_state = mt76_phy_dfs_state(phy->mt76); 1893 1894 if (prev_state == dfs_state) 1895 return 0; 1896 1897 if (prev_state == MT_DFS_STATE_UNKNOWN) 1898 mt7915_dfs_stop_radar_detector(phy); 1899 1900 if (dfs_state == MT_DFS_STATE_DISABLED) 1901 goto stop; 1902 1903 if (prev_state 
<= MT_DFS_STATE_DISABLED) { 1904 err = mt7915_dfs_init_radar_specs(phy); 1905 if (err < 0) 1906 return err; 1907 1908 err = mt7915_dfs_start_radar_detector(phy); 1909 if (err < 0) 1910 return err; 1911 1912 phy->mt76->dfs_state = MT_DFS_STATE_CAC; 1913 } 1914 1915 if (dfs_state == MT_DFS_STATE_CAC) 1916 return 0; 1917 1918 err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END, 1919 phy->band_idx, MT_RX_SEL0, 0); 1920 if (err < 0) { 1921 phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN; 1922 return err; 1923 } 1924 1925 phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE; 1926 return 0; 1927 1928 stop: 1929 err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START, 1930 phy->band_idx, MT_RX_SEL0, 0); 1931 if (err < 0) 1932 return err; 1933 1934 mt7915_dfs_stop_radar_detector(phy); 1935 phy->mt76->dfs_state = MT_DFS_STATE_DISABLED; 1936 1937 return 0; 1938 } 1939 1940 static int 1941 mt7915_mac_twt_duration_align(int duration) 1942 { 1943 return duration << 8; 1944 } 1945 1946 static u64 1947 mt7915_mac_twt_sched_list_add(struct mt7915_dev *dev, 1948 struct mt7915_twt_flow *flow) 1949 { 1950 struct mt7915_twt_flow *iter, *iter_next; 1951 u32 duration = flow->duration << 8; 1952 u64 start_tsf; 1953 1954 iter = list_first_entry_or_null(&dev->twt_list, 1955 struct mt7915_twt_flow, list); 1956 if (!iter || !iter->sched || iter->start_tsf > duration) { 1957 /* add flow as first entry in the list */ 1958 list_add(&flow->list, &dev->twt_list); 1959 return 0; 1960 } 1961 1962 list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) { 1963 start_tsf = iter->start_tsf + 1964 mt7915_mac_twt_duration_align(iter->duration); 1965 if (list_is_last(&iter->list, &dev->twt_list)) 1966 break; 1967 1968 if (!iter_next->sched || 1969 iter_next->start_tsf > start_tsf + duration) { 1970 list_add(&flow->list, &iter->list); 1971 goto out; 1972 } 1973 } 1974 1975 /* add flow as last entry in the list */ 1976 list_add_tail(&flow->list, &dev->twt_list); 1977 out: 1978 return start_tsf; 1979 } 1980 
/* Validate an incoming TWT setup request against the agreement types
 * this driver supports.  Returns 0 when the request can be served,
 * -EOPNOTSUPP otherwise.
 */
static int mt7915_mac_check_twt_req(struct ieee80211_twt_setup *twt)
{
	struct ieee80211_twt_params *twt_agrt;
	u64 interval, duration;
	u16 mantissa;
	u8 exp;

	/* only individual agreement supported */
	if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
		return -EOPNOTSUPP;

	/* only 256us unit supported */
	if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
		return -EOPNOTSUPP;

	twt_agrt = (struct ieee80211_twt_params *)twt->params;

	/* explicit agreement not supported */
	if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
		return -EOPNOTSUPP;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
			le16_to_cpu(twt_agrt->req_type));
	mantissa = le16_to_cpu(twt_agrt->mantissa);
	duration = twt_agrt->min_twt_dur << 8;	/* 256 us units -> us */

	/* wake interval must be able to contain the wake duration */
	interval = (u64)mantissa << exp;
	if (interval < duration)
		return -EOPNOTSUPP;

	return 0;
}

/* Return true if the station already has an active TWT flow with the
 * exact same parameters as the incoming request (duplicate setup).
 */
static bool
mt7915_mac_twt_param_equal(struct mt7915_sta *msta,
			   struct ieee80211_twt_params *twt_agrt)
{
	u16 type = le16_to_cpu(twt_agrt->req_type);
	u8 exp;
	int i;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
	for (i = 0; i < MT7915_MAX_STA_TWT_AGRT; i++) {
		struct mt7915_twt_flow *f;

		if (!(msta->twt.flowid_mask & BIT(i)))
			continue;

		f = &msta->twt.flow[i];
		if (f->duration == twt_agrt->min_twt_dur &&
		    f->mantissa == twt_agrt->mantissa &&
		    f->exp == exp &&
		    f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
		    f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
		    f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
			return true;
	}

	return false;
}

/* Handle a TWT setup request from a station: allocate a flow id and
 * firmware table slot, schedule the flow, program the agreement into
 * the MCU, and rewrite the request frame in place with the response
 * (ACCEPT on success, DICTATE with an adjusted duration, or the
 * default REJECT on any failure path).
 */
void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta,
			      struct ieee80211_twt_setup *twt)
{
	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
	struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
	u16 req_type = le16_to_cpu(twt_agrt->req_type);
	enum ieee80211_twt_setup_cmd sta_setup_cmd;
	struct mt7915_dev *dev = mt7915_hw_dev(hw);
	struct mt7915_twt_flow *flow;
	int flowid, table_id;
	u8 exp;

	if (mt7915_mac_check_twt_req(twt))
		goto out;

	mutex_lock(&dev->mt76.mutex);

	/* no free firmware agreement slots */
	if (dev->twt.n_agrt == MT7915_MAX_TWT_AGRT)
		goto unlock;

	/* station already holds its maximum number of flows */
	if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
		goto unlock;

	/* requested wake duration too small: dictate our minimum back */
	if (twt_agrt->min_twt_dur < MT7915_MIN_TWT_DUR) {
		setup_cmd = TWT_SETUP_CMD_DICTATE;
		twt_agrt->min_twt_dur = MT7915_MIN_TWT_DUR;
		goto unlock;
	}

	flowid = ffs(~msta->twt.flowid_mask) - 1;
	le16p_replace_bits(&twt_agrt->req_type, flowid,
			   IEEE80211_TWT_REQTYPE_FLOWID);

	/* NOTE(review): assumes twt.table_mask always has a clear bit
	 * whenever n_agrt < MT7915_MAX_TWT_AGRT - verify the mask width
	 * in the header matches MT7915_MAX_TWT_AGRT.
	 */
	table_id = ffs(~dev->twt.table_mask) - 1;
	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);

	/* duplicate of an existing agreement: reject */
	if (mt7915_mac_twt_param_equal(msta, twt_agrt))
		goto unlock;

	flow = &msta->twt.flow[flowid];
	memset(flow, 0, sizeof(*flow));
	INIT_LIST_HEAD(&flow->list);
	flow->wcid = msta->wcid.idx;
	flow->table_id = table_id;
	flow->id = flowid;
	flow->duration = twt_agrt->min_twt_dur;
	flow->mantissa = twt_agrt->mantissa;
	flow->exp = exp;
	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);

	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
		u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
		u64 flow_tsf, curr_tsf;
		u32 rem;

		flow->sched = true;
		flow->start_tsf = mt7915_mac_twt_sched_list_add(dev, flow);
		curr_tsf = __mt7915_get_tsf(hw, msta->vif);
		/* first service period boundary at/after the current TSF */
		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
		flow_tsf = curr_tsf + interval - rem;
		twt_agrt->twt = cpu_to_le64(flow_tsf);
	} else {
		list_add_tail(&flow->list, &dev->twt_list);
	}
	flow->tsf = le64_to_cpu(twt_agrt->twt);

	if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
		goto unlock;

	/* commit: mark ids as used only after the firmware accepted */
	setup_cmd = TWT_SETUP_CMD_ACCEPT;
	dev->twt.table_mask |= BIT(table_id);
	msta->twt.flowid_mask |= BIT(flowid);
	dev->twt.n_agrt++;

unlock:
	mutex_unlock(&dev->mt76.mutex);
out:
	/* write the response command and strip all other control bits */
	le16p_replace_bits(&twt_agrt->req_type, setup_cmd,
			   IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
		       (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
}

/* Tear down one TWT flow of a station: remove the agreement from the
 * firmware, unlink it from the schedule list and release its flow and
 * table ids.  No-op for invalid or inactive flow ids; keeps the ids
 * reserved if the firmware update fails.
 */
void mt7915_mac_twt_teardown_flow(struct mt7915_dev *dev,
				  struct mt7915_sta *msta,
				  u8 flowid)
{
	struct mt7915_twt_flow *flow;

	lockdep_assert_held(&dev->mt76.mutex);

	if (flowid >= ARRAY_SIZE(msta->twt.flow))
		return;

	if (!(msta->twt.flowid_mask & BIT(flowid)))
		return;

	flow = &msta->twt.flow[flowid];
	if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow,
				       MCU_TWT_AGRT_DELETE))
		return;

	list_del_init(&flow->list);
	msta->twt.flowid_mask &= ~BIT(flowid);
	dev->twt.table_mask &= ~BIT(flow->table_id);
	dev->twt.n_agrt--;
}