// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7915.h"
#include "../dma.h"
#include "mac.h"
#include "mcu.h"

/*
 * Convert a hardware RCPI field to dBm: RSSI = RCPI / 2 - 110,
 * e.g. an RCPI of 70 maps to (70 - 220) / 2 = -75 dBm.
 */
#define to_rssi(field, rxv)	((FIELD_GET(field, rxv) - 220) / 2)

static const struct mt7915_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0,  6, 32, 28, 0,  990, 5010, 17, 1, 1 },
		[6] =  { 1, 0,  9, 32, 28, 0,  615, 5010, 27, 1, 1 },
		[7] =  { 1, 0, 15, 32, 28, 0,  240,  445, 27, 1, 1 },
		[8] =  { 1, 0, 12, 32, 28, 0,  240,  510, 42, 1, 1 },
		[9] =  { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
		[10] = { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
		[11] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 },
		[12] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 },
	},
};

static const struct mt7915_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0,  8,  32, 28, 0, 508, 3076, 13, 1,  1 },
		[1] = { 1, 0, 12,  32, 28, 0, 140,  240, 17, 1,  1 },
		[2] = { 1, 0,  8,  32, 28, 0, 190,  510, 22, 1,  1 },
		[3] = { 1, 0,  6,  32, 28, 0, 190,  510, 32, 1,  1 },
		[4] = { 1, 0,  9, 255, 28, 0, 323,  343, 13, 1, 32 },
	},
};

static const struct mt7915_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] =  { 1, 0,  8,  32, 28, 0,  508, 3076,  13, 1,  1 },
		[1] =  { 1, 0, 12,  32, 28, 0,  140,  240,  17, 1,  1 },
		[2] =  { 1, 0,  8,  32, 28, 0,  190,  510,  22, 1,  1 },
		[3] =  { 1, 0,  6,  32, 28, 0,  190,  510,  32, 1,  1 },
		[4] =  { 1, 0,  9, 255, 28, 0,  323,  343,  13, 1, 32 },
		[13] = { 1, 0,  7,  32, 28, 0, 3836, 3856,  14, 1,  1 },
		[14] = { 1, 0,  6,  32, 28, 0,  615, 5010, 110, 1,  1 },
		[15] = { 1, 1,  0,   0,  0, 0,   15, 5010, 110, 0,  0, 12, 32, 28 },
	},
};

static struct mt76_wcid *mt7915_rx_get_wcid(struct mt7915_dev *dev,
					    u16 idx, bool unicast)
{
	struct mt7915_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7915_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}

void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
}

bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}

u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid, u8 dw)
{
	mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
		FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));

	return MT_WTBL_LMAC_OFFS(wcid, dw);
}

static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
{
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct ieee80211_sta *sta;
	struct mt7915_sta *msta;
	struct rate_info *rate;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	int i;
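
	/*
	 * Grab the whole pending list under the lock, then poll the
	 * hardware counters for each entry without holding it.
	 */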
	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&dev->sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

	rcu_read_lock();

	while (true) {
		bool clear = false;
		u32 addr, val;
		u16 idx;
		u8 bw;

		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&sta_poll_list)) {
			spin_unlock_bh(&dev->sta_poll_lock);
			break;
		}
		msta = list_first_entry(&sta_poll_list,
					struct mt7915_sta, poll_list);
		list_del_init(&msta->poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);

		idx = msta->wcid.idx;
		addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 20);

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = msta->airtime_ac[i];
			u32 rx_last = msta->airtime_ac[i + 4];

			msta->airtime_ac[i] = mt76_rr(dev, addr);
			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			tx_time[i] = msta->airtime_ac[i] - tx_last;
			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;

			/* a counter is approaching wraparound, reset them all */
			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

		if (clear) {
			mt7915_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u8 q = mt76_connac_lmac_mapping(i);
			u32 tx_cur = tx_time[q];
			u32 rx_cur = rx_time[q];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur,
						       rx_cur);
		}

		/*
		 * We don't support reading GI info from txs packets.
		 * For accurate tx status reporting and AQL improvement,
		 * we need to make sure the flags match, so we poll the
		 * GI from the per-sta WTBL counters directly.
		 */
		rate = &msta->wcid.rate;
		addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 7);
		val = mt76_rr(dev, addr);
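
		/*
		 * WTBL DW7, as consumed here: HT/VHT short-GI sits one
		 * bit per bandwidth from bit 12, HE GI is a 2-bit field
		 * per bandwidth starting at bit 24.
		 */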
		switch (rate->bw) {
		case RATE_INFO_BW_160:
			bw = IEEE80211_STA_RX_BW_160;
			break;
		case RATE_INFO_BW_80:
			bw = IEEE80211_STA_RX_BW_80;
			break;
		case RATE_INFO_BW_40:
			bw = IEEE80211_STA_RX_BW_40;
			break;
		default:
			bw = IEEE80211_STA_RX_BW_20;
			break;
		}

		if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
			u8 offs = 24 + 2 * bw;

			rate->he_gi = (val & (0x3 << offs)) >> offs;
		} else if (rate->flags &
			   (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) {
			if (val & BIT(12 + bw))
				rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
			else
				rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
		}
	}

	rcu_read_unlock();
}

static int
mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7915_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv = NULL;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 rxd3 = le32_to_cpu(rxd[3]);
	u32 rxd4 = le32_to_cpu(rxd[4]);
	u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
	bool unicast, insert_ccmp_hdr = false;
	u8 remove_pad, amsdu_info;
	u8 mode = 0, qos_ctl = 0;
	struct mt7915_sta *msta = NULL;
	bool hdr_trans;
	u16 hdr_gap;
	u16 seq_ctrl = 0;
	__le16 fc = 0;
	int idx;

	memset(status, 0, sizeof(*status));

	if ((rxd1 & MT_RXD1_NORMAL_BAND_IDX) && !phy->band_idx) {
		mphy = dev->mt76.phys[MT_BAND1];
		if (!mphy)
			return -EINVAL;

		phy = mphy->priv;
		status->phy_idx = 1;
	}

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
		return -EINVAL;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	status->wcid = mt7915_rx_get_wcid(dev, idx, unicast);

	if (status->wcid) {
		msta = container_of(status->wcid, struct mt7915_sta, wcid);
		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&msta->poll_list))
			list_add_tail(&msta->poll_list, &dev->sta_poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);
	}

	status->freq = mphy->chandef.chan->center_freq;
	status->band = mphy->chandef.chan->band;
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else if (status->band == NL80211_BAND_6GHZ)
		sband = &mphy->sband_6g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!sband->channels)
		return -EINVAL;

	if ((rxd0 & csum_mask) == csum_mask)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;
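
	/*
	 * rxd now points past the six-dword base descriptor; the optional
	 * groups are parsed in the order 4, 1, 2, 3 (with group 5 trailing
	 * group 3), each one guarded by a presence bit in rxd1.
	 */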
	rxd += 6;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		fc = cpu_to_le16(FIELD_GET(MT_RXD6_FRAME_CONTROL, v0));
		qos_ctl = FIELD_GET(MT_RXD8_QOS_CTL, v2);
		seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				/* bump the reference, skipping 0 on wraparound */
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* RXD Group 3 - P-RXV */
	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		u32 v0, v1;
		int ret;

		rxv = rxd;
		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;

		v0 = le32_to_cpu(rxv[0]);
		v1 = le32_to_cpu(rxv[1]);

		if (v0 & MT_PRXV_HT_AD_CODE)
			status->enc_flags |= RX_ENC_FLAG_LDPC;

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);

		/* RXD Group 5 - C-RXV */
		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			rxd += 18;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;
		}

		if (!is_mt7915(&dev->mt76) || (rxd1 & MT_RXD1_NORMAL_GROUP_5)) {
			ret = mt76_connac2_mac_fill_rx_rate(&dev->mt76, status,
							    sband, rxv, &mode);
			if (ret < 0)
				return ret;
		}
	}

	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
	}
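
	/*
	 * hdr_gap spans the base RXD, the optional groups and the hardware
	 * alignment pads that sit in front of the actual frame header.
	 */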
	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
	if (hdr_trans && ieee80211_has_morefrags(fc)) {
		struct ieee80211_vif *vif;
		int err;

		if (!msta || !msta->vif)
			return -EINVAL;

		vif = container_of((void *)msta->vif, struct ieee80211_vif,
				   drv_priv);
		err = mt76_connac2_reverse_frag0_hdr_trans(vif, skb, hdr_gap);
		if (err)
			return err;

		hdr_trans = false;
	} else {
		int pad_start = 0;

		skb_pull(skb, hdr_gap);
		if (!hdr_trans && status->amsdu) {
			pad_start = ieee80211_get_hdrlen_from_skb(skb);
		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
			/*
			 * When header translation failure is indicated,
			 * the hardware will insert an extra 2-byte field
			 * containing the data length after the protocol
			 * type field.
			 */
			pad_start = 12;
			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
				pad_start += 4;

			if (get_unaligned_be16(skb->data + pad_start) !=
			    skb->len - pad_start - 2)
				pad_start = 0;
		}

		if (pad_start) {
			memmove(skb->data + 2, skb->data, pad_start);
			skb_pull(skb, 2);
		}
	}

	if (!hdr_trans) {
		struct ieee80211_hdr *hdr;

		if (insert_ccmp_hdr) {
			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

			mt76_insert_ccmp_hdr(skb, key_id);
		}

		hdr = mt76_skb_get_hdr(skb);
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *ieee80211_get_qos_ctl(hdr);
		}
	} else {
		status->flag |= RX_FLAG_8023;
	}

	if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
		mt76_connac2_mac_decode_he_radiotap(&dev->mt76, skb, rxv, mode);

	if (!status->wcid || !ieee80211_is_data_qos(fc))
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(fc);
	status->qos_ctl = qos_ctl;
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);

	return 0;
}

static void
mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
{
#ifdef CONFIG_NL80211_TESTMODE
	struct mt7915_phy *phy = &dev->phy;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv_hdr = rxd + 2;
	__le32 *rxv = rxd + 4;
	u32 rcpi, ib_rssi, wb_rssi, v20, v21;
	u8 band_idx;
	s32 foe;
	u8 snr;
	int i;

	band_idx = le32_get_bits(rxv_hdr[1], MT_RXV_HDR_BAND_IDX);
	if (band_idx && !phy->band_idx) {
		phy = mt7915_ext_phy(dev);
		if (!phy)
			goto out;
	}

	rcpi = le32_to_cpu(rxv[6]);
	ib_rssi = le32_to_cpu(rxv[7]);
	wb_rssi = le32_to_cpu(rxv[8]) >> 5;

	for (i = 0; i < 4; i++, rcpi >>= 8, ib_rssi >>= 8, wb_rssi >>= 9) {
		if (i == 3)
			wb_rssi = le32_to_cpu(rxv[9]);

		phy->test.last_rcpi[i] = rcpi & 0xff;
		phy->test.last_ib_rssi[i] = ib_rssi & 0xff;
		phy->test.last_wb_rssi[i] = wb_rssi & 0xff;
	}

	v20 = le32_to_cpu(rxv[20]);
	v21 = le32_to_cpu(rxv[21]);

	foe = FIELD_GET(MT_CRXV_FOE_LO, v20) |
	      (FIELD_GET(MT_CRXV_FOE_HI, v21) << MT_CRXV_FOE_SHIFT);

	snr = FIELD_GET(MT_CRXV_SNR, v20) - 16;

	phy->test.last_freq_offset = foe;
	phy->test.last_snr = snr;
out:
#endif
	dev_kfree_skb(skb);
}

static void
mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
			 struct sk_buff *skb)
{
#ifdef CONFIG_NL80211_TESTMODE
	struct mt76_testmode_data *td = &phy->mt76->test;
	const struct ieee80211_rate *r;
	u8 bw, mode, nss = td->tx_rate_nss;
	u8 rate_idx = td->tx_rate_idx;
	u16 rateval = 0;
	u32 val;
	bool cck = false;
	int band;

	if (skb != phy->mt76->test.tx_skb)
		return;

	switch (td->tx_rate_mode) {
	case MT76_TM_TX_MODE_HT:
		nss = 1 + (rate_idx >> 3);
		mode = MT_PHY_TYPE_HT;
		break;
	case MT76_TM_TX_MODE_VHT:
		mode = MT_PHY_TYPE_VHT;
		break;
	case MT76_TM_TX_MODE_HE_SU:
		mode = MT_PHY_TYPE_HE_SU;
		break;
	case MT76_TM_TX_MODE_HE_EXT_SU:
		mode = MT_PHY_TYPE_HE_EXT_SU;
		break;
	case MT76_TM_TX_MODE_HE_TB:
		mode = MT_PHY_TYPE_HE_TB;
		break;
	case MT76_TM_TX_MODE_HE_MU:
		mode = MT_PHY_TYPE_HE_MU;
		break;
	case MT76_TM_TX_MODE_CCK:
		cck = true;
		fallthrough;
	case MT76_TM_TX_MODE_OFDM:
		band = phy->mt76->chandef.chan->band;
		if (band == NL80211_BAND_2GHZ && !cck)
			rate_idx += 4;

		r = &phy->mt76->hw->wiphy->bands[band]->bitrates[rate_idx];
		val = cck ? r->hw_value_short : r->hw_value;

		mode = val >> 8;
		rate_idx = val & 0xff;
		break;
	default:
		mode = MT_PHY_TYPE_OFDM;
		break;
	}
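
	/* chandef width -> TXD bandwidth code: 0=20, 1=40, 2=80, 3=160/80+80 MHz */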
	switch (phy->mt76->chandef.width) {
	case NL80211_CHAN_WIDTH_40:
		bw = 1;
		break;
	case NL80211_CHAN_WIDTH_80:
		bw = 2;
		break;
	case NL80211_CHAN_WIDTH_80P80:
	case NL80211_CHAN_WIDTH_160:
		bw = 3;
		break;
	default:
		bw = 0;
		break;
	}

	if (td->tx_rate_stbc && nss == 1) {
		nss++;
		rateval |= MT_TX_RATE_STBC;
	}

	rateval |= FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
		   FIELD_PREP(MT_TX_RATE_MODE, mode) |
		   FIELD_PREP(MT_TX_RATE_NSS, nss - 1);

	txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);

	le32p_replace_bits(&txwi[3], 1, MT_TXD3_REM_TX_COUNT);
	if (td->tx_rate_mode < MT76_TM_TX_MODE_HT)
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);

	val = MT_TXD6_FIXED_BW |
	      FIELD_PREP(MT_TXD6_BW, bw) |
	      FIELD_PREP(MT_TXD6_TX_RATE, rateval) |
	      FIELD_PREP(MT_TXD6_SGI, td->tx_rate_sgi);

	/* for HE_SU/HE_EXT_SU PPDU
	 * - 1x, 2x, 4x LTF + 0.8us GI
	 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI
	 * for HE_MU PPDU
	 * - 2x, 4x LTF + 0.8us GI
	 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI
	 * for HE_TB PPDU
	 * - 1x, 2x LTF + 1.6us GI
	 * - 4x LTF + 3.2us GI
	 */
	if (mode >= MT_PHY_TYPE_HE_SU)
		val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf);

	if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
		val |= MT_TXD6_LDPC;

	txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
	txwi[6] |= cpu_to_le32(val);
	txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
					  phy->test.spe_idx));
#endif
}

void mt7915_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
			   struct ieee80211_key_conf *key,
			   enum mt76_txq_id qid, u32 changed)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	struct mt76_phy *mphy = &dev->phy;

	if (phy_idx && dev->phys[MT_BAND1])
		mphy = dev->phys[MT_BAND1];

	mt76_connac2_mac_write_txwi(dev, txwi, skb, wcid, key, pid, qid, changed);

	if (mt76_testmode_enabled(mphy))
		mt7915_mac_write_txwi_tm(mphy->priv, txwi, skb);
}
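
/*
 * TX path: reserve a token for the txwi cache entry, build the TXD and
 * the firmware CT header (mt76_connac_fw_txp) behind it, then hand the
 * skb fragments to the DMA layer. The token comes back in a TXRX_NOTIFY
 * free event once the hardware is done with the frame.
 */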
int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_connac_fw_txp *txp;
	struct mt76_txwi_cache *t;
	int id, i, nbuf = tx_info->nbuf - 1;
	u8 *txwi = (u8 *)txwi_ptr;
	int pid;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	if (sta) {
		struct mt7915_sta *msta;

		msta = (struct mt7915_sta *)sta->drv_priv;

		if (time_after(jiffies, msta->jiffies + HZ / 4)) {
			info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
			msta->jiffies = jiffies;
		}
	}

	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
	mt7915_mac_write_txwi(mdev, txwi_ptr, tx_info->skb, wcid, pid, key,
			      qid, 0);

	txp = (struct mt76_connac_fw_txp *)(txwi + MT_TXD_SIZE);
	for (i = 0; i < nbuf; i++) {
		txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
		txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
	}
	txp->nbuf = nbuf;

	txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD | MT_CT_INFO_FROM_HOST);

	if (!key)
		txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    ieee80211_is_mgmt(hdr->frame_control))
		txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;

		txp->bss_idx = mvif->mt76.idx;
	}

	txp->token = cpu_to_le16(id);
	if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
		txp->rept_wds_wcid = cpu_to_le16(wcid->idx);
	else
		txp->rept_wds_wcid = cpu_to_le16(0x3ff);
	tx_info->skb = DMA_DUMMY_DATA;

	/* pass partial skb header to fw */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	return 0;
}

u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
{
	struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
	__le32 *txwi = ptr;
	u32 val;

	memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));

	val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
	txwi[1] = cpu_to_le32(val);

	txp->token = cpu_to_le16(token_id);
	txp->nbuf = 1;
	txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));

	return MT_TXD_SIZE + sizeof(*txp);
}
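
/*
 * On completion of the first QoS data frame per TID, nudge mac80211 to
 * start a BA session with that peer; the VO TIDs are left alone.
 */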
static void
mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
{
	struct mt7915_sta *msta;
	u16 fc, tid;
	u32 val;

	if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
		return;

	tid = le32_get_bits(txwi[1], MT_TXD1_TID);
	if (tid >= 6) /* skip VO queue */
		return;

	val = le32_to_cpu(txwi[2]);
	fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
	     FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
		return;

	msta = (struct mt7915_sta *)sta->drv_priv;
	if (!test_and_set_bit(tid, &msta->ampdu_state))
		ieee80211_start_tx_ba_session(sta, tid, 0);
}

static void
mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
		 struct ieee80211_sta *sta, struct list_head *free_list)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt7915_sta *msta;
	struct mt76_wcid *wcid;
	__le32 *txwi;
	u16 wcid_idx;

	mt76_connac_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
	if (sta) {
		wcid = (struct mt76_wcid *)sta->drv_priv;
		wcid_idx = wcid->idx;
	} else {
		wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
		wcid = rcu_dereference(dev->mt76.wcid[wcid_idx]);

		if (wcid && wcid->sta) {
			msta = container_of(wcid, struct mt7915_sta, wcid);
			sta = container_of((void *)msta, struct ieee80211_sta,
					   drv_priv);
			spin_lock_bh(&dev->sta_poll_lock);
			if (list_empty(&msta->poll_list))
				list_add_tail(&msta->poll_list, &dev->sta_poll_list);
			spin_unlock_bh(&dev->sta_poll_lock);
		}
	}

	if (sta && likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
		mt7915_tx_check_aggr(sta, txwi);

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);

out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}

static void
mt7915_mac_tx_free_prepare(struct mt7915_dev *dev)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy_ext = mdev->phys[MT_BAND1];

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
	if (mphy_ext) {
		mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false);
	}
}

static void
mt7915_mac_tx_free_done(struct mt7915_dev *dev,
			struct list_head *free_list, bool wake)
{
	struct sk_buff *skb, *tmp;

	mt7915_mac_sta_poll(dev);

	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	list_for_each_entry_safe(skb, tmp, free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}

static void
mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
{
	struct mt76_connac_tx_free *free = data;
	__le32 *tx_info = (__le32 *)(data + sizeof(*free));
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	LIST_HEAD(free_list);
	void *end = data + len;
	bool v3, wake = false;
	u16 total, count = 0;
	u32 txd = le32_to_cpu(free->txd);
	__le32 *cur_info;

	mt7915_mac_tx_free_prepare(dev);

	total = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
	v3 = (FIELD_GET(MT_TX_FREE_VER, txd) == 0x4);
	if (WARN_ON_ONCE((void *)&tx_info[total >> v3] > end))
		return;
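
	/*
	 * The event body is a stream of 32-bit words: a "pair" word
	 * switches the current wcid, the following words carry either one
	 * MSDU token ID or, in the v3 layout, two packed 15-bit IDs where
	 * an all-ones ID marks an empty slot.
	 */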
	for (cur_info = tx_info; count < total; cur_info++) {
		u32 msdu, info = le32_to_cpu(*cur_info);
		u8 i;

		/*
		 * 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		if (info & MT_TX_FREE_PAIR) {
			struct mt7915_sta *msta;
			struct mt76_wcid *wcid;
			u16 idx;

			idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			msta = container_of(wcid, struct mt7915_sta, wcid);
			spin_lock_bh(&dev->sta_poll_lock);
			if (list_empty(&msta->poll_list))
				list_add_tail(&msta->poll_list, &dev->sta_poll_list);
			spin_unlock_bh(&dev->sta_poll_lock);
			continue;
		}

		if (v3 && (info & MT_TX_FREE_MPDU_HEADER))
			continue;

		for (i = 0; i < 1 + v3; i++) {
			if (v3) {
				msdu = (info >> (15 * i)) & MT_TX_FREE_MSDU_ID_V3;
				if (msdu == MT_TX_FREE_MSDU_ID_V3)
					continue;
			} else {
				msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
			}
			count++;
			txwi = mt76_token_release(mdev, msdu, &wake);
			if (!txwi)
				continue;

			mt7915_txwi_free(dev, txwi, sta, &free_list);
		}
	}

	mt7915_mac_tx_free_done(dev, &free_list, wake);
}

static void
mt7915_mac_tx_free_v0(struct mt7915_dev *dev, void *data, int len)
{
	struct mt76_connac_tx_free *free = data;
	__le16 *info = (__le16 *)(data + sizeof(*free));
	struct mt76_dev *mdev = &dev->mt76;
	void *end = data + len;
	LIST_HEAD(free_list);
	bool wake = false;
	u8 i, count;

	mt7915_mac_tx_free_prepare(dev);

	count = FIELD_GET(MT_TX_FREE_MSDU_CNT_V0, le16_to_cpu(free->ctrl));
	if (WARN_ON_ONCE((void *)&info[count] > end))
		return;

	for (i = 0; i < count; i++) {
		struct mt76_txwi_cache *txwi;
		u16 msdu = le16_to_cpu(info[i]);

		txwi = mt76_token_release(mdev, msdu, &wake);
		if (!txwi)
			continue;

		mt7915_txwi_free(dev, txwi, NULL, &free_list);
	}

	mt7915_mac_tx_free_done(dev, &free_list, wake);
}
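
/*
 * Parse one TXS (tx status) record: PIDs below MT_PACKET_ID_WED carry no
 * tracked frame and are ignored, MT_PACKET_ID_WED itself only refreshes
 * the per-wcid rate state, anything above completes a status-tracked skb.
 */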
static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
{
	struct mt7915_sta *msta = NULL;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u16 wcidx;
	u8 pid;

	if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
		return;

	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);

	if (pid < MT_PACKET_ID_WED)
		return;

	if (wcidx >= mt7915_wtbl_size(dev))
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7915_sta, wcid);

	if (pid == MT_PACKET_ID_WED)
		mt76_connac2_mac_fill_txs(&dev->mt76, wcid, txs_data);
	else
		mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data);

	if (!wcid->sta)
		goto out;

	spin_lock_bh(&dev->sta_poll_lock);
	if (list_empty(&msta->poll_list))
		list_add_tail(&msta->poll_list, &dev->sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

out:
	rcu_read_unlock();
}

bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
{
	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
	__le32 *rxd = (__le32 *)data;
	__le32 *end = (__le32 *)&rxd[len / 4];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7915_mac_tx_free(dev, data, len);
		return false;
	case PKT_TYPE_TXRX_NOTIFY_V0:
		mt7915_mac_tx_free_v0(dev, data, len);
		return false;
	case PKT_TYPE_TXS:
		/* TXS records are eight dwords each, after a two-dword header */
		for (rxd += 2; rxd + 8 <= end; rxd += 8)
			mt7915_mac_add_txs(dev, rxd);
		return false;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7915_debugfs_rx_fw_monitor(dev, data, len);
		return false;
	default:
		return true;
	}
}

void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb)
{
	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7915_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_TXRX_NOTIFY_V0:
		mt7915_mac_tx_free_v0(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7915_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXRXV:
		mt7915_mac_fill_rx_vector(dev, skb);
		break;
	case PKT_TYPE_TXS:
		for (rxd += 2; rxd + 8 <= end; rxd += 8)
			mt7915_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7915_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL:
		if (!mt7915_mac_fill_rx(dev, skb)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}

void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	u32 reg = MT_WF_PHY_RX_CTRL1(phy->band_idx);

	mt76_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN);
	mt76_set(dev, reg, BIT(11) | BIT(9));
}

void mt7915_mac_reset_counters(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	int i;

	for (i = 0; i < 4; i++) {
		mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
		mt76_rr(dev, MT_TX_AGG_CNT2(phy->band_idx, i));
	}

	i = 0;
	phy->mt76->survey_time = ktime_get_boottime();
	if (phy->band_idx)
		i = ARRAY_SIZE(dev->mt76.aggr_stats) / 2;

	memset(&dev->mt76.aggr_stats[i], 0, sizeof(dev->mt76.aggr_stats) / 2);

	/* reset airtime counters */
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(phy->band_idx),
		 MT_WF_RMAC_MIB_RXTIME_CLR);

	mt7915_mcu_get_chan_mib_info(phy, true);
}
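
/*
 * Protection/IFS timing is shared between both bands: the effective
 * coverage class is the maximum of the two PHYs, and each class unit
 * adds 3 us to the CCA and PLCP timeout values programmed below.
 */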
void mt7915_mac_set_timing(struct mt7915_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7915_dev *dev = phy->dev;
	struct mt7915_phy *ext_phy = mt7915_ext_phy(dev);
	u32 val, reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	int offset;
	bool a_band = !(phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ);

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	if (ext_phy)
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       ext_phy->coverage_class);

	mt76_set(dev, MT_ARB_SCR(phy->band_idx),
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	udelay(1);

	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(phy->band_idx), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(phy->band_idx), ofdm + reg_offset);
	mt76_wr(dev, MT_TMAC_ICR0(phy->band_idx),
		FIELD_PREP(MT_IFS_EIFS_OFDM, a_band ? 84 : 78) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, 10) |
		FIELD_PREP(MT_IFS_SLOT, phy->slottime));

	mt76_wr(dev, MT_TMAC_ICR1(phy->band_idx),
		FIELD_PREP(MT_IFS_EIFS_CCK, 314));

	if (phy->slottime < 20 || a_band)
		val = MT7915_CFEND_RATE_DEFAULT;
	else
		val = MT7915_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_ACR0(phy->band_idx), MT_AGG_ACR_CFEND_RATE, val);
	mt76_clear(dev, MT_ARB_SCR(phy->band_idx),
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}

void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy)
{
	u32 reg;

	reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RXTD12(ext_phy) :
	      MT_WF_PHY_RXTD12_MT7916(ext_phy);
	mt76_set(dev, reg,
		 MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY |
		 MT_WF_PHY_RXTD12_IRPI_SW_CLR);

	reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RX_CTRL1(ext_phy) :
	      MT_WF_PHY_RX_CTRL1_MT7916(ext_phy);
	mt76_set(dev, reg, FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5));
}

static u8
mt7915_phy_get_nf(struct mt7915_phy *phy, int idx)
{
	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
	struct mt7915_dev *dev = phy->dev;
	u32 val, sum = 0, n = 0;
	int nss, i;

	for (nss = 0; nss < hweight8(phy->mt76->chainmask); nss++) {
		u32 reg = is_mt7915(&dev->mt76) ?
			MT_WF_IRPI_NSS(0, nss + (idx << dev->dbdc_support)) :
			MT_WF_IRPI_NSS_MT7916(idx, nss);

		for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
			val = mt76_rr(dev, reg);
			sum += val * nf_power[i];
			n += val;
		}
	}

	if (!n)
		return 0;

	return sum / n;
}

void mt7915_update_channel(struct mt76_phy *mphy)
{
	struct mt7915_phy *phy = (struct mt7915_phy *)mphy->priv;
	struct mt76_channel_state *state = mphy->chan_state;
	int nf;

	mt7915_mcu_get_chan_mib_info(phy, false);

	nf = mt7915_phy_get_nf(phy, phy->band_idx);
	/* EWMA with 1/16 weight; phy->noise keeps the value in Q4 fixed point */
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state->noise = -(phy->noise >> 4);
}

static bool
mt7915_wait_reset_state(struct mt7915_dev *dev, u32 state)
{
	bool ret;

	ret = wait_event_timeout(dev->reset_wait,
				 (READ_ONCE(dev->reset_state) & state),
				 MT7915_RESET_TIMEOUT);

	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
	return ret;
}

static void
mt7915_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct ieee80211_hw *hw = priv;

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
		mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon,
				      BSS_CHANGED_BEACON_ENABLED);
		break;
	default:
		break;
	}
}

static void
mt7915_update_beacons(struct mt7915_dev *dev)
{
	struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];

	ieee80211_iterate_active_interfaces(dev->mt76.hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7915_update_vif_beacon, dev->mt76.hw);

	if (!mphy_ext)
		return;

	ieee80211_iterate_active_interfaces(mphy_ext->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7915_update_vif_beacon, mphy_ext->hw);
}
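
/*
 * Full-chip SER: quiesce the WFDMA engines, drain every TX/RX queue,
 * re-program the prefetch settings, then re-enable DMA. The MCU
 * handshake around this lives in mt7915_mac_reset_work() below.
 */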
static void
mt7915_dma_reset(struct mt7915_dev *dev)
{
	struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
	u32 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
	int i;

	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	if (is_mt7915(&dev->mt76))
		mt76_clear(dev, MT_WFDMA1_GLO_CFG,
			   MT_WFDMA1_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA1_GLO_CFG_RX_DMA_EN);
	if (dev->hif2) {
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

		if (is_mt7915(&dev->mt76))
			mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
				   MT_WFDMA1_GLO_CFG_TX_DMA_EN |
				   MT_WFDMA1_GLO_CFG_RX_DMA_EN);
	}

	usleep_range(1000, 2000);

	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
		if (mphy_ext)
			mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[i], true);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	mt76_tx_status_check(&dev->mt76, true);

	/* re-init prefetch settings after reset */
	mt7915_dma_prefetch(dev);

	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
	if (is_mt7915(&dev->mt76))
		mt76_set(dev, MT_WFDMA1_GLO_CFG,
			 MT_WFDMA1_GLO_CFG_TX_DMA_EN |
			 MT_WFDMA1_GLO_CFG_RX_DMA_EN |
			 MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
			 MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
	if (dev->hif2) {
		mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			 MT_WFDMA0_GLO_CFG_RX_DMA_EN);

		if (is_mt7915(&dev->mt76))
			mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
				 MT_WFDMA1_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA1_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
	}
}

void mt7915_tx_token_put(struct mt7915_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id) {
		mt7915_txwi_free(dev, txwi, NULL, NULL);
		dev->mt76.token_count--;
	}
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}

/* system error recovery */
void mt7915_mac_reset_work(struct work_struct *work)
{
	struct mt7915_phy *phy2;
	struct mt76_phy *ext_phy;
	struct mt7915_dev *dev;
	int i;

	dev = container_of(work, struct mt7915_dev, reset_work);
	ext_phy = dev->mt76.phys[MT_BAND1];
	phy2 = ext_phy ? ext_phy->priv : NULL;

	if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA))
		return;

	ieee80211_stop_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_stop_queues(ext_phy->hw);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	}
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(&dev->mt76, i)
		napi_disable(&dev->mt76.napi[i]);
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);

	if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7915_dma_reset(dev);

		mt7915_tx_token_put(dev);
		idr_init(&dev->mt76.token);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
		mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);

	local_bh_disable();
	mt76_for_each_q_rx(&dev->mt76, i) {
		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}
	local_bh_enable();

	tasklet_schedule(&dev->irq_tasklet);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	mt76_worker_enable(&dev->mt76.tx_worker);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	ieee80211_wake_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_wake_queues(ext_phy->hw);

	mutex_unlock(&dev->mt76.mutex);

	mt7915_update_beacons(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     MT7915_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(ext_phy->hw,
					     &phy2->mt76->mac_work,
					     MT7915_WATCHDOG_TIME);
}

void mt7915_mac_update_stats(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	struct mib_stats *mib = &phy->mib;
	int i, aggr0, aggr1, cnt;
	u32 val;

	cnt = mt76_rr(dev, MT_MIB_SDR3(phy->band_idx));
	mib->fcs_err_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR4(phy->band_idx));
	mib->rx_fifo_full_cnt += FIELD_GET(MT_MIB_SDR4_RX_FIFO_FULL_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR5(phy->band_idx));
	mib->rx_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR6(phy->band_idx));
	mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR7(phy->band_idx));
	mib->rx_vector_mismatch_cnt +=
		FIELD_GET(MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR8(phy->band_idx));
	mib->rx_delimiter_fail_cnt +=
		FIELD_GET(MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR10(phy->band_idx));
	mib->rx_mrdy_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR11(phy->band_idx));
	mib->rx_len_mismatch_cnt +=
		FIELD_GET(MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR12(phy->band_idx));
	mib->tx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR13(phy->band_idx));
	mib->tx_stop_q_empty_cnt +=
		FIELD_GET(MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR14(phy->band_idx));
	mib->tx_mpdu_attempts_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR15(phy->band_idx));
	mib->tx_mpdu_success_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR16(phy->band_idx));
	mib->primary_cca_busy_time +=
		FIELD_GET(MT_MIB_SDR16_PRIMARY_CCA_BUSY_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR17(phy->band_idx));
	mib->secondary_cca_busy_time +=
		FIELD_GET(MT_MIB_SDR17_SECONDARY_CCA_BUSY_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR18(phy->band_idx));
	mib->primary_energy_detect_time +=
		FIELD_GET(MT_MIB_SDR18_PRIMARY_ENERGY_DETECT_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR19(phy->band_idx));
	mib->cck_mdrdy_time += FIELD_GET(MT_MIB_SDR19_CCK_MDRDY_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR20(phy->band_idx));
	mib->ofdm_mdrdy_time +=
		FIELD_GET(MT_MIB_SDR20_OFDM_VHT_MDRDY_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR21(phy->band_idx));
	mib->green_mdrdy_time +=
		FIELD_GET(MT_MIB_SDR21_GREEN_MDRDY_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR22(phy->band_idx));
	mib->rx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR23(phy->band_idx));
	mib->rx_ampdu_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR24(phy->band_idx));
	mib->rx_ampdu_valid_subframe_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR25(phy->band_idx));
	mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR27(phy->band_idx));
	mib->tx_rwp_fail_cnt +=
		FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR28(phy->band_idx));
	mib->tx_rwp_need_cnt +=
		FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR29(phy->band_idx));
	mib->rx_pfdrop_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDRVEC(phy->band_idx));
	mib->rx_vec_queue_overflow_drop_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR31(phy->band_idx));
	mib->rx_ba_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDRMUBF(phy->band_idx));
	mib->tx_bf_cnt += FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt);

	cnt = mt76_rr(dev, MT_MIB_DR8(phy->band_idx));
	mib->tx_mu_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_DR9(phy->band_idx));
	mib->tx_mu_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_DR11(phy->band_idx));
	mib->tx_su_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_ETBF_PAR_RPT0(phy->band_idx));
	mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_PAR_RPT0_FB_BW, cnt);
	mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NC, cnt);
	mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NR, cnt);

	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
		cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
		mib->tx_amsdu[i] += cnt;
		mib->tx_amsdu_cnt += cnt;
	}

	aggr0 = phy->band_idx ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
	if (is_mt7915(&dev->mt76)) {
		for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
			val = mt76_rr(dev, MT_MIB_MB_SDR1(phy->band_idx, (i << 4)));
			mib->ba_miss_cnt +=
				FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
			mib->ack_fail_cnt +=
				FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);

			val = mt76_rr(dev, MT_MIB_MB_SDR0(phy->band_idx, (i << 4)));
			mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
			mib->rts_retries_cnt +=
				FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);

			val = mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
			dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
			dev->mt76.aggr_stats[aggr0++] += val >> 16;

			val = mt76_rr(dev, MT_TX_AGG_CNT2(phy->band_idx, i));
			dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
			dev->mt76.aggr_stats[aggr1++] += val >> 16;
		}

		cnt = mt76_rr(dev, MT_MIB_SDR32(phy->band_idx));
		mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);

		cnt = mt76_rr(dev, MT_MIB_SDR33(phy->band_idx));
		mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR33_TX_PKT_IBF_CNT, cnt);

		cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(phy->band_idx));
		mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt);
		mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt);

		cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(phy->band_idx));
		mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_ETBF_TX_FB_CPL, cnt);
		mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_ETBF_TX_FB_TRI, cnt);

		cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(phy->band_idx));
		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, cnt);
		mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, cnt);
		mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, cnt);
		mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, cnt);
	} else {
		for (i = 0; i < 2; i++) {
			/* rts count */
			val = mt76_rr(dev, MT_MIB_MB_SDR0(phy->band_idx, (i << 2)));
			mib->rts_cnt += FIELD_GET(GENMASK(15, 0), val);
			mib->rts_cnt += FIELD_GET(GENMASK(31, 16), val);

			/* rts retry count */
			val = mt76_rr(dev, MT_MIB_MB_SDR1(phy->band_idx, (i << 2)));
			mib->rts_retries_cnt += FIELD_GET(GENMASK(15, 0), val);
			mib->rts_retries_cnt += FIELD_GET(GENMASK(31, 16), val);

			/* ba miss count */
			val = mt76_rr(dev, MT_MIB_MB_SDR2(phy->band_idx, (i << 2)));
			mib->ba_miss_cnt += FIELD_GET(GENMASK(15, 0), val);
			mib->ba_miss_cnt += FIELD_GET(GENMASK(31, 16), val);

			/* ack fail count */
			val = mt76_rr(dev, MT_MIB_MB_BFTF(phy->band_idx, (i << 2)));
			mib->ack_fail_cnt += FIELD_GET(GENMASK(15, 0), val);
			mib->ack_fail_cnt += FIELD_GET(GENMASK(31, 16), val);
		}

		for (i = 0; i < 8; i++) {
			val = mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
			dev->mt76.aggr_stats[aggr0++] += FIELD_GET(GENMASK(15, 0), val);
			dev->mt76.aggr_stats[aggr0++] += FIELD_GET(GENMASK(31, 16), val);
		}

		cnt = mt76_rr(dev, MT_MIB_SDR32(phy->band_idx));
		mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
		mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
		mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
		mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);

		cnt = mt76_rr(dev, MT_MIB_BFCR7(phy->band_idx));
		mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_MIB_BFCR7_BFEE_TX_FB_CPL, cnt);

		cnt = mt76_rr(dev, MT_MIB_BFCR2(phy->band_idx));
		mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_MIB_BFCR2_BFEE_TX_FB_TRIG, cnt);

		cnt = mt76_rr(dev, MT_MIB_BFCR0(phy->band_idx));
		mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
		mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);
		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);

		cnt = mt76_rr(dev, MT_MIB_BFCR1(phy->band_idx));
		mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
	}
}

static void mt7915_mac_severe_check(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	u32 trb;

	if (!phy->omac_mask)
		return;

	/* In rare cases, the TRB pointers might get out of sync and cause
	 * RMAC to stop receiving, so check the status periodically to see
	 * whether the TRB hardware requires minimal recovery.
	 */
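
	/* Recover only if the RMAC and WTBL pointers disagree now and the
	 * whole register is unchanged since the previous check, i.e. the
	 * mismatch is genuinely stuck rather than transient.
	 */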
	trb = mt76_rr(dev, MT_TRB_RXPSR0(phy->band_idx));

	if ((FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, trb) !=
	     FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, trb)) &&
	    (FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, phy->trb_ts) !=
	     FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, phy->trb_ts)) &&
	    trb == phy->trb_ts)
		mt7915_mcu_set_ser(dev, SER_RECOVER, SER_SET_RECOVER_L3_RX_ABORT,
				   ext_phy);

	phy->trb_ts = trb;
}

void mt7915_mac_sta_rc_work(struct work_struct *work)
{
	struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work);
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct mt7915_sta *msta;
	u32 changed;
	LIST_HEAD(list);

	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&dev->sta_rc_list, &list);

	while (!list_empty(&list)) {
		msta = list_first_entry(&list, struct mt7915_sta, rc_list);
		list_del_init(&msta->rc_list);
		changed = msta->changed;
		msta->changed = 0;
		spin_unlock_bh(&dev->sta_poll_lock);

		sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
		vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
			       IEEE80211_RC_NSS_CHANGED |
			       IEEE80211_RC_BW_CHANGED))
			mt7915_mcu_add_rate_ctrl(dev, vif, sta, true);

		if (changed & IEEE80211_RC_SMPS_CHANGED)
			mt7915_mcu_add_smps(dev, vif, sta);

		spin_lock_bh(&dev->sta_poll_lock);
	}

	spin_unlock_bh(&dev->sta_poll_lock);
}

void mt7915_mac_work(struct work_struct *work)
{
	struct mt7915_phy *phy;
	struct mt76_phy *mphy;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mutex_lock(&mphy->dev->mutex);

	mt76_update_survey(mphy);
	if (++mphy->mac_work_count == 5) {
		mphy->mac_work_count = 0;

		mt7915_mac_update_stats(phy);
		mt7915_mac_severe_check(phy);
	}

	mutex_unlock(&mphy->dev->mutex);

	mt76_tx_status_check(mphy->dev, false);

	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
				     MT7915_WATCHDOG_TIME);
}
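
/*
 * DFS: phy->rdd_state is a bitmask of started RDD chains. On mt7915,
 * chain 1 additionally covers the second 80 MHz segment of 160 and
 * 80+80 MHz channels.
 */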
static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;

	if (phy->rdd_state & BIT(0))
		mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0,
					MT_RX_SEL0, 0);
	if (phy->rdd_state & BIT(1))
		mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1,
					MT_RX_SEL0, 0);
}

static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain)
{
	int err, region;

	switch (dev->mt76.region) {
	case NL80211_DFS_ETSI:
		region = 0;
		break;
	case NL80211_DFS_JP:
		region = 2;
		break;
	case NL80211_DFS_FCC:
	default:
		region = 1;
		break;
	}

	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain,
				      MT_RX_SEL0, region);
	if (err < 0)
		return err;

	return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain,
				       MT_RX_SEL0, 1);
}

static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7915_dev *dev = phy->dev;
	int err;

	/* start CAC */
	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, phy->band_idx,
				      MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	err = mt7915_dfs_start_rdd(dev, phy->band_idx);
	if (err < 0)
		return err;

	phy->rdd_state |= BIT(phy->band_idx);

	if (!is_mt7915(&dev->mt76))
		return 0;

	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
		err = mt7915_dfs_start_rdd(dev, 1);
		if (err < 0)
			return err;

		phy->rdd_state |= BIT(1);
	}

	return 0;
}

static int
mt7915_dfs_init_radar_specs(struct mt7915_phy *phy)
{
	const struct mt7915_dfs_radar_spec *radar_specs;
	struct mt7915_dev *dev = phy->dev;
	int err, i;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		err = mt7915_mcu_set_fcc5_lpn(dev, 8);
		if (err < 0)
			return err;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7915_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7915_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}

int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	enum mt76_dfs_state dfs_state, prev_state;
	int err;

	prev_state = phy->mt76->dfs_state;
	dfs_state = mt76_phy_dfs_state(phy->mt76);

	if (prev_state == dfs_state)
		return 0;

	if (prev_state == MT_DFS_STATE_UNKNOWN)
		mt7915_dfs_stop_radar_detector(phy);

	if (dfs_state == MT_DFS_STATE_DISABLED)
		goto stop;

	if (prev_state <= MT_DFS_STATE_DISABLED) {
		err = mt7915_dfs_init_radar_specs(phy);
		if (err < 0)
			return err;

		err = mt7915_dfs_start_radar_detector(phy);
		if (err < 0)
			return err;

		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
	}

	if (dfs_state == MT_DFS_STATE_CAC)
		return 0;

	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
				      phy->band_idx, MT_RX_SEL0, 0);
	if (err < 0) {
		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
		return err;
	}

	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
	return 0;

stop:
	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START,
				      phy->band_idx, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	mt7915_dfs_stop_radar_detector(phy);
	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;

	return 0;
}

static int
mt7915_mac_twt_duration_align(int duration)
{
	/* TWT durations are carried in units of 256 us; convert to TSF us */
	return duration << 8;
}

static u64
mt7915_mac_twt_sched_list_add(struct mt7915_dev *dev,
			      struct mt7915_twt_flow *flow)
{
	struct mt7915_twt_flow *iter, *iter_next;
	u32 duration = flow->duration << 8;
	u64 start_tsf;

	iter = list_first_entry_or_null(&dev->twt_list,
					struct mt7915_twt_flow, list);
	if (!iter || !iter->sched || iter->start_tsf > duration) {
		/* add flow as first entry in the list */
		list_add(&flow->list, &dev->twt_list);
		return 0;
	}

	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
		start_tsf = iter->start_tsf +
			    mt7915_mac_twt_duration_align(iter->duration);
		if (list_is_last(&iter->list, &dev->twt_list))
			break;

		if (!iter_next->sched ||
		    iter_next->start_tsf > start_tsf + duration) {
			list_add(&flow->list, &iter->list);
			goto out;
		}
	}

	/* add flow as last entry in the list */
	list_add_tail(&flow->list, &dev->twt_list);
out:
	return start_tsf;
}
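
/*
 * Accept only what the hardware can schedule: individual, implicit
 * agreements with 256 us wake-duration units, and a wake interval
 * (mantissa << exp) large enough to contain the requested duration.
 */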

static int
mt7915_mac_twt_duration_align(int duration)
{
	return duration << 8;
}

static u64
mt7915_mac_twt_sched_list_add(struct mt7915_dev *dev,
			      struct mt7915_twt_flow *flow)
{
	struct mt7915_twt_flow *iter, *iter_next;
	u32 duration = flow->duration << 8;
	u64 start_tsf;

	iter = list_first_entry_or_null(&dev->twt_list,
					struct mt7915_twt_flow, list);
	if (!iter || !iter->sched || iter->start_tsf > duration) {
		/* add flow as first entry in the list */
		list_add(&flow->list, &dev->twt_list);
		return 0;
	}

	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
		start_tsf = iter->start_tsf +
			    mt7915_mac_twt_duration_align(iter->duration);
		if (list_is_last(&iter->list, &dev->twt_list))
			break;

		if (!iter_next->sched ||
		    iter_next->start_tsf > start_tsf + duration) {
			list_add(&flow->list, &iter->list);
			goto out;
		}
	}

	/* add flow as last entry in the list */
	list_add_tail(&flow->list, &dev->twt_list);
out:
	return start_tsf;
}

static int mt7915_mac_check_twt_req(struct ieee80211_twt_setup *twt)
{
	struct ieee80211_twt_params *twt_agrt;
	u64 interval, duration;
	u16 mantissa;
	u8 exp;

	/* only individual agreement supported */
	if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
		return -EOPNOTSUPP;

	/* only 256us unit supported */
	if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
		return -EOPNOTSUPP;

	twt_agrt = (struct ieee80211_twt_params *)twt->params;

	/* explicit agreement not supported */
	if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
		return -EOPNOTSUPP;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
			le16_to_cpu(twt_agrt->req_type));
	mantissa = le16_to_cpu(twt_agrt->mantissa);
	duration = twt_agrt->min_twt_dur << 8;

	interval = (u64)mantissa << exp;
	if (interval < duration)
		return -EOPNOTSUPP;

	return 0;
}

static bool
mt7915_mac_twt_param_equal(struct mt7915_sta *msta,
			   struct ieee80211_twt_params *twt_agrt)
{
	u16 type = le16_to_cpu(twt_agrt->req_type);
	u8 exp;
	int i;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
	for (i = 0; i < MT7915_MAX_STA_TWT_AGRT; i++) {
		struct mt7915_twt_flow *f;

		if (!(msta->twt.flowid_mask & BIT(i)))
			continue;

		f = &msta->twt.flow[i];
		if (f->duration == twt_agrt->min_twt_dur &&
		    f->mantissa == twt_agrt->mantissa &&
		    f->exp == exp &&
		    f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
		    f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
		    f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
			return true;
	}

	return false;
}
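
/*
 * Illustrative sketch of the admission arithmetic applied by
 * mt7915_mac_check_twt_req() above: the requested service period
 * (min_twt_dur, expressed in 256us units) must fit inside the wake
 * interval (mantissa * 2^exp microseconds). The helper below is
 * hypothetical and exists only to spell the units out; it is not part
 * of the driver flow.
 */
static bool __maybe_unused
mt7915_twt_req_fits_sketch(u8 min_twt_dur, u16 mantissa, u8 exp)
{
	u64 duration_us = (u64)min_twt_dur << 8;	/* 256us units -> us */
	u64 interval_us = (u64)mantissa << exp;		/* wake interval in us */

	return interval_us >= duration_us;
}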

void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta,
			      struct ieee80211_twt_setup *twt)
{
	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
	struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
	u16 req_type = le16_to_cpu(twt_agrt->req_type);
	enum ieee80211_twt_setup_cmd sta_setup_cmd;
	struct mt7915_dev *dev = mt7915_hw_dev(hw);
	struct mt7915_twt_flow *flow;
	int flowid, table_id;
	u8 exp;

	if (mt7915_mac_check_twt_req(twt))
		goto out;

	mutex_lock(&dev->mt76.mutex);

	if (dev->twt.n_agrt == MT7915_MAX_TWT_AGRT)
		goto unlock;

	if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
		goto unlock;

	if (twt_agrt->min_twt_dur < MT7915_MIN_TWT_DUR) {
		setup_cmd = TWT_SETUP_CMD_DICTATE;
		twt_agrt->min_twt_dur = MT7915_MIN_TWT_DUR;
		goto unlock;
	}

	flowid = ffs(~msta->twt.flowid_mask) - 1;
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
	twt_agrt->req_type |= le16_encode_bits(flowid,
					       IEEE80211_TWT_REQTYPE_FLOWID);

	table_id = ffs(~dev->twt.table_mask) - 1;
	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);

	if (mt7915_mac_twt_param_equal(msta, twt_agrt))
		goto unlock;

	flow = &msta->twt.flow[flowid];
	memset(flow, 0, sizeof(*flow));
	INIT_LIST_HEAD(&flow->list);
	flow->wcid = msta->wcid.idx;
	flow->table_id = table_id;
	flow->id = flowid;
	flow->duration = twt_agrt->min_twt_dur;
	flow->mantissa = twt_agrt->mantissa;
	flow->exp = exp;
	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);

	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
		u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
		u64 flow_tsf, curr_tsf;
		u32 rem;

		flow->sched = true;
		flow->start_tsf = mt7915_mac_twt_sched_list_add(dev, flow);
		curr_tsf = __mt7915_get_tsf(hw, msta->vif);
		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
		flow_tsf = curr_tsf + interval - rem;
		twt_agrt->twt = cpu_to_le64(flow_tsf);
	} else {
		list_add_tail(&flow->list, &dev->twt_list);
	}
	flow->tsf = le64_to_cpu(twt_agrt->twt);

	if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
		goto unlock;

	setup_cmd = TWT_SETUP_CMD_ACCEPT;
	dev->twt.table_mask |= BIT(table_id);
	msta->twt.flowid_mask |= BIT(flowid);
	dev->twt.n_agrt++;

unlock:
	mutex_unlock(&dev->mt76.mutex);
out:
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt_agrt->req_type |=
		le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
		       (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
}

void mt7915_mac_twt_teardown_flow(struct mt7915_dev *dev,
				  struct mt7915_sta *msta,
				  u8 flowid)
{
	struct mt7915_twt_flow *flow;

	lockdep_assert_held(&dev->mt76.mutex);

	if (flowid >= ARRAY_SIZE(msta->twt.flow))
		return;

	if (!(msta->twt.flowid_mask & BIT(flowid)))
		return;

	flow = &msta->twt.flow[flowid];
	if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow,
				       MCU_TWT_AGRT_DELETE))
		return;

	list_del_init(&flow->list);
	msta->twt.flowid_mask &= ~BIT(flowid);
	dev->twt.table_mask &= ~BIT(flow->table_id);
	dev->twt.n_agrt--;
}
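
/*
 * Usage sketch (hypothetical helper, not part of the driver): tearing down
 * every agreement of a station amounts to walking twt.flowid_mask and calling
 * mt7915_mac_twt_teardown_flow(), which already validates each flowid, clears
 * the per-flow mask bits and updates the global bookkeeping. The bit test in
 * the loop is therefore only a shortcut; the callee would reject unset ids on
 * its own. dev->mt76.mutex must be held, as asserted by the callee.
 */
static void __maybe_unused
mt7915_mac_twt_teardown_all_sketch(struct mt7915_dev *dev,
				   struct mt7915_sta *msta)
{
	int i;

	lockdep_assert_held(&dev->mt76.mutex);

	for (i = 0; i < MT7915_MAX_STA_TWT_AGRT; i++)
		if (msta->twt.flowid_mask & BIT(i))
			mt7915_mac_twt_teardown_flow(dev, msta, i);
}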