// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7915.h"
#include "../dma.h"
#include "mac.h"
#include "mcu.h"

/* Convert a packed RCPI byte field to dBm RSSI: rssi = rcpi / 2 - 110 */
#define to_rssi(field, rcpi) ((FIELD_GET(field, rcpi) - 220) / 2)

/*
 * Hardware DFS radar detection parameters, one table per regulatory
 * domain (ETSI / FCC / Japan).  Each radar_pattern row describes one
 * radar type index as programmed into the firmware pattern detector;
 * values are taken from the MediaTek reference configuration — do not
 * change them without vendor guidance.
 */
static const struct mt7915_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0,  6, 32, 28, 0,  990, 5010, 17, 1, 1 },
		[6] =  { 1, 0,  9, 32, 28, 0,  615, 5010, 27, 1, 1 },
		[7] =  { 1, 0, 15, 32, 28, 0,  240,  445, 27, 1, 1 },
		[8] =  { 1, 0, 12, 32, 28, 0,  240,  510, 42, 1, 1 },
		[9] =  { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
		[10] = { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
		[11] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 },
		[12] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 },
	},
};

static const struct mt7915_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0,  8,  32, 28, 0, 508, 3076, 13, 1,  1 },
		[1] = { 1, 0, 12,  32, 28, 0, 140,  240, 17, 1,  1 },
		[2] = { 1, 0,  8,  32, 28, 0, 190,  510, 22, 1,  1 },
		[3] = { 1, 0,  6,  32, 28, 0, 190,  510, 32, 1,  1 },
		[4] = { 1, 0,  9, 255, 28, 0, 323,  343, 13, 1, 32 },
	},
};

/* Japan reuses the FCC short-pulse types and adds long-pulse types 13-15 */
static const struct mt7915_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] =  { 1, 0,  8,  32, 28, 0,  508, 3076,  13, 1,  1 },
		[1] =  { 1, 0, 12,  32, 28, 0,  140,  240,  17, 1,  1 },
		[2] =  { 1, 0,  8,  32, 28, 0,  190,  510,  22, 1,  1 },
		[3] =  { 1, 0,  6,  32, 28, 0,  190,  510,  32, 1,  1 },
		[4] =  { 1, 0,  9, 255, 28, 0,  323,  343,  13, 1, 32 },
		[13] = { 1, 0,  7,  32, 28, 0, 3836, 3856,  14, 1,  1 },
		[14] = { 1, 0,  6,  32, 28, 0,  615, 5010, 110, 1,  1 },
		[15] = { 1, 1,  0,   0,  0, 0,   15, 5010, 110, 0,  0, 12, 32, 28 },
	},
};
51 52 static struct mt76_wcid *mt7915_rx_get_wcid(struct mt7915_dev *dev, 53 u16 idx, bool unicast) 54 { 55 struct mt7915_sta *sta; 56 struct mt76_wcid *wcid; 57 58 if (idx >= ARRAY_SIZE(dev->mt76.wcid)) 59 return NULL; 60 61 wcid = rcu_dereference(dev->mt76.wcid[idx]); 62 if (unicast || !wcid) 63 return wcid; 64 65 if (!wcid->sta) 66 return NULL; 67 68 sta = container_of(wcid, struct mt7915_sta, wcid); 69 if (!sta->vif) 70 return NULL; 71 72 return &sta->vif->sta.wcid; 73 } 74 75 void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) 76 { 77 } 78 79 bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask) 80 { 81 mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX, 82 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask); 83 84 return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 85 0, 5000); 86 } 87 88 u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid, u8 dw) 89 { 90 mt76_wr(dev, MT_WTBLON_TOP_WDUCR, 91 FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7))); 92 93 return MT_WTBL_LMAC_OFFS(wcid, dw); 94 } 95 96 static void mt7915_mac_sta_poll(struct mt7915_dev *dev) 97 { 98 static const u8 ac_to_tid[] = { 99 [IEEE80211_AC_BE] = 0, 100 [IEEE80211_AC_BK] = 1, 101 [IEEE80211_AC_VI] = 4, 102 [IEEE80211_AC_VO] = 6 103 }; 104 struct ieee80211_sta *sta; 105 struct mt7915_sta *msta; 106 struct rate_info *rate; 107 u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS]; 108 LIST_HEAD(sta_poll_list); 109 int i; 110 111 spin_lock_bh(&dev->sta_poll_lock); 112 list_splice_init(&dev->sta_poll_list, &sta_poll_list); 113 spin_unlock_bh(&dev->sta_poll_lock); 114 115 rcu_read_lock(); 116 117 while (true) { 118 bool clear = false; 119 u32 addr, val; 120 u16 idx; 121 s8 rssi[4]; 122 u8 bw; 123 124 spin_lock_bh(&dev->sta_poll_lock); 125 if (list_empty(&sta_poll_list)) { 126 spin_unlock_bh(&dev->sta_poll_lock); 127 break; 128 } 129 msta = list_first_entry(&sta_poll_list, 130 struct mt7915_sta, poll_list); 131 
list_del_init(&msta->poll_list); 132 spin_unlock_bh(&dev->sta_poll_lock); 133 134 idx = msta->wcid.idx; 135 136 /* refresh peer's airtime reporting */ 137 addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 20); 138 139 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 140 u32 tx_last = msta->airtime_ac[i]; 141 u32 rx_last = msta->airtime_ac[i + 4]; 142 143 msta->airtime_ac[i] = mt76_rr(dev, addr); 144 msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4); 145 146 tx_time[i] = msta->airtime_ac[i] - tx_last; 147 rx_time[i] = msta->airtime_ac[i + 4] - rx_last; 148 149 if ((tx_last | rx_last) & BIT(30)) 150 clear = true; 151 152 addr += 8; 153 } 154 155 if (clear) { 156 mt7915_mac_wtbl_update(dev, idx, 157 MT_WTBL_UPDATE_ADM_COUNT_CLEAR); 158 memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac)); 159 } 160 161 if (!msta->wcid.sta) 162 continue; 163 164 sta = container_of((void *)msta, struct ieee80211_sta, 165 drv_priv); 166 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 167 u8 q = mt76_connac_lmac_mapping(i); 168 u32 tx_cur = tx_time[q]; 169 u32 rx_cur = rx_time[q]; 170 u8 tid = ac_to_tid[i]; 171 172 if (!tx_cur && !rx_cur) 173 continue; 174 175 ieee80211_sta_register_airtime(sta, tid, tx_cur, 176 rx_cur); 177 } 178 179 /* 180 * We don't support reading GI info from txs packets. 181 * For accurate tx status reporting and AQL improvement, 182 * we need to make sure that flags match so polling GI 183 * from per-sta counters directly. 
184 */ 185 rate = &msta->wcid.rate; 186 addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 7); 187 val = mt76_rr(dev, addr); 188 189 switch (rate->bw) { 190 case RATE_INFO_BW_160: 191 bw = IEEE80211_STA_RX_BW_160; 192 break; 193 case RATE_INFO_BW_80: 194 bw = IEEE80211_STA_RX_BW_80; 195 break; 196 case RATE_INFO_BW_40: 197 bw = IEEE80211_STA_RX_BW_40; 198 break; 199 default: 200 bw = IEEE80211_STA_RX_BW_20; 201 break; 202 } 203 204 if (rate->flags & RATE_INFO_FLAGS_HE_MCS) { 205 u8 offs = 24 + 2 * bw; 206 207 rate->he_gi = (val & (0x3 << offs)) >> offs; 208 } else if (rate->flags & 209 (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) { 210 if (val & BIT(12 + bw)) 211 rate->flags |= RATE_INFO_FLAGS_SHORT_GI; 212 else 213 rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI; 214 } 215 216 /* get signal strength of resp frames (CTS/BA/ACK) */ 217 addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 30); 218 val = mt76_rr(dev, addr); 219 220 rssi[0] = to_rssi(GENMASK(7, 0), val); 221 rssi[1] = to_rssi(GENMASK(15, 8), val); 222 rssi[2] = to_rssi(GENMASK(23, 16), val); 223 rssi[3] = to_rssi(GENMASK(31, 14), val); 224 225 msta->ack_signal = 226 mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi); 227 228 ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal); 229 } 230 231 rcu_read_unlock(); 232 } 233 234 void mt7915_mac_enable_rtscts(struct mt7915_dev *dev, 235 struct ieee80211_vif *vif, bool enable) 236 { 237 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 238 u32 addr; 239 240 addr = mt7915_mac_wtbl_lmac_addr(dev, mvif->sta.wcid.idx, 5); 241 if (enable) 242 mt76_set(dev, addr, BIT(5)); 243 else 244 mt76_clear(dev, addr, BIT(5)); 245 } 246 247 static int 248 mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) 249 { 250 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; 251 struct mt76_phy *mphy = &dev->mt76.phy; 252 struct mt7915_phy *phy = &dev->phy; 253 struct ieee80211_supported_band *sband; 254 __le32 *rxd = (__le32 *)skb->data; 255 __le32 
*rxv = NULL; 256 u32 rxd0 = le32_to_cpu(rxd[0]); 257 u32 rxd1 = le32_to_cpu(rxd[1]); 258 u32 rxd2 = le32_to_cpu(rxd[2]); 259 u32 rxd3 = le32_to_cpu(rxd[3]); 260 u32 rxd4 = le32_to_cpu(rxd[4]); 261 u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM; 262 bool unicast, insert_ccmp_hdr = false; 263 u8 remove_pad, amsdu_info; 264 u8 mode = 0, qos_ctl = 0; 265 struct mt7915_sta *msta = NULL; 266 u32 csum_status = *(u32 *)skb->cb; 267 bool hdr_trans; 268 u16 hdr_gap; 269 u16 seq_ctrl = 0; 270 __le16 fc = 0; 271 int idx; 272 273 memset(status, 0, sizeof(*status)); 274 275 if ((rxd1 & MT_RXD1_NORMAL_BAND_IDX) && !phy->band_idx) { 276 mphy = dev->mt76.phys[MT_BAND1]; 277 if (!mphy) 278 return -EINVAL; 279 280 phy = mphy->priv; 281 status->phy_idx = 1; 282 } 283 284 if (!test_bit(MT76_STATE_RUNNING, &mphy->state)) 285 return -EINVAL; 286 287 if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR) 288 return -EINVAL; 289 290 hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS; 291 if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM)) 292 return -EINVAL; 293 294 /* ICV error or CCMP/BIP/WPI MIC error */ 295 if (rxd1 & MT_RXD1_NORMAL_ICV_ERR) 296 status->flag |= RX_FLAG_ONLY_MONITOR; 297 298 unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M; 299 idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1); 300 status->wcid = mt7915_rx_get_wcid(dev, idx, unicast); 301 302 if (status->wcid) { 303 msta = container_of(status->wcid, struct mt7915_sta, wcid); 304 spin_lock_bh(&dev->sta_poll_lock); 305 if (list_empty(&msta->poll_list)) 306 list_add_tail(&msta->poll_list, &dev->sta_poll_list); 307 spin_unlock_bh(&dev->sta_poll_lock); 308 } 309 310 status->freq = mphy->chandef.chan->center_freq; 311 status->band = mphy->chandef.chan->band; 312 if (status->band == NL80211_BAND_5GHZ) 313 sband = &mphy->sband_5g.sband; 314 else if (status->band == NL80211_BAND_6GHZ) 315 sband = &mphy->sband_6g.sband; 316 else 317 sband = &mphy->sband_2g.sband; 318 319 if (!sband->channels) 320 return -EINVAL; 321 322 
if ((rxd0 & csum_mask) == csum_mask && 323 !(csum_status & (BIT(0) | BIT(2) | BIT(3)))) 324 skb->ip_summed = CHECKSUM_UNNECESSARY; 325 326 if (rxd1 & MT_RXD1_NORMAL_FCS_ERR) 327 status->flag |= RX_FLAG_FAILED_FCS_CRC; 328 329 if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR) 330 status->flag |= RX_FLAG_MMIC_ERROR; 331 332 if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 && 333 !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) { 334 status->flag |= RX_FLAG_DECRYPTED; 335 status->flag |= RX_FLAG_IV_STRIPPED; 336 status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED; 337 } 338 339 remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2); 340 341 if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR) 342 return -EINVAL; 343 344 rxd += 6; 345 if (rxd1 & MT_RXD1_NORMAL_GROUP_4) { 346 u32 v0 = le32_to_cpu(rxd[0]); 347 u32 v2 = le32_to_cpu(rxd[2]); 348 349 fc = cpu_to_le16(FIELD_GET(MT_RXD6_FRAME_CONTROL, v0)); 350 qos_ctl = FIELD_GET(MT_RXD8_QOS_CTL, v2); 351 seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, v2); 352 353 rxd += 4; 354 if ((u8 *)rxd - skb->data >= skb->len) 355 return -EINVAL; 356 } 357 358 if (rxd1 & MT_RXD1_NORMAL_GROUP_1) { 359 u8 *data = (u8 *)rxd; 360 361 if (status->flag & RX_FLAG_DECRYPTED) { 362 switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) { 363 case MT_CIPHER_AES_CCMP: 364 case MT_CIPHER_CCMP_CCX: 365 case MT_CIPHER_CCMP_256: 366 insert_ccmp_hdr = 367 FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2); 368 fallthrough; 369 case MT_CIPHER_TKIP: 370 case MT_CIPHER_TKIP_NO_MIC: 371 case MT_CIPHER_GCMP: 372 case MT_CIPHER_GCMP_256: 373 status->iv[0] = data[5]; 374 status->iv[1] = data[4]; 375 status->iv[2] = data[3]; 376 status->iv[3] = data[2]; 377 status->iv[4] = data[1]; 378 status->iv[5] = data[0]; 379 break; 380 default: 381 break; 382 } 383 } 384 rxd += 4; 385 if ((u8 *)rxd - skb->data >= skb->len) 386 return -EINVAL; 387 } 388 389 if (rxd1 & MT_RXD1_NORMAL_GROUP_2) { 390 status->timestamp = le32_to_cpu(rxd[0]); 391 status->flag |= RX_FLAG_MACTIME_START; 392 393 if (!(rxd2 & 
MT_RXD2_NORMAL_NON_AMPDU)) { 394 status->flag |= RX_FLAG_AMPDU_DETAILS; 395 396 /* all subframes of an A-MPDU have the same timestamp */ 397 if (phy->rx_ampdu_ts != status->timestamp) { 398 if (!++phy->ampdu_ref) 399 phy->ampdu_ref++; 400 } 401 phy->rx_ampdu_ts = status->timestamp; 402 403 status->ampdu_ref = phy->ampdu_ref; 404 } 405 406 rxd += 2; 407 if ((u8 *)rxd - skb->data >= skb->len) 408 return -EINVAL; 409 } 410 411 /* RXD Group 3 - P-RXV */ 412 if (rxd1 & MT_RXD1_NORMAL_GROUP_3) { 413 u32 v0, v1; 414 int ret; 415 416 rxv = rxd; 417 rxd += 2; 418 if ((u8 *)rxd - skb->data >= skb->len) 419 return -EINVAL; 420 421 v0 = le32_to_cpu(rxv[0]); 422 v1 = le32_to_cpu(rxv[1]); 423 424 if (v0 & MT_PRXV_HT_AD_CODE) 425 status->enc_flags |= RX_ENC_FLAG_LDPC; 426 427 status->chains = mphy->antenna_mask; 428 status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1); 429 status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1); 430 status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1); 431 status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1); 432 433 /* RXD Group 5 - C-RXV */ 434 if (rxd1 & MT_RXD1_NORMAL_GROUP_5) { 435 rxd += 18; 436 if ((u8 *)rxd - skb->data >= skb->len) 437 return -EINVAL; 438 } 439 440 if (!is_mt7915(&dev->mt76) || (rxd1 & MT_RXD1_NORMAL_GROUP_5)) { 441 ret = mt76_connac2_mac_fill_rx_rate(&dev->mt76, status, 442 sband, rxv, &mode); 443 if (ret < 0) 444 return ret; 445 } 446 } 447 448 amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4); 449 status->amsdu = !!amsdu_info; 450 if (status->amsdu) { 451 status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME; 452 status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME; 453 } 454 455 hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad; 456 if (hdr_trans && ieee80211_has_morefrags(fc)) { 457 struct ieee80211_vif *vif; 458 int err; 459 460 if (!msta || !msta->vif) 461 return -EINVAL; 462 463 vif = container_of((void *)msta->vif, struct ieee80211_vif, 464 drv_priv); 465 err = 
mt76_connac2_reverse_frag0_hdr_trans(vif, skb, hdr_gap); 466 if (err) 467 return err; 468 469 hdr_trans = false; 470 } else { 471 int pad_start = 0; 472 473 skb_pull(skb, hdr_gap); 474 if (!hdr_trans && status->amsdu) { 475 pad_start = ieee80211_get_hdrlen_from_skb(skb); 476 } else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) { 477 /* 478 * When header translation failure is indicated, 479 * the hardware will insert an extra 2-byte field 480 * containing the data length after the protocol 481 * type field. This happens either when the LLC-SNAP 482 * pattern did not match, or if a VLAN header was 483 * detected. 484 */ 485 pad_start = 12; 486 if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q) 487 pad_start += 4; 488 else 489 pad_start = 0; 490 } 491 492 if (pad_start) { 493 memmove(skb->data + 2, skb->data, pad_start); 494 skb_pull(skb, 2); 495 } 496 } 497 498 if (!hdr_trans) { 499 struct ieee80211_hdr *hdr; 500 501 if (insert_ccmp_hdr) { 502 u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1); 503 504 mt76_insert_ccmp_hdr(skb, key_id); 505 } 506 507 hdr = mt76_skb_get_hdr(skb); 508 fc = hdr->frame_control; 509 if (ieee80211_is_data_qos(fc)) { 510 seq_ctrl = le16_to_cpu(hdr->seq_ctrl); 511 qos_ctl = *ieee80211_get_qos_ctl(hdr); 512 } 513 } else { 514 status->flag |= RX_FLAG_8023; 515 } 516 517 if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023)) 518 mt76_connac2_mac_decode_he_radiotap(&dev->mt76, skb, rxv, mode); 519 520 if (!status->wcid || !ieee80211_is_data_qos(fc)) 521 return 0; 522 523 status->aggr = unicast && 524 !ieee80211_is_qos_nullfunc(fc); 525 status->qos_ctl = qos_ctl; 526 status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl); 527 528 return 0; 529 } 530 531 static void 532 mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb) 533 { 534 #ifdef CONFIG_NL80211_TESTMODE 535 struct mt7915_phy *phy = &dev->phy; 536 __le32 *rxd = (__le32 *)skb->data; 537 __le32 *rxv_hdr = rxd + 2; 538 __le32 *rxv = rxd + 4; 
539 u32 rcpi, ib_rssi, wb_rssi, v20, v21; 540 u8 band_idx; 541 s32 foe; 542 u8 snr; 543 int i; 544 545 band_idx = le32_get_bits(rxv_hdr[1], MT_RXV_HDR_BAND_IDX); 546 if (band_idx && !phy->band_idx) { 547 phy = mt7915_ext_phy(dev); 548 if (!phy) 549 goto out; 550 } 551 552 rcpi = le32_to_cpu(rxv[6]); 553 ib_rssi = le32_to_cpu(rxv[7]); 554 wb_rssi = le32_to_cpu(rxv[8]) >> 5; 555 556 for (i = 0; i < 4; i++, rcpi >>= 8, ib_rssi >>= 8, wb_rssi >>= 9) { 557 if (i == 3) 558 wb_rssi = le32_to_cpu(rxv[9]); 559 560 phy->test.last_rcpi[i] = rcpi & 0xff; 561 phy->test.last_ib_rssi[i] = ib_rssi & 0xff; 562 phy->test.last_wb_rssi[i] = wb_rssi & 0xff; 563 } 564 565 v20 = le32_to_cpu(rxv[20]); 566 v21 = le32_to_cpu(rxv[21]); 567 568 foe = FIELD_GET(MT_CRXV_FOE_LO, v20) | 569 (FIELD_GET(MT_CRXV_FOE_HI, v21) << MT_CRXV_FOE_SHIFT); 570 571 snr = FIELD_GET(MT_CRXV_SNR, v20) - 16; 572 573 phy->test.last_freq_offset = foe; 574 phy->test.last_snr = snr; 575 out: 576 #endif 577 dev_kfree_skb(skb); 578 } 579 580 static void 581 mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi, 582 struct sk_buff *skb) 583 { 584 #ifdef CONFIG_NL80211_TESTMODE 585 struct mt76_testmode_data *td = &phy->mt76->test; 586 const struct ieee80211_rate *r; 587 u8 bw, mode, nss = td->tx_rate_nss; 588 u8 rate_idx = td->tx_rate_idx; 589 u16 rateval = 0; 590 u32 val; 591 bool cck = false; 592 int band; 593 594 if (skb != phy->mt76->test.tx_skb) 595 return; 596 597 switch (td->tx_rate_mode) { 598 case MT76_TM_TX_MODE_HT: 599 nss = 1 + (rate_idx >> 3); 600 mode = MT_PHY_TYPE_HT; 601 break; 602 case MT76_TM_TX_MODE_VHT: 603 mode = MT_PHY_TYPE_VHT; 604 break; 605 case MT76_TM_TX_MODE_HE_SU: 606 mode = MT_PHY_TYPE_HE_SU; 607 break; 608 case MT76_TM_TX_MODE_HE_EXT_SU: 609 mode = MT_PHY_TYPE_HE_EXT_SU; 610 break; 611 case MT76_TM_TX_MODE_HE_TB: 612 mode = MT_PHY_TYPE_HE_TB; 613 break; 614 case MT76_TM_TX_MODE_HE_MU: 615 mode = MT_PHY_TYPE_HE_MU; 616 break; 617 case MT76_TM_TX_MODE_CCK: 618 cck = true; 619 
fallthrough; 620 case MT76_TM_TX_MODE_OFDM: 621 band = phy->mt76->chandef.chan->band; 622 if (band == NL80211_BAND_2GHZ && !cck) 623 rate_idx += 4; 624 625 r = &phy->mt76->hw->wiphy->bands[band]->bitrates[rate_idx]; 626 val = cck ? r->hw_value_short : r->hw_value; 627 628 mode = val >> 8; 629 rate_idx = val & 0xff; 630 break; 631 default: 632 mode = MT_PHY_TYPE_OFDM; 633 break; 634 } 635 636 switch (phy->mt76->chandef.width) { 637 case NL80211_CHAN_WIDTH_40: 638 bw = 1; 639 break; 640 case NL80211_CHAN_WIDTH_80: 641 bw = 2; 642 break; 643 case NL80211_CHAN_WIDTH_80P80: 644 case NL80211_CHAN_WIDTH_160: 645 bw = 3; 646 break; 647 default: 648 bw = 0; 649 break; 650 } 651 652 if (td->tx_rate_stbc && nss == 1) { 653 nss++; 654 rateval |= MT_TX_RATE_STBC; 655 } 656 657 rateval |= FIELD_PREP(MT_TX_RATE_IDX, rate_idx) | 658 FIELD_PREP(MT_TX_RATE_MODE, mode) | 659 FIELD_PREP(MT_TX_RATE_NSS, nss - 1); 660 661 txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE); 662 663 le32p_replace_bits(&txwi[3], 1, MT_TXD3_REM_TX_COUNT); 664 if (td->tx_rate_mode < MT76_TM_TX_MODE_HT) 665 txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE); 666 667 val = MT_TXD6_FIXED_BW | 668 FIELD_PREP(MT_TXD6_BW, bw) | 669 FIELD_PREP(MT_TXD6_TX_RATE, rateval) | 670 FIELD_PREP(MT_TXD6_SGI, td->tx_rate_sgi); 671 672 /* for HE_SU/HE_EXT_SU PPDU 673 * - 1x, 2x, 4x LTF + 0.8us GI 674 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI 675 * for HE_MU PPDU 676 * - 2x, 4x LTF + 0.8us GI 677 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI 678 * for HE_TB PPDU 679 * - 1x, 2x LTF + 1.6us GI 680 * - 4x LTF + 3.2us GI 681 */ 682 if (mode >= MT_PHY_TYPE_HE_SU) 683 val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf); 684 685 if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU)) 686 val |= MT_TXD6_LDPC; 687 688 txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID); 689 txwi[6] |= cpu_to_le32(val); 690 txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX, 691 phy->test.spe_idx)); 692 #endif 693 } 694 695 void mt7915_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi, 
696 struct sk_buff *skb, struct mt76_wcid *wcid, int pid, 697 struct ieee80211_key_conf *key, 698 enum mt76_txq_id qid, u32 changed) 699 { 700 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 701 u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2; 702 struct mt76_phy *mphy = &dev->phy; 703 704 if (phy_idx && dev->phys[MT_BAND1]) 705 mphy = dev->phys[MT_BAND1]; 706 707 mt76_connac2_mac_write_txwi(dev, txwi, skb, wcid, key, pid, qid, changed); 708 709 if (mt76_testmode_enabled(mphy)) 710 mt7915_mac_write_txwi_tm(mphy->priv, txwi, skb); 711 } 712 713 int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, 714 enum mt76_txq_id qid, struct mt76_wcid *wcid, 715 struct ieee80211_sta *sta, 716 struct mt76_tx_info *tx_info) 717 { 718 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data; 719 struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); 720 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); 721 struct ieee80211_key_conf *key = info->control.hw_key; 722 struct ieee80211_vif *vif = info->control.vif; 723 struct mt76_connac_fw_txp *txp; 724 struct mt76_txwi_cache *t; 725 int id, i, nbuf = tx_info->nbuf - 1; 726 u8 *txwi = (u8 *)txwi_ptr; 727 int pid; 728 729 if (unlikely(tx_info->skb->len <= ETH_HLEN)) 730 return -EINVAL; 731 732 if (!wcid) 733 wcid = &dev->mt76.global_wcid; 734 735 if (sta) { 736 struct mt7915_sta *msta; 737 738 msta = (struct mt7915_sta *)sta->drv_priv; 739 740 if (time_after(jiffies, msta->jiffies + HZ / 4)) { 741 info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; 742 msta->jiffies = jiffies; 743 } 744 } 745 746 t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); 747 t->skb = tx_info->skb; 748 749 id = mt76_token_consume(mdev, &t); 750 if (id < 0) 751 return id; 752 753 pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb); 754 mt7915_mac_write_txwi(mdev, txwi_ptr, tx_info->skb, wcid, pid, key, 755 qid, 0); 756 757 txp = (struct mt76_connac_fw_txp *)(txwi + MT_TXD_SIZE); 758 
for (i = 0; i < nbuf; i++) { 759 txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr); 760 txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len); 761 } 762 txp->nbuf = nbuf; 763 764 txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD | MT_CT_INFO_FROM_HOST); 765 766 if (!key) 767 txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME); 768 769 if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) && 770 ieee80211_is_mgmt(hdr->frame_control)) 771 txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME); 772 773 if (vif) { 774 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 775 776 txp->bss_idx = mvif->mt76.idx; 777 } 778 779 txp->token = cpu_to_le16(id); 780 if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) 781 txp->rept_wds_wcid = cpu_to_le16(wcid->idx); 782 else 783 txp->rept_wds_wcid = cpu_to_le16(0x3ff); 784 tx_info->skb = DMA_DUMMY_DATA; 785 786 /* pass partial skb header to fw */ 787 tx_info->buf[1].len = MT_CT_PARSE_LEN; 788 tx_info->buf[1].skip_unmap = true; 789 tx_info->nbuf = MT_CT_DMA_BUF_NUM; 790 791 return 0; 792 } 793 794 u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id) 795 { 796 struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE; 797 __le32 *txwi = ptr; 798 u32 val; 799 800 memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp)); 801 802 val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) | 803 FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT); 804 txwi[0] = cpu_to_le32(val); 805 806 val = MT_TXD1_LONG_FORMAT | 807 FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3); 808 txwi[1] = cpu_to_le32(val); 809 810 txp->token = cpu_to_le16(token_id); 811 txp->nbuf = 1; 812 txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp)); 813 814 return MT_TXD_SIZE + sizeof(*txp); 815 } 816 817 static void 818 mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi) 819 { 820 struct mt7915_sta *msta; 821 u16 fc, tid; 822 u32 val; 823 824 if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he)) 825 return; 826 827 tid = le32_get_bits(txwi[1], 
MT_TXD1_TID); 828 if (tid >= 6) /* skip VO queue */ 829 return; 830 831 val = le32_to_cpu(txwi[2]); 832 fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 | 833 FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4; 834 if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA))) 835 return; 836 837 msta = (struct mt7915_sta *)sta->drv_priv; 838 if (!test_and_set_bit(tid, &msta->ampdu_state)) 839 ieee80211_start_tx_ba_session(sta, tid, 0); 840 } 841 842 static void 843 mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t, 844 struct ieee80211_sta *sta, struct list_head *free_list) 845 { 846 struct mt76_dev *mdev = &dev->mt76; 847 struct mt7915_sta *msta; 848 struct mt76_wcid *wcid; 849 __le32 *txwi; 850 u16 wcid_idx; 851 852 mt76_connac_txp_skb_unmap(mdev, t); 853 if (!t->skb) 854 goto out; 855 856 txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t); 857 if (sta) { 858 wcid = (struct mt76_wcid *)sta->drv_priv; 859 wcid_idx = wcid->idx; 860 } else { 861 wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX); 862 wcid = rcu_dereference(dev->mt76.wcid[wcid_idx]); 863 864 if (wcid && wcid->sta) { 865 msta = container_of(wcid, struct mt7915_sta, wcid); 866 sta = container_of((void *)msta, struct ieee80211_sta, 867 drv_priv); 868 spin_lock_bh(&dev->sta_poll_lock); 869 if (list_empty(&msta->poll_list)) 870 list_add_tail(&msta->poll_list, &dev->sta_poll_list); 871 spin_unlock_bh(&dev->sta_poll_lock); 872 } 873 } 874 875 if (sta && likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE))) 876 mt7915_tx_check_aggr(sta, txwi); 877 878 __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list); 879 880 out: 881 t->skb = NULL; 882 mt76_put_txwi(mdev, t); 883 } 884 885 static void 886 mt7915_mac_tx_free_prepare(struct mt7915_dev *dev) 887 { 888 struct mt76_dev *mdev = &dev->mt76; 889 struct mt76_phy *mphy_ext = mdev->phys[MT_BAND1]; 890 891 /* clean DMA queues and unmap buffers first */ 892 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false); 893 mt76_queue_tx_cleanup(dev, 
dev->mphy.q_tx[MT_TXQ_BE], false); 894 if (mphy_ext) { 895 mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false); 896 mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false); 897 } 898 } 899 900 static void 901 mt7915_mac_tx_free_done(struct mt7915_dev *dev, 902 struct list_head *free_list, bool wake) 903 { 904 struct sk_buff *skb, *tmp; 905 906 mt7915_mac_sta_poll(dev); 907 908 if (wake) 909 mt76_set_tx_blocked(&dev->mt76, false); 910 911 mt76_worker_schedule(&dev->mt76.tx_worker); 912 913 list_for_each_entry_safe(skb, tmp, free_list, list) { 914 skb_list_del_init(skb); 915 napi_consume_skb(skb, 1); 916 } 917 } 918 919 static void 920 mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len) 921 { 922 struct mt76_connac_tx_free *free = data; 923 __le32 *tx_info = (__le32 *)(data + sizeof(*free)); 924 struct mt76_dev *mdev = &dev->mt76; 925 struct mt76_txwi_cache *txwi; 926 struct ieee80211_sta *sta = NULL; 927 LIST_HEAD(free_list); 928 void *end = data + len; 929 bool v3, wake = false; 930 u16 total, count = 0; 931 u32 txd = le32_to_cpu(free->txd); 932 __le32 *cur_info; 933 934 mt7915_mac_tx_free_prepare(dev); 935 936 total = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT); 937 v3 = (FIELD_GET(MT_TX_FREE_VER, txd) == 0x4); 938 939 for (cur_info = tx_info; count < total; cur_info++) { 940 u32 msdu, info; 941 u8 i; 942 943 if (WARN_ON_ONCE((void *)cur_info >= end)) 944 return; 945 946 /* 947 * 1'b1: new wcid pair. 948 * 1'b0: msdu_id with the same 'wcid pair' as above. 
949 */ 950 info = le32_to_cpu(*cur_info); 951 if (info & MT_TX_FREE_PAIR) { 952 struct mt7915_sta *msta; 953 struct mt76_wcid *wcid; 954 u16 idx; 955 956 idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info); 957 wcid = rcu_dereference(dev->mt76.wcid[idx]); 958 sta = wcid_to_sta(wcid); 959 if (!sta) 960 continue; 961 962 msta = container_of(wcid, struct mt7915_sta, wcid); 963 spin_lock_bh(&dev->sta_poll_lock); 964 if (list_empty(&msta->poll_list)) 965 list_add_tail(&msta->poll_list, &dev->sta_poll_list); 966 spin_unlock_bh(&dev->sta_poll_lock); 967 continue; 968 } 969 970 if (v3 && (info & MT_TX_FREE_MPDU_HEADER)) 971 continue; 972 973 for (i = 0; i < 1 + v3; i++) { 974 if (v3) { 975 msdu = (info >> (15 * i)) & MT_TX_FREE_MSDU_ID_V3; 976 if (msdu == MT_TX_FREE_MSDU_ID_V3) 977 continue; 978 } else { 979 msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info); 980 } 981 count++; 982 txwi = mt76_token_release(mdev, msdu, &wake); 983 if (!txwi) 984 continue; 985 986 mt7915_txwi_free(dev, txwi, sta, &free_list); 987 } 988 } 989 990 mt7915_mac_tx_free_done(dev, &free_list, wake); 991 } 992 993 static void 994 mt7915_mac_tx_free_v0(struct mt7915_dev *dev, void *data, int len) 995 { 996 struct mt76_connac_tx_free *free = data; 997 __le16 *info = (__le16 *)(data + sizeof(*free)); 998 struct mt76_dev *mdev = &dev->mt76; 999 void *end = data + len; 1000 LIST_HEAD(free_list); 1001 bool wake = false; 1002 u8 i, count; 1003 1004 mt7915_mac_tx_free_prepare(dev); 1005 1006 count = FIELD_GET(MT_TX_FREE_MSDU_CNT_V0, le16_to_cpu(free->ctrl)); 1007 if (WARN_ON_ONCE((void *)&info[count] > end)) 1008 return; 1009 1010 for (i = 0; i < count; i++) { 1011 struct mt76_txwi_cache *txwi; 1012 u16 msdu = le16_to_cpu(info[i]); 1013 1014 txwi = mt76_token_release(mdev, msdu, &wake); 1015 if (!txwi) 1016 continue; 1017 1018 mt7915_txwi_free(dev, txwi, NULL, &free_list); 1019 } 1020 1021 mt7915_mac_tx_free_done(dev, &free_list, wake); 1022 } 1023 1024 static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data) 1025 
{ 1026 struct mt7915_sta *msta = NULL; 1027 struct mt76_wcid *wcid; 1028 __le32 *txs_data = data; 1029 u16 wcidx; 1030 u8 pid; 1031 1032 if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1) 1033 return; 1034 1035 wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID); 1036 pid = le32_get_bits(txs_data[3], MT_TXS3_PID); 1037 1038 if (pid < MT_PACKET_ID_WED) 1039 return; 1040 1041 if (wcidx >= mt7915_wtbl_size(dev)) 1042 return; 1043 1044 rcu_read_lock(); 1045 1046 wcid = rcu_dereference(dev->mt76.wcid[wcidx]); 1047 if (!wcid) 1048 goto out; 1049 1050 msta = container_of(wcid, struct mt7915_sta, wcid); 1051 1052 if (pid == MT_PACKET_ID_WED) 1053 mt76_connac2_mac_fill_txs(&dev->mt76, wcid, txs_data); 1054 else 1055 mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data); 1056 1057 if (!wcid->sta) 1058 goto out; 1059 1060 spin_lock_bh(&dev->sta_poll_lock); 1061 if (list_empty(&msta->poll_list)) 1062 list_add_tail(&msta->poll_list, &dev->sta_poll_list); 1063 spin_unlock_bh(&dev->sta_poll_lock); 1064 1065 out: 1066 rcu_read_unlock(); 1067 } 1068 1069 bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len) 1070 { 1071 struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); 1072 __le32 *rxd = (__le32 *)data; 1073 __le32 *end = (__le32 *)&rxd[len / 4]; 1074 enum rx_pkt_type type; 1075 1076 type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE); 1077 1078 switch (type) { 1079 case PKT_TYPE_TXRX_NOTIFY: 1080 mt7915_mac_tx_free(dev, data, len); 1081 return false; 1082 case PKT_TYPE_TXRX_NOTIFY_V0: 1083 mt7915_mac_tx_free_v0(dev, data, len); 1084 return false; 1085 case PKT_TYPE_TXS: 1086 for (rxd += 2; rxd + 8 <= end; rxd += 8) 1087 mt7915_mac_add_txs(dev, rxd); 1088 return false; 1089 case PKT_TYPE_RX_FW_MONITOR: 1090 mt7915_debugfs_rx_fw_monitor(dev, data, len); 1091 return false; 1092 default: 1093 return true; 1094 } 1095 } 1096 1097 void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, 1098 struct sk_buff *skb) 1099 { 1100 struct mt7915_dev 
		*dev = container_of(mdev, struct mt7915_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);

	/* Dispatch on the packet type encoded in the first rx descriptor
	 * word; anything unhandled is dropped.
	 */
	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7915_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_TXRX_NOTIFY_V0:
		mt7915_mac_tx_free_v0(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7915_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXRXV:
		mt7915_mac_fill_rx_vector(dev, skb);
		break;
	case PKT_TYPE_TXS:
		/* txs reports start at word 2 and come in 8-dword units */
		for (rxd += 2; rxd + 8 <= end; rxd += 8)
			mt7915_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7915_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL:
		/* a zero return means the rx descriptor was parsed
		 * successfully and the frame is handed to mac80211
		 */
		if (!mt7915_mac_fill_rx(dev, skb)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}

/* Restart CCA statistics collection on the given PHY by toggling the
 * statistic-counter enable in the rx control register.
 * NOTE(review): BIT(11) | BIT(9) are undocumented here - presumably the
 * counter enable bits cleared via MT_WF_PHY_RX_CTRL1_STSCNT_EN; confirm
 * against the register definitions.
 */
void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	u32 reg = MT_WF_PHY_RX_CTRL1(phy->band_idx);

	mt76_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN);
	mt76_set(dev, reg, BIT(11) | BIT(9));
}

/* Clear all per-band counters: tx aggregation counters (read-to-clear),
 * software aggregation stats, airtime counters and the MIB snapshot held
 * by the MCU.
 */
void mt7915_mac_reset_counters(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	int i;

	/* reading MT_TX_AGG_CNT/MT_TX_AGG_CNT2 clears them */
	for (i = 0; i < 4; i++) {
		mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
		mt76_rr(dev, MT_TX_AGG_CNT2(phy->band_idx, i));
	}

	phy->mt76->survey_time = ktime_get_boottime();
	memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));

	/* reset airtime counters */
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(phy->band_idx),
		 MT_WF_RMAC_MIB_RXTIME_CLR);

	mt7915_mcu_get_chan_mib_info(phy, true);
}

/* Program inter-frame spacing (SIFS/RIFS/slot/EIFS) and CCK/OFDM timeout
 * values for the current band, taking the coverage class into account.
 * TX/RX are briefly disabled via MT_ARB_SCR while the timing registers
 * are rewritten.
 */
void mt7915_mac_set_timing(struct mt7915_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7915_dev *dev = phy->dev;
	struct mt7915_phy *ext_phy = mt7915_ext_phy(dev);
	u32 val, reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	int eifs_ofdm = 360, sifs = 10, offset;
	bool a_band = !(phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ);

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	/* with DBDC both bands share the air, so honour the larger
	 * coverage class of the two PHYs
	 */
	if (ext_phy)
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       ext_phy->coverage_class);

	mt76_set(dev, MT_ARB_SCR(phy->band_idx),
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	udelay(1);

	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	/* EIFS/SIFS constants differ between mt7915 and newer chips */
	if (!is_mt7915(&dev->mt76)) {
		if (!a_band) {
			mt76_wr(dev, MT_TMAC_ICR1(phy->band_idx),
				FIELD_PREP(MT_IFS_EIFS_CCK, 314));
			eifs_ofdm = 78;
		} else {
			eifs_ofdm = 84;
		}
	} else if (a_band) {
		sifs = 16;
	}

	mt76_wr(dev, MT_TMAC_CDTR(phy->band_idx), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(phy->band_idx), ofdm + reg_offset);
	mt76_wr(dev, MT_TMAC_ICR0(phy->band_idx),
		FIELD_PREP(MT_IFS_EIFS_OFDM, eifs_ofdm) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, sifs) |
		FIELD_PREP(MT_IFS_SLOT, phy->slottime));

	/* short slot or 5/6 GHz: use the default CF-End rate,
	 * otherwise fall back to 11b
	 */
	if (phy->slottime < 20 || a_band)
		val = MT7915_CFEND_RATE_DEFAULT;
	else
		val = MT7915_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_ACR0(phy->band_idx), MT_AGG_ACR_CFEND_RATE, val);
	mt76_clear(dev, MT_ARB_SCR(phy->band_idx),
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}

/* Enable noise-floor (IRPI) measurement for the given PHY; register
 * layout differs between mt7915 and mt7916-class chips.
 */
void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy)
{
	u32 reg;

	reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RXTD12(ext_phy) :
				      MT_WF_PHY_RXTD12_MT7916(ext_phy);
	mt76_set(dev, reg,
		 MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY |
		 MT_WF_PHY_RXTD12_IRPI_SW_CLR);

	reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RX_CTRL1(ext_phy) :
				      MT_WF_PHY_RX_CTRL1_MT7916(ext_phy);
	mt76_set(dev, reg, FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5));
}

/* Compute the weighted average noise floor (positive dBm magnitude) from
 * the per-chain IRPI histogram registers; returns 0 if no samples were
 * collected.
 */
static u8
mt7915_phy_get_nf(struct mt7915_phy *phy, int idx)
{
	/* histogram bucket -> noise power mapping (values are -dBm) */
	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
	struct mt7915_dev *dev = phy->dev;
	u32 val, sum = 0, n = 0;
	int nss, i;

	for (nss = 0; nss < hweight8(phy->mt76->chainmask); nss++) {
		u32 reg = is_mt7915(&dev->mt76) ?
			MT_WF_IRPI_NSS(0, nss + (idx << dev->dbdc_support)) :
			MT_WF_IRPI_NSS_MT7916(idx, nss);

		for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
			val = mt76_rr(dev, reg);
			sum += val * nf_power[i];
			n += val;
		}
	}

	if (!n)
		return 0;

	return sum / n;
}

/* Survey hook: refresh channel MIB info and fold a fresh noise-floor
 * sample into the running average (phy->noise is a Q4 fixed-point
 * accumulator; state->noise is reported in negative dBm).
 */
void mt7915_update_channel(struct mt76_phy *mphy)
{
	struct mt7915_phy *phy = (struct mt7915_phy *)mphy->priv;
	struct mt76_channel_state *state = mphy->chan_state;
	int nf;

	mt7915_mcu_get_chan_mib_info(phy, false);

	nf = mt7915_phy_get_nf(phy, phy->band_idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state->noise = -(phy->noise >> 4);
}

/* Wait until the MCU reports the requested recovery state bit(s);
 * warns on timeout. Returns true if the state was reached in time.
 */
static bool
mt7915_wait_reset_state(struct mt7915_dev *dev, u32 state)
{
	bool ret;

	ret = wait_event_timeout(dev->reset_wait,
				 (READ_ONCE(dev->recovery.state) & state),
				 MT7915_RESET_TIMEOUT);

	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
	return ret;
}

/* Interface iterator callback: re-push the beacon template to the MCU
 * for beaconing interface types after a reset.
 */
static void
mt7915_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct ieee80211_hw *hw = priv;

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
		mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon,
				      BSS_CHANGED_BEACON_ENABLED);
		break;
	default:
		break;
	}
}

/* Reinstall beacons on all active interfaces of both bands. */
static void
mt7915_update_beacons(struct mt7915_dev *dev)
{
	struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];

	ieee80211_iterate_active_interfaces(dev->mt76.hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7915_update_vif_beacon, dev->mt76.hw);

	if (!mphy_ext)
		return;

	ieee80211_iterate_active_interfaces(mphy_ext->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7915_update_vif_beacon, mphy_ext->hw);
}

/* Release all outstanding tx tokens (and their txwi buffers/skbs) and
 * tear down the token IDR. Used during DMA reset/recovery.
 */
void mt7915_tx_token_put(struct mt7915_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id) {
		mt7915_txwi_free(dev, txwi, NULL, NULL);
		dev->mt76.token_count--;
	}
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}

/* Full chip restart: mask interrupts, quiesce tx/napi, reset DMA and the
 * token table, reload firmware and re-run initialization, then bring the
 * running PHYs back up. Returns 0 on success or a negative error code.
 */
static int
mt7915_mac_restart(struct mt7915_dev *dev)
{
	struct mt7915_phy *phy2;
	struct mt76_phy *ext_phy;
	struct mt76_dev *mdev = &dev->mt76;
	int i, ret;

	ext_phy = dev->mt76.phys[MT_BAND1];
	phy2 = ext_phy ? ext_phy->priv : NULL;

	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}

	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
	}

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	if (ext_phy) {
		set_bit(MT76_RESET, &ext_phy->state);
		set_bit(MT76_MCU_RESET, &ext_phy->state);
	}

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);
	if (ext_phy)
		mt76_txq_schedule_all(ext_phy);

	/* disable all tx/rx napi */
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(mdev, i) {
		if (mdev->q_rx[i].ndesc)
			napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	/* token reinit */
	mt7915_tx_token_put(dev);
	idr_init(&dev->mt76.token);

	mt7915_dma_reset(dev, true);

	local_bh_disable();
	mt76_for_each_q_rx(mdev, i) {
		if (mdev->q_rx[i].ndesc) {
			napi_enable(&dev->mt76.napi[i]);
			napi_schedule(&dev->mt76.napi[i]);
		}
	}
	local_bh_enable();
	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);

	mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);

	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}
	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
	}

	/* load firmware */
	ret = mt7915_mcu_init_firmware(dev);
	if (ret)
		goto out;

	/* set the necessary init items */
	ret = mt7915_mcu_set_eeprom(dev);
	if (ret)
		goto out;

	mt7915_mac_init(dev);
	mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband);
	mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband);
	ret = mt7915_txbf_init(dev);

	if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
		ret = mt7915_run(dev->mphy.hw);
		if (ret)
			goto out;
	}

	if (ext_phy && test_bit(MT76_STATE_RUNNING, &ext_phy->state)) {
		ret = mt7915_run(ext_phy->hw);
		if (ret)
			goto out;
	}

out:
	/* reset done */
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	mt76_worker_enable(&dev->mt76.tx_worker);

	return ret;
}

/* Full recovery after a firmware WDT event: stop queues and mac work,
 * retry mt7915_mac_restart() up to 10 times, then restart mac80211 and
 * resume periodic work.
 */
static void
mt7915_mac_full_reset(struct mt7915_dev *dev)
{
	struct mt76_phy *ext_phy;
	int i;

	ext_phy = dev->mt76.phys[MT_BAND1];

	dev->recovery.hw_full_reset = true;

	wake_up(&dev->mt76.mcu.wait);
	ieee80211_stop_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_stop_queues(ext_phy->hw);

	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (ext_phy)
		cancel_delayed_work_sync(&ext_phy->mac_work);

	mutex_lock(&dev->mt76.mutex);
	for (i = 0; i < 10; i++) {
		if (!mt7915_mac_restart(dev))
			break;
	}
	mutex_unlock(&dev->mt76.mutex);

	if (i == 10)
		dev_err(dev->mt76.dev, "chip full reset failed\n");

	ieee80211_restart_hw(mt76_hw(dev));
	if (ext_phy)
		ieee80211_restart_hw(ext_phy->hw);

	ieee80211_wake_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_wake_queues(ext_phy->hw);

	dev->recovery.hw_full_reset = false;
	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     MT7915_WATCHDOG_TIME);
	if (ext_phy)
		ieee80211_queue_delayed_work(ext_phy->hw,
					     &ext_phy->mac_work,
					     MT7915_WATCHDOG_TIME);
}

/* system error recovery */
void mt7915_mac_reset_work(struct work_struct *work)
{
	struct mt7915_phy *phy2;
	struct mt76_phy *ext_phy;
	struct mt7915_dev *dev;
	int i;

	dev = container_of(work, struct mt7915_dev, reset_work);
	ext_phy = dev->mt76.phys[MT_BAND1];
	phy2 = ext_phy ? ext_phy->priv : NULL;

	/* chip full reset */
	if (dev->recovery.restart) {
		/* disable WA/WM WDT */
		mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA,
			   MT_MCU_CMD_WDT_MASK);

		if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT)
			dev->recovery.wa_reset_count++;
		else
			dev->recovery.wm_reset_count++;

		mt7915_mac_full_reset(dev);

		/* enable mcu irq */
		mt7915_irq_enable(dev, MT_INT_MCU_CMD);
		mt7915_irq_disable(dev, 0);

		/* enable WA/WM WDT */
		mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);

		dev->recovery.state = MT_MCU_CMD_NORMAL_STATE;
		dev->recovery.restart = false;
		return;
	}

	/* chip partial reset */
	if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
		return;

	ieee80211_stop_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_stop_queues(ext_phy->hw);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	}
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(&dev->mt76, i)
		napi_disable(&dev->mt76.napi[i]);
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	/* handshake with the MCU: signal DMA stopped, wait for reset-done,
	 * then reinit DMA/tokens and wait for recovery-done
	 */
	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);

	if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7915_dma_reset(dev, false);

		mt7915_tx_token_put(dev);
		idr_init(&dev->mt76.token);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
		mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);

	local_bh_disable();
	mt76_for_each_q_rx(&dev->mt76, i) {
		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}
	local_bh_enable();

	tasklet_schedule(&dev->irq_tasklet);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	mt76_worker_enable(&dev->mt76.tx_worker);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	ieee80211_wake_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_wake_queues(ext_phy->hw);

	mutex_unlock(&dev->mt76.mutex);

	mt7915_update_beacons(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     MT7915_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(ext_phy->hw,
					     &phy2->mt76->mac_work,
					     MT7915_WATCHDOG_TIME);
}

/* Entry point for error recovery: decide between full restart (firmware
 * WDT crash) and partial reset, then kick the reset worker.
 */
void mt7915_reset(struct mt7915_dev *dev)
{
	if (!dev->recovery.hw_init_done)
		return;

	if (dev->recovery.hw_full_reset)
		return;

	/* wm/wa exception: do full recovery */
	if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) {
		dev->recovery.restart = true;
		dev_info(dev->mt76.dev,
			 "%s indicated firmware crash, attempting recovery\n",
			 wiphy_name(dev->mt76.hw->wiphy));

		mt7915_irq_disable(dev, MT_INT_MCU_CMD);
		queue_work(dev->mt76.wq, &dev->reset_work);
		return;
	}

	queue_work(dev->mt76.wq, &dev->reset_work);
	wake_up(&dev->reset_wait);
}

/* Accumulate hardware MIB counters into the software mib_stats. Many
 * field masks differ between mt7915 and mt7916-class chips, hence the
 * is_mt7915() selection throughout; the counters themselves are
 * read-to-clear, so each read is added to the running totals.
 */
void mt7915_mac_update_stats(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	struct mib_stats *mib = &phy->mib;
	int i, aggr0 = 0, aggr1, cnt;
	u32 val;

	cnt = mt76_rr(dev, MT_MIB_SDR3(phy->band_idx));
	mib->fcs_err_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR4(phy->band_idx));
	mib->rx_fifo_full_cnt += FIELD_GET(MT_MIB_SDR4_RX_FIFO_FULL_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR5(phy->band_idx));
	mib->rx_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR6(phy->band_idx));
	mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR7(phy->band_idx));
	mib->rx_vector_mismatch_cnt +=
		FIELD_GET(MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR8(phy->band_idx));
	mib->rx_delimiter_fail_cnt +=
		FIELD_GET(MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR10(phy->band_idx));
	mib->rx_mrdy_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR11(phy->band_idx));
	mib->rx_len_mismatch_cnt +=
		FIELD_GET(MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR12(phy->band_idx));
	mib->tx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR13(phy->band_idx));
	mib->tx_stop_q_empty_cnt +=
		FIELD_GET(MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR14(phy->band_idx));
	mib->tx_mpdu_attempts_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR15(phy->band_idx));
	mib->tx_mpdu_success_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR16(phy->band_idx));
	mib->primary_cca_busy_time +=
		FIELD_GET(MT_MIB_SDR16_PRIMARY_CCA_BUSY_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR17(phy->band_idx));
	mib->secondary_cca_busy_time +=
		FIELD_GET(MT_MIB_SDR17_SECONDARY_CCA_BUSY_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR18(phy->band_idx));
	mib->primary_energy_detect_time +=
		FIELD_GET(MT_MIB_SDR18_PRIMARY_ENERGY_DETECT_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR19(phy->band_idx));
	mib->cck_mdrdy_time += FIELD_GET(MT_MIB_SDR19_CCK_MDRDY_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR20(phy->band_idx));
	mib->ofdm_mdrdy_time +=
		FIELD_GET(MT_MIB_SDR20_OFDM_VHT_MDRDY_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR21(phy->band_idx));
	mib->green_mdrdy_time +=
		FIELD_GET(MT_MIB_SDR21_GREEN_MDRDY_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR22(phy->band_idx));
	mib->rx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR23(phy->band_idx));
	mib->rx_ampdu_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR24(phy->band_idx));
	mib->rx_ampdu_valid_subframe_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR25(phy->band_idx));
	mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR27(phy->band_idx));
	mib->tx_rwp_fail_cnt +=
		FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR28(phy->band_idx));
	mib->tx_rwp_need_cnt +=
		FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR29(phy->band_idx));
	mib->rx_pfdrop_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDRVEC(phy->band_idx));
	mib->rx_vec_queue_overflow_drop_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR31(phy->band_idx));
	mib->rx_ba_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDRMUBF(phy->band_idx));
	mib->tx_bf_cnt += FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt);

	cnt = mt76_rr(dev, MT_MIB_DR8(phy->band_idx));
	mib->tx_mu_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_DR9(phy->band_idx));
	mib->tx_mu_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_DR11(phy->band_idx));
	mib->tx_su_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_ETBF_PAR_RPT0(phy->band_idx));
	mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_PAR_RPT0_FB_BW, cnt);
	mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NC, cnt);
	mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NR, cnt);

	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
		cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
		mib->tx_amsdu[i] += cnt;
		mib->tx_amsdu_cnt += cnt;
	}

	if (is_mt7915(&dev->mt76)) {
		/* mt7915: RTS/BA/ACK and aggregation counters are packed
		 * two 16-bit values per register, 4 register groups
		 */
		for (i = 0, aggr1 = aggr0 + 8; i < 4; i++) {
			val = mt76_rr(dev, MT_MIB_MB_SDR1(phy->band_idx, (i << 4)));
			mib->ba_miss_cnt +=
				FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
			mib->ack_fail_cnt +=
				FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);

			val = mt76_rr(dev, MT_MIB_MB_SDR0(phy->band_idx, (i << 4)));
			mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
			mib->rts_retries_cnt +=
				FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);

			val = mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
			phy->mt76->aggr_stats[aggr0++] += val & 0xffff;
			phy->mt76->aggr_stats[aggr0++] += val >> 16;

			val = mt76_rr(dev, MT_TX_AGG_CNT2(phy->band_idx, i));
			phy->mt76->aggr_stats[aggr1++] += val & 0xffff;
			phy->mt76->aggr_stats[aggr1++] += val >> 16;
		}

		cnt = mt76_rr(dev, MT_MIB_SDR32(phy->band_idx));
		mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);

		cnt = mt76_rr(dev, MT_MIB_SDR33(phy->band_idx));
		mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR33_TX_PKT_IBF_CNT, cnt);

		cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(phy->band_idx));
		mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt);
		mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt);

		cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(phy->band_idx));
		mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_ETBF_TX_FB_CPL, cnt);
		mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_ETBF_TX_FB_TRI, cnt);

		cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(phy->band_idx));
		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, cnt);
		mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, cnt);
		mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, cnt);
		mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, cnt);
	} else {
		/* mt7916 and later: different register layout */
		for (i = 0; i < 2; i++) {
			/* rts count */
			val = mt76_rr(dev, MT_MIB_MB_SDR0(phy->band_idx, (i << 2)));
			mib->rts_cnt += FIELD_GET(GENMASK(15, 0), val);
			mib->rts_cnt += FIELD_GET(GENMASK(31, 16), val);

			/* rts retry count */
			val = mt76_rr(dev, MT_MIB_MB_SDR1(phy->band_idx, (i << 2)));
			mib->rts_retries_cnt += FIELD_GET(GENMASK(15, 0), val);
			mib->rts_retries_cnt += FIELD_GET(GENMASK(31, 16), val);

			/* ba miss count */
			val = mt76_rr(dev, MT_MIB_MB_SDR2(phy->band_idx, (i << 2)));
			mib->ba_miss_cnt += FIELD_GET(GENMASK(15, 0), val);
			mib->ba_miss_cnt += FIELD_GET(GENMASK(31, 16), val);

			/* ack fail count */
			val = mt76_rr(dev, MT_MIB_MB_BFTF(phy->band_idx, (i << 2)));
			mib->ack_fail_cnt += FIELD_GET(GENMASK(15, 0), val);
			mib->ack_fail_cnt += FIELD_GET(GENMASK(31, 16), val);
		}

		for (i = 0; i < 8; i++) {
			val = mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
			phy->mt76->aggr_stats[aggr0++] += FIELD_GET(GENMASK(15, 0), val);
			phy->mt76->aggr_stats[aggr0++] += FIELD_GET(GENMASK(31, 16), val);
		}

		cnt = mt76_rr(dev, MT_MIB_SDR32(phy->band_idx));
		mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
		mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
		mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
		mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);

		cnt = mt76_rr(dev, MT_MIB_BFCR7(phy->band_idx));
		mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_MIB_BFCR7_BFEE_TX_FB_CPL, cnt);

		cnt = mt76_rr(dev, MT_MIB_BFCR2(phy->band_idx));
		mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_MIB_BFCR2_BFEE_TX_FB_TRIG, cnt);

		cnt = mt76_rr(dev, MT_MIB_BFCR0(phy->band_idx));
		mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
		mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);
		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);

		cnt = mt76_rr(dev, MT_MIB_BFCR1(phy->band_idx));
		mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
	}
}

/* Detect a stuck TRB (rx buffer ring) and trigger an L3 rx-abort SER
 * recovery if the RMAC/WTBL pointers have stayed out of sync across two
 * consecutive polls.
 */
static void mt7915_mac_severe_check(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	u32 trb;

	if (!phy->omac_mask)
		return;

	/* In rare cases, TRB pointers might be out of sync leads to RMAC
	 * stopping Rx, so check status periodically to see if TRB hardware
	 * requires minimal recovery.
	 */
	trb = mt76_rr(dev, MT_TRB_RXPSR0(phy->band_idx));

	if ((FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, trb) !=
	     FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, trb)) &&
	    (FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, phy->trb_ts) !=
	     FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, phy->trb_ts)) &&
	    trb == phy->trb_ts)
		mt7915_mcu_set_ser(dev, SER_RECOVER, SER_SET_RECOVER_L3_RX_ABORT,
				   ext_phy);

	phy->trb_ts = trb;
}

/* Deferred rate-control worker: drain sta_rc_list and push the pending
 * rate/SMPS updates to the MCU. The lock is dropped around the MCU calls
 * (they can sleep) and retaken before fetching the next entry.
 */
void mt7915_mac_sta_rc_work(struct work_struct *work)
{
	struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work);
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct mt7915_sta *msta;
	u32 changed;
	LIST_HEAD(list);

	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&dev->sta_rc_list, &list);

	while (!list_empty(&list)) {
		msta = list_first_entry(&list, struct mt7915_sta, rc_list);
		list_del_init(&msta->rc_list);
		changed = msta->changed;
		msta->changed = 0;
		spin_unlock_bh(&dev->sta_poll_lock);

		sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
		vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
			       IEEE80211_RC_NSS_CHANGED |
			       IEEE80211_RC_BW_CHANGED))
			mt7915_mcu_add_rate_ctrl(dev, vif, sta, true);

		if (changed & IEEE80211_RC_SMPS_CHANGED)
			mt7915_mcu_add_smps(dev, vif, sta);

		spin_lock_bh(&dev->sta_poll_lock);
	}

	spin_unlock_bh(&dev->sta_poll_lock);
}

/* Periodic per-PHY maintenance: update survey data every run, and stats
 * plus TRB health every 5th run; reschedules itself.
 */
void mt7915_mac_work(struct work_struct *work)
{
	struct mt7915_phy *phy;
	struct mt76_phy *mphy;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mutex_lock(&mphy->dev->mutex);

	mt76_update_survey(mphy);
	if (++mphy->mac_work_count == 5) {
		mphy->mac_work_count = 0;

		mt7915_mac_update_stats(phy);
		mt7915_mac_severe_check(phy);
	}

	mutex_unlock(&mphy->dev->mutex);

	mt76_tx_status_check(mphy->dev, false);

	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
				     MT7915_WATCHDOG_TIME);
}

/* Stop radar detection on whichever rdd chains are currently active. */
static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;

	if (phy->rdd_state & BIT(0))
		mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0,
					MT_RX_SEL0, 0);
	if (phy->rdd_state & BIT(1))
		mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1,
					MT_RX_SEL0, 0);
}

/* Start radar detection on one rdd chain, selecting the firmware region
 * code from the regulatory DFS region, and switch the chain into
 * detection mode. Returns 0 or a negative MCU error.
 */
static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain)
{
	int err, region;

	switch (dev->mt76.region) {
	case NL80211_DFS_ETSI:
		region = 0;
		break;
	case NL80211_DFS_JP:
		region = 2;
		break;
	case NL80211_DFS_FCC:
	default:
		region = 1;
		break;
	}

	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain,
				      MT_RX_SEL0, region);
	if (err < 0)
		return err;

	if (is_mt7915(&dev->mt76)) {
		err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT, chain,
					      0, dev->dbdc_support ? 2 : 0);
		if (err < 0)
			return err;
	}

	return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain,
				       MT_RX_SEL0, 1);
}

/* Begin CAC and bring up the rdd chain(s) for the current channel; on
 * mt7915 a second chain is needed for 160/80+80 MHz operation.
 */
static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7915_dev *dev = phy->dev;
	int err;

	/* start CAC */
	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, phy->band_idx,
				      MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	err = mt7915_dfs_start_rdd(dev, phy->band_idx);
	if (err < 0)
		return err;

	phy->rdd_state |= BIT(phy->band_idx);

	if (!is_mt7915(&dev->mt76))
		return 0;

	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
		err = mt7915_dfs_start_rdd(dev, 1);
		if (err < 0)
			return err;

		phy->rdd_state |= BIT(1);
	}

	return 0;
}

/* Push the region-specific radar pattern and pulse thresholds to the
 * MCU; FCC additionally programs the type-5 long-pulse network parameter.
 */
static int
mt7915_dfs_init_radar_specs(struct mt7915_phy *phy)
{
	const struct mt7915_dfs_radar_spec *radar_specs;
	struct mt7915_dev *dev = phy->dev;
	int err, i;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		err = mt7915_mcu_set_fcc5_lpn(dev, 8);
		if (err < 0)
			return err;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7915_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7915_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}

/* DFS state machine driver: transition the PHY between disabled, CAC and
 * active detector states according to the current channel's DFS
 * requirements. Returns 0 on success or a negative error code.
 */
int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	enum mt76_dfs_state dfs_state, prev_state;
	int err;

	prev_state = phy->mt76->dfs_state;
	dfs_state = mt76_phy_dfs_state(phy->mt76);

	if (prev_state == dfs_state)
		return 0;

	if (prev_state == MT_DFS_STATE_UNKNOWN)
		mt7915_dfs_stop_radar_detector(phy);

	if (dfs_state == MT_DFS_STATE_DISABLED)
		goto stop;

	if (prev_state <= MT_DFS_STATE_DISABLED) {
		err = mt7915_dfs_init_radar_specs(phy);
		if (err < 0)
			return err;

		err = mt7915_dfs_start_radar_detector(phy);
		if (err < 0)
			return err;

		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
	}

	if (dfs_state == MT_DFS_STATE_CAC)
		return 0;

	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
				      phy->band_idx, MT_RX_SEL0, 0);
	if (err < 0) {
		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
		return err;
	}

	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
	return 0;

stop:
	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START,
				      phy->band_idx, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	if (is_mt7915(&dev->mt76)) {
		err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT,
					      phy->band_idx, 0,
					      dev->dbdc_support ? 2 : 0);
		if (err < 0)
			return err;
	}

	mt7915_dfs_stop_radar_detector(phy);
	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;

	return 0;
}

/* Convert a TWT wake duration (256us units) to TSF microseconds. */
static int
mt7915_mac_twt_duration_align(int duration)
{
	return duration << 8;
}

/* Insert a TWT flow into the device-wide schedule list, keeping flows
 * ordered by start TSF and packed so the new flow's service period fits
 * in a gap. Returns the start TSF assigned to the flow.
 */
static u64
mt7915_mac_twt_sched_list_add(struct mt7915_dev *dev,
			      struct mt7915_twt_flow *flow)
{
	struct mt7915_twt_flow *iter, *iter_next;
	u32 duration = flow->duration << 8;
	u64 start_tsf;

	iter = list_first_entry_or_null(&dev->twt_list,
					struct mt7915_twt_flow, list);
	if (!iter || !iter->sched || iter->start_tsf > duration) {
		/* add flow as first entry in the list */
		list_add(&flow->list, &dev->twt_list);
		return 0;
	}

	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
		start_tsf = iter->start_tsf +
			    mt7915_mac_twt_duration_align(iter->duration);
		if (list_is_last(&iter->list, &dev->twt_list))
			break;

		if (!iter_next->sched ||
		    iter_next->start_tsf > start_tsf + duration) {
			list_add(&flow->list, &iter->list);
			goto out;
		}
	}

	/* add flow as last entry in the list */
	list_add_tail(&flow->list, &dev->twt_list);
out:
	return start_tsf;
}

/* Validate a TWT setup request against the constraints this driver
 * supports (individual, 256us unit, implicit, interval >= duration).
 * Returns 0 if acceptable, -EOPNOTSUPP otherwise.
 */
static int mt7915_mac_check_twt_req(struct ieee80211_twt_setup *twt)
{
	struct ieee80211_twt_params *twt_agrt;
	u64 interval, duration;
	u16 mantissa;
	u8 exp;

	/* only individual agreement supported */
	if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
		return -EOPNOTSUPP;

	/* only 256us unit supported */
	if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
		return -EOPNOTSUPP;

	twt_agrt = (struct ieee80211_twt_params *)twt->params;

	/* explicit agreement not supported */
	if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
		return -EOPNOTSUPP;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
			le16_to_cpu(twt_agrt->req_type));
	mantissa = le16_to_cpu(twt_agrt->mantissa);
	duration = twt_agrt->min_twt_dur << 8;

	interval = (u64)mantissa << exp;
	if (interval < duration)
		return -EOPNOTSUPP;

	return 0;
}

/* Return true if the station already has a TWT flow with parameters
 * identical to the requested agreement.
 */
static bool
mt7915_mac_twt_param_equal(struct mt7915_sta *msta,
			   struct ieee80211_twt_params *twt_agrt)
{
	u16 type = le16_to_cpu(twt_agrt->req_type);
	u8 exp;
	int i;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
	for (i = 0; i < MT7915_MAX_STA_TWT_AGRT; i++) {
		struct mt7915_twt_flow *f;

		if (!(msta->twt.flowid_mask & BIT(i)))
			continue;

		f = &msta->twt.flow[i];
		if (f->duration == twt_agrt->min_twt_dur &&
		    f->mantissa == twt_agrt->mantissa &&
		    f->exp == exp &&
		    f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
		    f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
		    f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
			return true;
	}

	return false;
}

/* Handle a TWT setup request from a station: validate, allocate a flow
 * id / table slot, schedule the flow and program the agreement into the
 * MCU. (Definition continues past this chunk.)
 */
void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta,
			      struct ieee80211_twt_setup *twt)
{
	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
	struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
	u16 req_type = le16_to_cpu(twt_agrt->req_type);
	enum ieee80211_twt_setup_cmd sta_setup_cmd;
	struct mt7915_dev *dev = mt7915_hw_dev(hw);
	struct mt7915_twt_flow *flow;
	int flowid, table_id;
	u8 exp;

	if (mt7915_mac_check_twt_req(twt))
		goto out;

	mutex_lock(&dev->mt76.mutex);

	if (dev->twt.n_agrt == MT7915_MAX_TWT_AGRT)
		goto unlock;

	if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
		goto unlock;

	if (twt_agrt->min_twt_dur < MT7915_MIN_TWT_DUR) {
		setup_cmd = TWT_SETUP_CMD_DICTATE;
		twt_agrt->min_twt_dur = MT7915_MIN_TWT_DUR;
		goto unlock;
	}

	flowid = ffs(~msta->twt.flowid_mask) - 1;
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
	twt_agrt->req_type |= le16_encode_bits(flowid,
					       IEEE80211_TWT_REQTYPE_FLOWID);

	table_id = ffs(~dev->twt.table_mask) - 1;
	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);

	if (mt7915_mac_twt_param_equal(msta, twt_agrt))
		goto unlock;

	flow = &msta->twt.flow[flowid];
	memset(flow, 0, sizeof(*flow));
	INIT_LIST_HEAD(&flow->list);
	flow->wcid = msta->wcid.idx;
	flow->table_id = table_id;
	flow->id = flowid;
	flow->duration = twt_agrt->min_twt_dur;
	flow->mantissa = twt_agrt->mantissa;
	flow->exp = exp;
	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);

	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
		u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
		u64 flow_tsf, curr_tsf;
		u32 rem;

		flow->sched = true;
		flow->start_tsf = mt7915_mac_twt_sched_list_add(dev, flow);
		curr_tsf = __mt7915_get_tsf(hw, msta->vif);
		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
		flow_tsf = curr_tsf + interval - rem;
		twt_agrt->twt = cpu_to_le64(flow_tsf);
	} else {
		list_add_tail(&flow->list, &dev->twt_list);
	}
	flow->tsf = le64_to_cpu(twt_agrt->twt);

	if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
		goto unlock;

	setup_cmd = TWT_SETUP_CMD_ACCEPT;
	dev->twt.table_mask |= BIT(table_id);
	msta->twt.flowid_mask |=
BIT(flowid); 2322 dev->twt.n_agrt++; 2323 2324 unlock: 2325 mutex_unlock(&dev->mt76.mutex); 2326 out: 2327 twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD); 2328 twt_agrt->req_type |= 2329 le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD); 2330 twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) | 2331 (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED); 2332 } 2333 2334 void mt7915_mac_twt_teardown_flow(struct mt7915_dev *dev, 2335 struct mt7915_sta *msta, 2336 u8 flowid) 2337 { 2338 struct mt7915_twt_flow *flow; 2339 2340 lockdep_assert_held(&dev->mt76.mutex); 2341 2342 if (flowid >= ARRAY_SIZE(msta->twt.flow)) 2343 return; 2344 2345 if (!(msta->twt.flowid_mask & BIT(flowid))) 2346 return; 2347 2348 flow = &msta->twt.flow[flowid]; 2349 if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow, 2350 MCU_TWT_AGRT_DELETE)) 2351 return; 2352 2353 list_del_init(&flow->list); 2354 msta->twt.flowid_mask &= ~BIT(flowid); 2355 dev->twt.table_mask &= ~BIT(flow->table_id); 2356 dev->twt.n_agrt--; 2357 } 2358