// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "coredump.h"
#include "mt7915.h"
#include "../dma.h"
#include "mac.h"
#include "mcu.h"

#define to_rssi(field, rcpi)	((FIELD_GET(field, rcpi) - 220) / 2)

static const struct mt7915_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0,  6, 32, 28, 0,  990, 5010, 17, 1,  1 },
		[6] =  { 1, 0,  9, 32, 28, 0,  615, 5010, 27, 1,  1 },
		[7] =  { 1, 0, 15, 32, 28, 0,  240,  445, 27, 1,  1 },
		[8] =  { 1, 0, 12, 32, 28, 0,  240,  510, 42, 1,  1 },
		[9] =  { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
		[10] = { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
		[11] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 18, 32, 28, { },  54 },
		[12] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 27, 32, 24, { },  54 },
	},
};

static const struct mt7915_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0,  8,  32, 28, 0, 508, 3076, 13, 1,  1 },
		[1] = { 1, 0, 12,  32, 28, 0, 140,  240, 17, 1,  1 },
		[2] = { 1, 0,  8,  32, 28, 0, 190,  510, 22, 1,  1 },
		[3] = { 1, 0,  6,  32, 28, 0, 190,  510, 32, 1,  1 },
		[4] = { 1, 0,  9, 255, 28, 0, 323,  343, 13, 1, 32 },
	},
};

static const struct mt7915_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] =  { 1, 0,  8,  32, 28, 0,  508, 3076,  13, 1,  1 },
		[1] =  { 1, 0, 12,  32, 28, 0,  140,  240,  17, 1,  1 },
		[2] =  { 1, 0,  8,  32, 28, 0,  190,  510,  22, 1,  1 },
		[3] =  { 1, 0,  6,  32, 28, 0,  190,  510,  32, 1,  1 },
		[4] =  { 1, 0,  9, 255, 28, 0,  323,  343,  13, 1, 32 },
		[13] = { 1, 0,  7,  32, 28, 0, 3836, 3856,  14, 1,  1 },
		[14] = { 1, 0,  6,  32, 28, 0,  615, 5010, 110, 1,  1 },
		[15] = { 1, 1,  0,   0,  0, 0,   15, 5010, 110, 0,  0, 12, 32, 28 },
	},
};

static struct mt76_wcid *mt7915_rx_get_wcid(struct mt7915_dev *dev,
					    u16 idx, bool unicast)
{
	struct mt7915_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7915_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}

void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
}

bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}

u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid, u8 dw)
{
	mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
		FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));

	return MT_WTBL_LMAC_OFFS(wcid, dw);
}
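/* Poll the WTBL for every station queued on sta_poll_list: accumulate
 * per-AC TX/RX airtime (reported to mac80211 for airtime fairness),
 * refresh the guard interval of the current rate from the per-sta
 * counters, and sample the RSSI of response frames (CTS/BA/ACK).
 */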
static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
{
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct ieee80211_sta *sta;
	struct mt7915_sta *msta;
	struct rate_info *rate;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	int i;

	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&dev->sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

	rcu_read_lock();

	while (true) {
		bool clear = false;
		u32 addr, val;
		u16 idx;
		s8 rssi[4];
		u8 bw;

		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&sta_poll_list)) {
			spin_unlock_bh(&dev->sta_poll_lock);
			break;
		}
		msta = list_first_entry(&sta_poll_list,
					struct mt7915_sta, poll_list);
		list_del_init(&msta->poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);

		idx = msta->wcid.idx;

		/* refresh peer's airtime reporting */
		addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 20);

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = msta->airtime_ac[i];
			u32 rx_last = msta->airtime_ac[i + 4];

			msta->airtime_ac[i] = mt76_rr(dev, addr);
			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			tx_time[i] = msta->airtime_ac[i] - tx_last;
			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;

			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

		if (clear) {
			mt7915_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u8 q = mt76_connac_lmac_mapping(i);
			u32 tx_cur = tx_time[q];
			u32 rx_cur = rx_time[q];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur,
						       rx_cur);
		}

		/*
		 * We don't support reading GI info from txs packets.
		 * For accurate tx status reporting and AQL improvement,
		 * we need to make sure that flags match so polling GI
		 * from per-sta counters directly.
		 */
		rate = &msta->wcid.rate;
		addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 7);
		val = mt76_rr(dev, addr);

		switch (rate->bw) {
		case RATE_INFO_BW_160:
			bw = IEEE80211_STA_RX_BW_160;
			break;
		case RATE_INFO_BW_80:
			bw = IEEE80211_STA_RX_BW_80;
			break;
		case RATE_INFO_BW_40:
			bw = IEEE80211_STA_RX_BW_40;
			break;
		default:
			bw = IEEE80211_STA_RX_BW_20;
			break;
		}

		if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
			u8 offs = 24 + 2 * bw;

			rate->he_gi = (val & (0x3 << offs)) >> offs;
		} else if (rate->flags &
			   (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) {
			if (val & BIT(12 + bw))
				rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
			else
				rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
		}

		/* get signal strength of resp frames (CTS/BA/ACK) */
		addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 30);
		val = mt76_rr(dev, addr);

		rssi[0] = to_rssi(GENMASK(7, 0), val);
		rssi[1] = to_rssi(GENMASK(15, 8), val);
		rssi[2] = to_rssi(GENMASK(23, 16), val);
		rssi[3] = to_rssi(GENMASK(31, 24), val);

		msta->ack_signal =
			mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);

		ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
	}

	rcu_read_unlock();
}

void mt7915_mac_enable_rtscts(struct mt7915_dev *dev,
			      struct ieee80211_vif *vif, bool enable)
{
	struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
	u32 addr;

	addr = mt7915_mac_wtbl_lmac_addr(dev, mvif->sta.wcid.idx, 5);
	if (enable)
		mt76_set(dev, addr, BIT(5));
	else
		mt76_clear(dev, addr, BIT(5));
}
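/* Parse a NORMAL RX descriptor into struct mt76_rx_status. The fixed
 * RXD may be followed by optional groups (4, 1, 2, 3, 5) advertised in
 * rxd1, and the payload may arrive either as 802.11 or as a
 * header-translated 802.3 frame.
 */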
static int
mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7915_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv = NULL;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 rxd3 = le32_to_cpu(rxd[3]);
	u32 rxd4 = le32_to_cpu(rxd[4]);
	u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
	bool unicast, insert_ccmp_hdr = false;
	u8 remove_pad, amsdu_info;
	u8 mode = 0, qos_ctl = 0;
	struct mt7915_sta *msta = NULL;
	u32 csum_status = *(u32 *)skb->cb;
	bool hdr_trans;
	u16 hdr_gap;
	u16 seq_ctrl = 0;
	__le16 fc = 0;
	int idx;

	memset(status, 0, sizeof(*status));

	if ((rxd1 & MT_RXD1_NORMAL_BAND_IDX) && !phy->band_idx) {
		mphy = dev->mt76.phys[MT_BAND1];
		if (!mphy)
			return -EINVAL;

		phy = mphy->priv;
		status->phy_idx = 1;
	}

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
		return -EINVAL;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	status->wcid = mt7915_rx_get_wcid(dev, idx, unicast);

	if (status->wcid) {
		msta = container_of(status->wcid, struct mt7915_sta, wcid);
		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&msta->poll_list))
			list_add_tail(&msta->poll_list, &dev->sta_poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);
	}

	status->freq = mphy->chandef.chan->center_freq;
	status->band = mphy->chandef.chan->band;
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else if (status->band == NL80211_BAND_6GHZ)
		sband = &mphy->sband_6g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!sband->channels)
		return -EINVAL;

	if ((rxd0 & csum_mask) == csum_mask &&
	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;
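	/* The fixed RXD is six dwords; each group flagged in rxd1 extends
	 * it. Walk past every present group, bounds-checking against the
	 * skb so a malformed descriptor cannot run past the buffer.
	 */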
	rxd += 6;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		fc = cpu_to_le16(FIELD_GET(MT_RXD6_FRAME_CONTROL, v0));
		qos_ctl = FIELD_GET(MT_RXD8_QOS_CTL, v2);
		seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* RXD Group 3 - P-RXV */
	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		u32 v0, v1;
		int ret;

		rxv = rxd;
		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;

		v0 = le32_to_cpu(rxv[0]);
		v1 = le32_to_cpu(rxv[1]);

		if (v0 & MT_PRXV_HT_AD_CODE)
			status->enc_flags |= RX_ENC_FLAG_LDPC;

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);

		/* RXD Group 5 - C-RXV */
		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			rxd += 18;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;
		}

		if (!is_mt7915(&dev->mt76) || (rxd1 & MT_RXD1_NORMAL_GROUP_5)) {
			ret = mt76_connac2_mac_fill_rx_rate(&dev->mt76, status,
							    sband, rxv, &mode);
			if (ret < 0)
				return ret;
		}
	}

	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
	}
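	/* For header-translated (802.3) fragments, the translation is
	 * reversed so mac80211 can reassemble them; otherwise the RXD and
	 * padding are stripped and, on a flagged translation error, the
	 * extra 2-byte length field inserted by the hardware is removed.
	 */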
	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
	if (hdr_trans && ieee80211_has_morefrags(fc)) {
		struct ieee80211_vif *vif;
		int err;

		if (!msta || !msta->vif)
			return -EINVAL;

		vif = container_of((void *)msta->vif, struct ieee80211_vif,
				   drv_priv);
		err = mt76_connac2_reverse_frag0_hdr_trans(vif, skb, hdr_gap);
		if (err)
			return err;

		hdr_trans = false;
	} else {
		int pad_start = 0;

		skb_pull(skb, hdr_gap);
		if (!hdr_trans && status->amsdu) {
			pad_start = ieee80211_get_hdrlen_from_skb(skb);
		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
			/*
			 * When header translation failure is indicated,
			 * the hardware will insert an extra 2-byte field
			 * containing the data length after the protocol
			 * type field. This happens either when the LLC-SNAP
			 * pattern did not match, or if a VLAN header was
			 * detected.
			 */
			pad_start = 12;
			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
				pad_start += 4;
			else
				pad_start = 0;
		}

		if (pad_start) {
			memmove(skb->data + 2, skb->data, pad_start);
			skb_pull(skb, 2);
		}
	}

	if (!hdr_trans) {
		struct ieee80211_hdr *hdr;

		if (insert_ccmp_hdr) {
			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

			mt76_insert_ccmp_hdr(skb, key_id);
		}

		hdr = mt76_skb_get_hdr(skb);
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *ieee80211_get_qos_ctl(hdr);
		}
	} else {
		status->flag |= RX_FLAG_8023;
	}

	if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
		mt76_connac2_mac_decode_he_radiotap(&dev->mt76, skb, rxv, mode);

	if (!status->wcid || !ieee80211_is_data_qos(fc))
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(fc);
	status->qos_ctl = qos_ctl;
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);

	return 0;
}
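/* Testmode only: consume an RX vector report and record per-chain RCPI
 * and in-band/wide-band RSSI plus the frequency offset and SNR of the
 * last PPDU. The skb is always consumed.
 */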
static void
mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
{
#ifdef CONFIG_NL80211_TESTMODE
	struct mt7915_phy *phy = &dev->phy;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv_hdr = rxd + 2;
	__le32 *rxv = rxd + 4;
	u32 rcpi, ib_rssi, wb_rssi, v20, v21;
	u8 band_idx;
	s32 foe;
	u8 snr;
	int i;

	band_idx = le32_get_bits(rxv_hdr[1], MT_RXV_HDR_BAND_IDX);
	if (band_idx && !phy->band_idx) {
		phy = mt7915_ext_phy(dev);
		if (!phy)
			goto out;
	}

	rcpi = le32_to_cpu(rxv[6]);
	ib_rssi = le32_to_cpu(rxv[7]);
	wb_rssi = le32_to_cpu(rxv[8]) >> 5;

	for (i = 0; i < 4; i++, rcpi >>= 8, ib_rssi >>= 8, wb_rssi >>= 9) {
		if (i == 3)
			wb_rssi = le32_to_cpu(rxv[9]);

		phy->test.last_rcpi[i] = rcpi & 0xff;
		phy->test.last_ib_rssi[i] = ib_rssi & 0xff;
		phy->test.last_wb_rssi[i] = wb_rssi & 0xff;
	}

	v20 = le32_to_cpu(rxv[20]);
	v21 = le32_to_cpu(rxv[21]);

	foe = FIELD_GET(MT_CRXV_FOE_LO, v20) |
	      (FIELD_GET(MT_CRXV_FOE_HI, v21) << MT_CRXV_FOE_SHIFT);

	snr = FIELD_GET(MT_CRXV_SNR, v20) - 16;

	phy->test.last_freq_offset = foe;
	phy->test.last_snr = snr;
out:
#endif
	dev_kfree_skb(skb);
}

static void
mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
			 struct sk_buff *skb)
{
#ifdef CONFIG_NL80211_TESTMODE
	struct mt76_testmode_data *td = &phy->mt76->test;
	const struct ieee80211_rate *r;
	u8 bw, mode, nss = td->tx_rate_nss;
	u8 rate_idx = td->tx_rate_idx;
	u16 rateval = 0;
	u32 val;
	bool cck = false;
	int band;

	if (skb != phy->mt76->test.tx_skb)
		return;

	switch (td->tx_rate_mode) {
	case MT76_TM_TX_MODE_HT:
		nss = 1 + (rate_idx >> 3);
		mode = MT_PHY_TYPE_HT;
		break;
	case MT76_TM_TX_MODE_VHT:
		mode = MT_PHY_TYPE_VHT;
		break;
	case MT76_TM_TX_MODE_HE_SU:
		mode = MT_PHY_TYPE_HE_SU;
		break;
	case MT76_TM_TX_MODE_HE_EXT_SU:
		mode = MT_PHY_TYPE_HE_EXT_SU;
		break;
	case MT76_TM_TX_MODE_HE_TB:
		mode = MT_PHY_TYPE_HE_TB;
		break;
	case MT76_TM_TX_MODE_HE_MU:
		mode = MT_PHY_TYPE_HE_MU;
		break;
	case MT76_TM_TX_MODE_CCK:
		cck = true;
		fallthrough;
	case MT76_TM_TX_MODE_OFDM:
		band = phy->mt76->chandef.chan->band;
		if (band == NL80211_BAND_2GHZ && !cck)
			rate_idx += 4;

		r = &phy->mt76->hw->wiphy->bands[band]->bitrates[rate_idx];
		val = cck ? r->hw_value_short : r->hw_value;

		mode = val >> 8;
		rate_idx = val & 0xff;
		break;
	default:
		mode = MT_PHY_TYPE_OFDM;
		break;
	}

	switch (phy->mt76->chandef.width) {
	case NL80211_CHAN_WIDTH_40:
		bw = 1;
		break;
	case NL80211_CHAN_WIDTH_80:
		bw = 2;
		break;
	case NL80211_CHAN_WIDTH_80P80:
	case NL80211_CHAN_WIDTH_160:
		bw = 3;
		break;
	default:
		bw = 0;
		break;
	}

	if (td->tx_rate_stbc && nss == 1) {
		nss++;
		rateval |= MT_TX_RATE_STBC;
	}

	rateval |= FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
		   FIELD_PREP(MT_TX_RATE_MODE, mode) |
		   FIELD_PREP(MT_TX_RATE_NSS, nss - 1);

	txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);

	le32p_replace_bits(&txwi[3], 1, MT_TXD3_REM_TX_COUNT);
	if (td->tx_rate_mode < MT76_TM_TX_MODE_HT)
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);

	val = MT_TXD6_FIXED_BW |
	      FIELD_PREP(MT_TXD6_BW, bw) |
	      FIELD_PREP(MT_TXD6_TX_RATE, rateval) |
	      FIELD_PREP(MT_TXD6_SGI, td->tx_rate_sgi);

	/* for HE_SU/HE_EXT_SU PPDU
	 * - 1x, 2x, 4x LTF + 0.8us GI
	 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI
	 * for HE_MU PPDU
	 * - 2x, 4x LTF + 0.8us GI
	 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI
	 * for HE_TB PPDU
	 * - 1x, 2x LTF + 1.6us GI
	 * - 4x LTF + 3.2us GI
	 */
	if (mode >= MT_PHY_TYPE_HE_SU)
		val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf);

	if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
		val |= MT_TXD6_LDPC;

	txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
	txwi[6] |= cpu_to_le32(val);
	txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
					  phy->test.spe_idx));
#endif
}

void mt7915_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
			   struct ieee80211_key_conf *key,
			   enum mt76_txq_id qid, u32 changed)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	struct mt76_phy *mphy = &dev->phy;

	if (phy_idx && dev->phys[MT_BAND1])
		mphy = dev->phys[MT_BAND1];

	mt76_connac2_mac_write_txwi(dev, txwi, skb, wcid, key, pid, qid, changed);

	if (mt76_testmode_enabled(mphy))
		mt7915_mac_write_txwi_tm(mphy->priv, txwi, skb);
}
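/* Fill the TXWI and the cut-through (CT) firmware TXP for a frame: a
 * token is allocated to track the txwi cache entry across completion,
 * only MT_CT_PARSE_LEN bytes of the header are handed to the firmware
 * for parsing, and the remaining fragments are attached to the TXP.
 */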
int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_connac_fw_txp *txp;
	struct mt76_txwi_cache *t;
	int id, i, nbuf = tx_info->nbuf - 1;
	u8 *txwi = (u8 *)txwi_ptr;
	int pid;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	if (sta) {
		struct mt7915_sta *msta;

		msta = (struct mt7915_sta *)sta->drv_priv;

		if (time_after(jiffies, msta->jiffies + HZ / 4)) {
			info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
			msta->jiffies = jiffies;
		}
	}

	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
	mt7915_mac_write_txwi(mdev, txwi_ptr, tx_info->skb, wcid, pid, key,
			      qid, 0);

	txp = (struct mt76_connac_fw_txp *)(txwi + MT_TXD_SIZE);
	for (i = 0; i < nbuf; i++) {
		txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
		txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
	}
	txp->nbuf = nbuf;

	txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD | MT_CT_INFO_FROM_HOST);

	if (!key)
		txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    ieee80211_is_mgmt(hdr->frame_control))
		txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;

		txp->bss_idx = mvif->mt76.idx;
	}

	txp->token = cpu_to_le16(id);
	if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
		txp->rept_wds_wcid = cpu_to_le16(wcid->idx);
	else
		txp->rept_wds_wcid = cpu_to_le16(0x3ff);
	tx_info->skb = DMA_DUMMY_DATA;

	/* pass partial skb header to fw */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	return 0;
}
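/* Pre-build a TXWI + TXP template in a WED (Wireless Ethernet Dispatch)
 * TX buffer: long-format CT descriptor, 802.3 header format and a single
 * fragment starting right after the descriptors. Returns the total
 * descriptor size reserved in front of the payload.
 */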
u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
{
	struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
	__le32 *txwi = ptr;
	u32 val;

	memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));

	val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
	txwi[1] = cpu_to_le32(val);

	txp->token = cpu_to_le16(token_id);
	txp->nbuf = 1;
	txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));

	return MT_TXD_SIZE + sizeof(*txp);
}

static void
mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
{
	struct mt7915_sta *msta;
	u16 fc, tid;
	u32 val;

	if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
		return;

	tid = le32_get_bits(txwi[1], MT_TXD1_TID);
	if (tid >= 6) /* skip VO queue */
		return;

	val = le32_to_cpu(txwi[2]);
	fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
	     FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
		return;

	msta = (struct mt7915_sta *)sta->drv_priv;
	if (!test_and_set_bit(tid, &msta->ampdu_state))
		ieee80211_start_tx_ba_session(sta, tid, 0);
}

static void
mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
		 struct ieee80211_sta *sta, struct list_head *free_list)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt7915_sta *msta;
	struct mt76_wcid *wcid;
	__le32 *txwi;
	u16 wcid_idx;

	mt76_connac_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
	if (sta) {
		wcid = (struct mt76_wcid *)sta->drv_priv;
		wcid_idx = wcid->idx;
	} else {
		wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
		wcid = rcu_dereference(dev->mt76.wcid[wcid_idx]);

		if (wcid && wcid->sta) {
			msta = container_of(wcid, struct mt7915_sta, wcid);
			sta = container_of((void *)msta, struct ieee80211_sta,
					   drv_priv);
			spin_lock_bh(&dev->sta_poll_lock);
			if (list_empty(&msta->poll_list))
				list_add_tail(&msta->poll_list, &dev->sta_poll_list);
			spin_unlock_bh(&dev->sta_poll_lock);
		}
	}

	if (sta && likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
		mt7915_tx_check_aggr(sta, txwi);

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);

out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}
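/* TX-free path: the firmware reports completed MSDU tokens in
 * TXRX_NOTIFY events. The helpers below first clean up the DMA queues,
 * then release each token, complete the skbs, and finally unblock the
 * TX worker and poll the active stations.
 */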
static void
mt7915_mac_tx_free_prepare(struct mt7915_dev *dev)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy_ext = mdev->phys[MT_BAND1];

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
	if (mphy_ext) {
		mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false);
	}
}

static void
mt7915_mac_tx_free_done(struct mt7915_dev *dev,
			struct list_head *free_list, bool wake)
{
	struct sk_buff *skb, *tmp;

	mt7915_mac_sta_poll(dev);

	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	list_for_each_entry_safe(skb, tmp, free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}

static void
mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
{
	struct mt76_connac_tx_free *free = data;
	__le32 *tx_info = (__le32 *)(data + sizeof(*free));
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	LIST_HEAD(free_list);
	void *end = data + len;
	bool v3, wake = false;
	u16 total, count = 0;
	u32 txd = le32_to_cpu(free->txd);
	__le32 *cur_info;

	mt7915_mac_tx_free_prepare(dev);

	total = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
	v3 = (FIELD_GET(MT_TX_FREE_VER, txd) == 0x4);

	for (cur_info = tx_info; count < total; cur_info++) {
		u32 msdu, info;
		u8 i;

		if (WARN_ON_ONCE((void *)cur_info >= end))
			return;

		/*
		 * 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		info = le32_to_cpu(*cur_info);
		if (info & MT_TX_FREE_PAIR) {
			struct mt7915_sta *msta;
			struct mt76_wcid *wcid;
			u16 idx;

			idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			msta = container_of(wcid, struct mt7915_sta, wcid);
			spin_lock_bh(&dev->sta_poll_lock);
			if (list_empty(&msta->poll_list))
				list_add_tail(&msta->poll_list, &dev->sta_poll_list);
			spin_unlock_bh(&dev->sta_poll_lock);
			continue;
		}

		if (v3 && (info & MT_TX_FREE_MPDU_HEADER))
			continue;

		for (i = 0; i < 1 + v3; i++) {
			if (v3) {
				msdu = (info >> (15 * i)) & MT_TX_FREE_MSDU_ID_V3;
				if (msdu == MT_TX_FREE_MSDU_ID_V3)
					continue;
			} else {
				msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
			}
			count++;
			txwi = mt76_token_release(mdev, msdu, &wake);
			if (!txwi)
				continue;

			mt7915_txwi_free(dev, txwi, sta, &free_list);
		}
	}

	mt7915_mac_tx_free_done(dev, &free_list, wake);
}

static void
mt7915_mac_tx_free_v0(struct mt7915_dev *dev, void *data, int len)
{
	struct mt76_connac_tx_free *free = data;
	__le16 *info = (__le16 *)(data + sizeof(*free));
	struct mt76_dev *mdev = &dev->mt76;
	void *end = data + len;
	LIST_HEAD(free_list);
	bool wake = false;
	u8 i, count;

	mt7915_mac_tx_free_prepare(dev);

	count = FIELD_GET(MT_TX_FREE_MSDU_CNT_V0, le16_to_cpu(free->ctrl));
	if (WARN_ON_ONCE((void *)&info[count] > end))
		return;

	for (i = 0; i < count; i++) {
		struct mt76_txwi_cache *txwi;
		u16 msdu = le16_to_cpu(info[i]);

		txwi = mt76_token_release(mdev, msdu, &wake);
		if (!txwi)
			continue;

		mt7915_txwi_free(dev, txwi, NULL, &free_list);
	}

	mt7915_mac_tx_free_done(dev, &free_list, wake);
}
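/* Parse one TXS (TX status) report: resolve the wcid/pid pair, then
 * update rate info directly for WED-managed frames (pid equal to
 * MT_PACKET_ID_WED) or complete the matching status skb, and queue the
 * station for WTBL polling.
 */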
static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
{
	struct mt7915_sta *msta = NULL;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u16 wcidx;
	u8 pid;

	if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
		return;

	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);

	if (pid < MT_PACKET_ID_WED)
		return;

	if (wcidx >= mt7915_wtbl_size(dev))
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7915_sta, wcid);

	if (pid == MT_PACKET_ID_WED)
		mt76_connac2_mac_fill_txs(&dev->mt76, wcid, txs_data);
	else
		mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data);

	if (!wcid->sta)
		goto out;

	spin_lock_bh(&dev->sta_poll_lock);
	if (list_empty(&msta->poll_list))
		list_add_tail(&msta->poll_list, &dev->sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

out:
	rcu_read_unlock();
}

bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
{
	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
	__le32 *rxd = (__le32 *)data;
	__le32 *end = (__le32 *)&rxd[len / 4];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7915_mac_tx_free(dev, data, len);
		return false;
	case PKT_TYPE_TXRX_NOTIFY_V0:
		mt7915_mac_tx_free_v0(dev, data, len);
		return false;
	case PKT_TYPE_TXS:
		for (rxd += 2; rxd + 8 <= end; rxd += 8)
			mt7915_mac_add_txs(dev, rxd);
		return false;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7915_debugfs_rx_fw_monitor(dev, data, len);
		return false;
	default:
		return true;
	}
}

void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb)
{
	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7915_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_TXRX_NOTIFY_V0:
		mt7915_mac_tx_free_v0(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7915_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXRXV:
		mt7915_mac_fill_rx_vector(dev, skb);
		break;
	case PKT_TYPE_TXS:
		for (rxd += 2; rxd + 8 <= end; rxd += 8)
			mt7915_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7915_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL:
		if (!mt7915_mac_fill_rx(dev, skb)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}

void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	u32 reg = MT_WF_PHY_RX_CTRL1(phy->band_idx);

	mt76_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN);
	mt76_set(dev, reg, BIT(11) | BIT(9));
}

void mt7915_mac_reset_counters(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	int i;

	for (i = 0; i < 4; i++) {
		mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
		mt76_rr(dev, MT_TX_AGG_CNT2(phy->band_idx, i));
	}

	phy->mt76->survey_time = ktime_get_boottime();
	memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));

	/* reset airtime counters */
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(phy->band_idx),
		 MT_WF_RMAC_MIB_RXTIME_CLR);

	mt7915_mcu_get_chan_mib_info(phy, true);
}
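/* Program PHY timing: CCK/OFDM PLCP and CCA timeouts are stretched by
 * 3 * coverage_class, and SIFS/SLOT/EIFS depend on band and chip
 * generation. TX/RX are briefly disabled via the arbiter while the
 * registers are rewritten.
 */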
void mt7915_mac_set_timing(struct mt7915_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7915_dev *dev = phy->dev;
	struct mt7915_phy *ext_phy = mt7915_ext_phy(dev);
	u32 val, reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	int eifs_ofdm = 360, sifs = 10, offset;
	bool a_band = !(phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ);

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	if (ext_phy)
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       ext_phy->coverage_class);

	mt76_set(dev, MT_ARB_SCR(phy->band_idx),
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	udelay(1);

	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	if (!is_mt7915(&dev->mt76)) {
		if (!a_band) {
			mt76_wr(dev, MT_TMAC_ICR1(phy->band_idx),
				FIELD_PREP(MT_IFS_EIFS_CCK, 314));
			eifs_ofdm = 78;
		} else {
			eifs_ofdm = 84;
		}
	} else if (a_band) {
		sifs = 16;
	}

	mt76_wr(dev, MT_TMAC_CDTR(phy->band_idx), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(phy->band_idx), ofdm + reg_offset);
	mt76_wr(dev, MT_TMAC_ICR0(phy->band_idx),
		FIELD_PREP(MT_IFS_EIFS_OFDM, eifs_ofdm) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, sifs) |
		FIELD_PREP(MT_IFS_SLOT, phy->slottime));

	if (phy->slottime < 20 || a_band)
		val = MT7915_CFEND_RATE_DEFAULT;
	else
		val = MT7915_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_ACR0(phy->band_idx), MT_AGG_ACR_CFEND_RATE, val);
	mt76_clear(dev, MT_ARB_SCR(phy->band_idx),
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}

void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy)
{
	u32 reg;

	reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RXTD12(ext_phy) :
				      MT_WF_PHY_RXTD12_MT7916(ext_phy);
	mt76_set(dev, reg,
		 MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY |
		 MT_WF_PHY_RXTD12_IRPI_SW_CLR);

	reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RX_CTRL1(ext_phy) :
				      MT_WF_PHY_RX_CTRL1_MT7916(ext_phy);
	mt76_set(dev, reg, FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5));
}
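/* Estimate the noise floor from the IRPI/IPI histogram: each bin counts
 * idle-power samples at a known level, so the average of nf_power[]
 * weighted by the bin counts over all active RX chains approximates the
 * mean noise power.
 */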
static u8
mt7915_phy_get_nf(struct mt7915_phy *phy, int idx)
{
	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
	struct mt7915_dev *dev = phy->dev;
	u32 val, sum = 0, n = 0;
	int nss, i;

	for (nss = 0; nss < hweight8(phy->mt76->chainmask); nss++) {
		u32 reg = is_mt7915(&dev->mt76) ?
			MT_WF_IRPI_NSS(0, nss + (idx << dev->dbdc_support)) :
			MT_WF_IRPI_NSS_MT7916(idx, nss);

		for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
			val = mt76_rr(dev, reg);
			sum += val * nf_power[i];
			n += val;
		}
	}

	if (!n)
		return 0;

	return sum / n;
}
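/* Refresh survey noise data. phy->noise holds a Q4 fixed-point running
 * average: phy->noise += nf - (phy->noise >> 4) is an EWMA with 1/16
 * weight, seeded from the first non-zero noise-floor sample.
 */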
void mt7915_update_channel(struct mt76_phy *mphy)
{
	struct mt7915_phy *phy = (struct mt7915_phy *)mphy->priv;
	struct mt76_channel_state *state = mphy->chan_state;
	int nf;

	mt7915_mcu_get_chan_mib_info(phy, false);

	nf = mt7915_phy_get_nf(phy, phy->band_idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state->noise = -(phy->noise >> 4);
}

static bool
mt7915_wait_reset_state(struct mt7915_dev *dev, u32 state)
{
	bool ret;

	ret = wait_event_timeout(dev->reset_wait,
				 (READ_ONCE(dev->recovery.state) & state),
				 MT7915_RESET_TIMEOUT);

	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
	return ret;
}

static void
mt7915_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct ieee80211_hw *hw = priv;

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
		mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon,
				      BSS_CHANGED_BEACON_ENABLED);
		break;
	default:
		break;
	}
}

static void
mt7915_update_beacons(struct mt7915_dev *dev)
{
	struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];

	ieee80211_iterate_active_interfaces(dev->mt76.hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7915_update_vif_beacon, dev->mt76.hw);

	if (!mphy_ext)
		return;

	ieee80211_iterate_active_interfaces(mphy_ext->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7915_update_vif_beacon, mphy_ext->hw);
}

void mt7915_tx_token_put(struct mt7915_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id) {
		mt7915_txwi_free(dev, txwi, NULL, NULL);
		dev->mt76.token_count--;
	}
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}
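/* Full MAC restart used by the recovery path: mask interrupts, quiesce
 * TX scheduling and NAPI, drop all outstanding tokens, reset the DMA
 * engine, reload firmware and EEPROM settings, and re-run whichever
 * PHYs were active before the reset.
 */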
static int
mt7915_mac_restart(struct mt7915_dev *dev)
{
	struct mt7915_phy *phy2;
	struct mt76_phy *ext_phy;
	struct mt76_dev *mdev = &dev->mt76;
	int i, ret;

	ext_phy = dev->mt76.phys[MT_BAND1];
	phy2 = ext_phy ? ext_phy->priv : NULL;

	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}

	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
	}

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	if (ext_phy) {
		set_bit(MT76_RESET, &ext_phy->state);
		set_bit(MT76_MCU_RESET, &ext_phy->state);
	}

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);
	if (ext_phy)
		mt76_txq_schedule_all(ext_phy);

	/* disable all tx/rx napi */
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(mdev, i) {
		if (mdev->q_rx[i].ndesc)
			napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	/* token reinit */
	mt7915_tx_token_put(dev);
	idr_init(&dev->mt76.token);

	mt7915_dma_reset(dev, true);

	local_bh_disable();
	mt76_for_each_q_rx(mdev, i) {
		if (mdev->q_rx[i].ndesc) {
			napi_enable(&dev->mt76.napi[i]);
			napi_schedule(&dev->mt76.napi[i]);
		}
	}
	local_bh_enable();
	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);

	mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);

	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}
	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
	}

	/* load firmware */
	ret = mt7915_mcu_init_firmware(dev);
	if (ret)
		goto out;

	/* set the necessary init items */
	ret = mt7915_mcu_set_eeprom(dev);
	if (ret)
		goto out;

	mt7915_mac_init(dev);
	mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband);
	mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband);
	ret = mt7915_txbf_init(dev);

	if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
		ret = mt7915_run(dev->mphy.hw);
		if (ret)
			goto out;
	}

	if (ext_phy && test_bit(MT76_STATE_RUNNING, &ext_phy->state)) {
		ret = mt7915_run(ext_phy->hw);
		if (ret)
			goto out;
	}

out:
	/* reset done */
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	mt76_worker_enable(&dev->mt76.tx_worker);

	return ret;
}
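/* Whole-chip recovery: stop the queues and MAC work of both PHYs, then
 * retry mt7915_mac_restart() up to ten times before handing control
 * back to mac80211 via ieee80211_restart_hw().
 */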
static void
mt7915_mac_full_reset(struct mt7915_dev *dev)
{
	struct mt76_phy *ext_phy;
	int i;

	ext_phy = dev->mt76.phys[MT_BAND1];

	dev->recovery.hw_full_reset = true;

	wake_up(&dev->mt76.mcu.wait);
	ieee80211_stop_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_stop_queues(ext_phy->hw);

	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (ext_phy)
		cancel_delayed_work_sync(&ext_phy->mac_work);

	mutex_lock(&dev->mt76.mutex);
	for (i = 0; i < 10; i++) {
		if (!mt7915_mac_restart(dev))
			break;
	}
	mutex_unlock(&dev->mt76.mutex);

	if (i == 10)
		dev_err(dev->mt76.dev, "chip full reset failed\n");

	ieee80211_restart_hw(mt76_hw(dev));
	if (ext_phy)
		ieee80211_restart_hw(ext_phy->hw);

	ieee80211_wake_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_wake_queues(ext_phy->hw);

	dev->recovery.hw_full_reset = false;
	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     MT7915_WATCHDOG_TIME);
	if (ext_phy)
		ieee80211_queue_delayed_work(ext_phy->hw,
					     &ext_phy->mac_work,
					     MT7915_WATCHDOG_TIME);
}

/* system error recovery */
void mt7915_mac_reset_work(struct work_struct *work)
{
	struct mt7915_phy *phy2;
	struct mt76_phy *ext_phy;
	struct mt7915_dev *dev;
	int i;

	dev = container_of(work, struct mt7915_dev, reset_work);
	ext_phy = dev->mt76.phys[MT_BAND1];
	phy2 = ext_phy ? ext_phy->priv : NULL;

	/* chip full reset */
	if (dev->recovery.restart) {
		/* disable WA/WM WDT */
		mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA,
			   MT_MCU_CMD_WDT_MASK);

		if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT)
			dev->recovery.wa_reset_count++;
		else
			dev->recovery.wm_reset_count++;

		mt7915_mac_full_reset(dev);

		/* enable mcu irq */
		mt7915_irq_enable(dev, MT_INT_MCU_CMD);
		mt7915_irq_disable(dev, 0);

		/* enable WA/WM WDT */
		mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);

		dev->recovery.state = MT_MCU_CMD_NORMAL_STATE;
		dev->recovery.restart = false;
		return;
	}

	/* chip partial reset */
	if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
		return;

	ieee80211_stop_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_stop_queues(ext_phy->hw);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	}
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(&dev->mt76, i)
		napi_disable(&dev->mt76.napi[i]);
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);

	if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7915_dma_reset(dev, false);

		mt7915_tx_token_put(dev);
		idr_init(&dev->mt76.token);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
		mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);

	local_bh_disable();
	mt76_for_each_q_rx(&dev->mt76, i) {
		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}
	local_bh_enable();

	tasklet_schedule(&dev->irq_tasklet);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	mt76_worker_enable(&dev->mt76.tx_worker);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	ieee80211_wake_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_wake_queues(ext_phy->hw);

	mutex_unlock(&dev->mt76.mutex);

	mt7915_update_beacons(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     MT7915_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(ext_phy->hw,
					     &phy2->mt76->mac_work,
					     MT7915_WATCHDOG_TIME);
}

/* firmware coredump */
void mt7915_mac_dump_work(struct work_struct *work)
{
	const struct mt7915_mem_region *mem_region;
	struct mt7915_crash_data *crash_data;
	struct mt7915_dev *dev;
	struct mt7915_mem_hdr *hdr;
	size_t buf_len;
	int i;
	u32 num;
	u8 *buf;

	dev = container_of(work, struct mt7915_dev, dump_work);

	mutex_lock(&dev->dump_mutex);

	crash_data = mt7915_coredump_new(dev);
	if (!crash_data) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_coredump;
	}

	mem_region = mt7915_coredump_get_mem_layout(dev, &num);
	if (!mem_region || !crash_data->memdump_buf_len) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_memdump;
	}

	buf = crash_data->memdump_buf;
	buf_len = crash_data->memdump_buf_len;

	/* dumping memory content... */
	memset(buf, 0, buf_len);
	for (i = 0; i < num; i++) {
		if (mem_region->len > buf_len) {
			dev_warn(dev->mt76.dev, "%s len %lu is too large\n",
				 mem_region->name,
				 (unsigned long)mem_region->len);
			break;
		}

		/* reserve space for the header */
		hdr = (void *)buf;
		buf += sizeof(*hdr);
		buf_len -= sizeof(*hdr);

		mt7915_memcpy_fromio(dev, buf, mem_region->start,
				     mem_region->len);

		hdr->start = mem_region->start;
		hdr->len = mem_region->len;

		if (!mem_region->len)
			/* note: the header remains, just with zero length */
			break;

		buf += mem_region->len;
		buf_len -= mem_region->len;

		mem_region++;
	}

	mutex_unlock(&dev->dump_mutex);

skip_memdump:
	mt7915_coredump_submit(dev);
skip_coredump:
	queue_work(dev->mt76.wq, &dev->reset_work);
}

void mt7915_reset(struct mt7915_dev *dev)
{
	if (!dev->recovery.hw_init_done)
		return;

	if (dev->recovery.hw_full_reset)
		return;

	/* wm/wa exception: do full recovery */
	if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) {
		dev->recovery.restart = true;
		dev_info(dev->mt76.dev,
			 "%s indicated firmware crash, attempting recovery\n",
			 wiphy_name(dev->mt76.hw->wiphy));

		mt7915_irq_disable(dev, MT_INT_MCU_CMD);
		queue_work(dev->mt76.wq, &dev->dump_work);
		return;
	}

	queue_work(dev->mt76.wq, &dev->reset_work);
	wake_up(&dev->reset_wait);
}
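/* Accumulate the hardware MIB counters into the driver's mib_stats.
 * Register layouts differ between mt7915 and later generations, hence
 * the is_mt7915() selection between plain and *_MT7916 field masks.
 */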
void mt7915_mac_update_stats(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	struct mib_stats *mib = &phy->mib;
	int i, aggr0 = 0, aggr1, cnt;
	u32 val;

	cnt = mt76_rr(dev, MT_MIB_SDR3(phy->band_idx));
	mib->fcs_err_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR4(phy->band_idx));
	mib->rx_fifo_full_cnt += FIELD_GET(MT_MIB_SDR4_RX_FIFO_FULL_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR5(phy->band_idx));
	mib->rx_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR6(phy->band_idx));
	mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR7(phy->band_idx));
	mib->rx_vector_mismatch_cnt +=
		FIELD_GET(MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR8(phy->band_idx));
	mib->rx_delimiter_fail_cnt +=
		FIELD_GET(MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR10(phy->band_idx));
	mib->rx_mrdy_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR11(phy->band_idx));
	mib->rx_len_mismatch_cnt +=
		FIELD_GET(MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR12(phy->band_idx));
	mib->tx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR13(phy->band_idx));
	mib->tx_stop_q_empty_cnt +=
		FIELD_GET(MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR14(phy->band_idx));
	mib->tx_mpdu_attempts_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR15(phy->band_idx));
	mib->tx_mpdu_success_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR16(phy->band_idx));
	mib->primary_cca_busy_time +=
		FIELD_GET(MT_MIB_SDR16_PRIMARY_CCA_BUSY_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR17(phy->band_idx));
	mib->secondary_cca_busy_time +=
		FIELD_GET(MT_MIB_SDR17_SECONDARY_CCA_BUSY_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR18(phy->band_idx));
	mib->primary_energy_detect_time +=
		FIELD_GET(MT_MIB_SDR18_PRIMARY_ENERGY_DETECT_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR19(phy->band_idx));
	mib->cck_mdrdy_time += FIELD_GET(MT_MIB_SDR19_CCK_MDRDY_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR20(phy->band_idx));
	mib->ofdm_mdrdy_time +=
		FIELD_GET(MT_MIB_SDR20_OFDM_VHT_MDRDY_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR21(phy->band_idx));
	mib->green_mdrdy_time +=
		FIELD_GET(MT_MIB_SDR21_GREEN_MDRDY_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR22(phy->band_idx));
	mib->rx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR23(phy->band_idx));
	mib->rx_ampdu_bytes_cnt += cnt;
	cnt = mt76_rr(dev, MT_MIB_SDR24(phy->band_idx));
	mib->rx_ampdu_valid_subframe_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR25(phy->band_idx));
	mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR27(phy->band_idx));
	mib->tx_rwp_fail_cnt +=
		FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR28(phy->band_idx));
	mib->tx_rwp_need_cnt +=
		FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR29(phy->band_idx));
	mib->rx_pfdrop_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDRVEC(phy->band_idx));
	mib->rx_vec_queue_overflow_drop_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR31(phy->band_idx));
	mib->rx_ba_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDRMUBF(phy->band_idx));
	mib->tx_bf_cnt += FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt);

	cnt = mt76_rr(dev, MT_MIB_DR8(phy->band_idx));
	mib->tx_mu_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_DR9(phy->band_idx));
	mib->tx_mu_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_DR11(phy->band_idx));
	mib->tx_su_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_ETBF_PAR_RPT0(phy->band_idx));
	mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_PAR_RPT0_FB_BW, cnt);
	mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NC, cnt);
	mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NR, cnt);

	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
		cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
		mib->tx_amsdu[i] += cnt;
		mib->tx_amsdu_cnt += cnt;
	}

	if (is_mt7915(&dev->mt76)) {
		for (i = 0, aggr1 = aggr0 + 8; i < 4; i++) {
			val = mt76_rr(dev, MT_MIB_MB_SDR1(phy->band_idx, (i << 4)));
			mib->ba_miss_cnt +=
				FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
			mib->ack_fail_cnt +=
				FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);

			val = mt76_rr(dev, MT_MIB_MB_SDR0(phy->band_idx, (i << 4)));
			mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
			mib->rts_retries_cnt +=
				FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);

			val = mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
			phy->mt76->aggr_stats[aggr0++] += val & 0xffff;
			phy->mt76->aggr_stats[aggr0++] += val >> 16;

			val = mt76_rr(dev, MT_TX_AGG_CNT2(phy->band_idx, i));
			phy->mt76->aggr_stats[aggr1++] += val & 0xffff;
			phy->mt76->aggr_stats[aggr1++] += val >> 16;
		}

		cnt = mt76_rr(dev, MT_MIB_SDR32(phy->band_idx));
		mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);

		cnt = mt76_rr(dev, MT_MIB_SDR33(phy->band_idx));
		mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR33_TX_PKT_IBF_CNT, cnt);

		cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(phy->band_idx));
		mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt);
		mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt);

		cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(phy->band_idx));
		mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_ETBF_TX_FB_CPL, cnt);
		mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_ETBF_TX_FB_TRI, cnt);
		cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(phy->band_idx));
		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, cnt);
		mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, cnt);
		mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, cnt);
		mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, cnt);
	} else {
		for (i = 0; i < 2; i++) {
			/* rts count */
			val = mt76_rr(dev, MT_MIB_MB_SDR0(phy->band_idx, (i << 2)));
			mib->rts_cnt += FIELD_GET(GENMASK(15, 0), val);
			mib->rts_cnt += FIELD_GET(GENMASK(31, 16), val);

			/* rts retry count */
			val = mt76_rr(dev, MT_MIB_MB_SDR1(phy->band_idx, (i << 2)));
			mib->rts_retries_cnt += FIELD_GET(GENMASK(15, 0), val);
			mib->rts_retries_cnt += FIELD_GET(GENMASK(31, 16), val);

			/* ba miss count */
			val = mt76_rr(dev, MT_MIB_MB_SDR2(phy->band_idx, (i << 2)));
			mib->ba_miss_cnt += FIELD_GET(GENMASK(15, 0), val);
			mib->ba_miss_cnt += FIELD_GET(GENMASK(31, 16), val);

			/* ack fail count */
			val = mt76_rr(dev, MT_MIB_MB_BFTF(phy->band_idx, (i << 2)));
			mib->ack_fail_cnt += FIELD_GET(GENMASK(15, 0), val);
			mib->ack_fail_cnt += FIELD_GET(GENMASK(31, 16), val);
		}

		for (i = 0; i < 8; i++) {
			val = mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
			phy->mt76->aggr_stats[aggr0++] += FIELD_GET(GENMASK(15, 0), val);
			phy->mt76->aggr_stats[aggr0++] += FIELD_GET(GENMASK(31, 16), val);
		}

		cnt = mt76_rr(dev, MT_MIB_SDR32(phy->band_idx));
		mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
		mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
		mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
		mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);

		cnt = mt76_rr(dev, MT_MIB_BFCR7(phy->band_idx));
		mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_MIB_BFCR7_BFEE_TX_FB_CPL, cnt);

		cnt = mt76_rr(dev, MT_MIB_BFCR2(phy->band_idx));
		mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_MIB_BFCR2_BFEE_TX_FB_TRIG, cnt);

		cnt = mt76_rr(dev, MT_MIB_BFCR0(phy->band_idx));
		mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
		mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);
		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);

		cnt = mt76_rr(dev, MT_MIB_BFCR1(phy->band_idx));
		mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
	}
}

static void mt7915_mac_severe_check(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	u32 trb;

	if (!phy->omac_mask)
		return;

	/* In rare cases, TRB pointers might get out of sync, which stops
	 * RMAC Rx, so check the status periodically to see if the TRB
	 * hardware requires minimal recovery.
	 */
	trb = mt76_rr(dev, MT_TRB_RXPSR0(phy->band_idx));

	if ((FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, trb) !=
	     FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, trb)) &&
	    (FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, phy->trb_ts) !=
	     FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, phy->trb_ts)) &&
	    trb == phy->trb_ts)
		mt7915_mcu_set_ser(dev, SER_RECOVER, SER_SET_RECOVER_L3_RX_ABORT,
				   ext_phy);

	phy->trb_ts = trb;
}

void mt7915_mac_sta_rc_work(struct work_struct *work)
{
	struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work);
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct mt7915_sta *msta;
	u32 changed;
	LIST_HEAD(list);

	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&dev->sta_rc_list, &list);

	while (!list_empty(&list)) {
		msta = list_first_entry(&list, struct mt7915_sta, rc_list);
		list_del_init(&msta->rc_list);
		changed = msta->changed;
		msta->changed = 0;
		spin_unlock_bh(&dev->sta_poll_lock);

		sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
		vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
			       IEEE80211_RC_NSS_CHANGED |
			       IEEE80211_RC_BW_CHANGED))
			mt7915_mcu_add_rate_ctrl(dev, vif, sta, true);

		if (changed & IEEE80211_RC_SMPS_CHANGED)
			mt7915_mcu_add_smps(dev, vif, sta);

		spin_lock_bh(&dev->sta_poll_lock);
	}

	spin_unlock_bh(&dev->sta_poll_lock);
}
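/* Periodic per-PHY housekeeping: update the channel survey on every run
 * and, every fifth run (i.e. every 5 * MT7915_WATCHDOG_TIME), refresh
 * the MIB statistics and run the TRB sanity check.
 */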
	trb = mt76_rr(dev, MT_TRB_RXPSR0(phy->band_idx));

	if ((FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, trb) !=
	     FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, trb)) &&
	    (FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, phy->trb_ts) !=
	     FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, phy->trb_ts)) &&
	    trb == phy->trb_ts)
		mt7915_mcu_set_ser(dev, SER_RECOVER, SER_SET_RECOVER_L3_RX_ABORT,
				   ext_phy);

	phy->trb_ts = trb;
}

void mt7915_mac_sta_rc_work(struct work_struct *work)
{
	struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work);
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct mt7915_sta *msta;
	u32 changed;
	LIST_HEAD(list);

	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&dev->sta_rc_list, &list);

	while (!list_empty(&list)) {
		msta = list_first_entry(&list, struct mt7915_sta, rc_list);
		list_del_init(&msta->rc_list);
		changed = msta->changed;
		msta->changed = 0;
		spin_unlock_bh(&dev->sta_poll_lock);

		sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
		vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
			       IEEE80211_RC_NSS_CHANGED |
			       IEEE80211_RC_BW_CHANGED))
			mt7915_mcu_add_rate_ctrl(dev, vif, sta, true);

		if (changed & IEEE80211_RC_SMPS_CHANGED)
			mt7915_mcu_add_smps(dev, vif, sta);

		spin_lock_bh(&dev->sta_poll_lock);
	}

	spin_unlock_bh(&dev->sta_poll_lock);
}

void mt7915_mac_work(struct work_struct *work)
{
	struct mt7915_phy *phy;
	struct mt76_phy *mphy;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mutex_lock(&mphy->dev->mutex);

	mt76_update_survey(mphy);
	if (++mphy->mac_work_count == 5) {
		mphy->mac_work_count = 0;

		mt7915_mac_update_stats(phy);
		mt7915_mac_severe_check(phy);
	}

	mutex_unlock(&mphy->dev->mutex);

	mt76_tx_status_check(mphy->dev, false);

	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
				     MT7915_WATCHDOG_TIME);
}

static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;

	if (phy->rdd_state & BIT(0))
		mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0,
					MT_RX_SEL0, 0);
	if (phy->rdd_state & BIT(1))
		mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1,
					MT_RX_SEL0, 0);
}

static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain)
{
	int err, region;

	switch (dev->mt76.region) {
	case NL80211_DFS_ETSI:
		region = 0;
		break;
	case NL80211_DFS_JP:
		region = 2;
		break;
	case NL80211_DFS_FCC:
	default:
		region = 1;
		break;
	}

	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain,
				      MT_RX_SEL0, region);
	if (err < 0)
		return err;

	if (is_mt7915(&dev->mt76)) {
		err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT, chain,
					      0, dev->dbdc_support ? 2 : 0);
		if (err < 0)
			return err;
	}
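
	/* finally, switch the chain into radar detection mode */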
	return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain,
				       MT_RX_SEL0, 1);
}

static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7915_dev *dev = phy->dev;
	int err;

	/* start CAC */
	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, phy->band_idx,
				      MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	err = mt7915_dfs_start_rdd(dev, phy->band_idx);
	if (err < 0)
		return err;

	phy->rdd_state |= BIT(phy->band_idx);

	if (!is_mt7915(&dev->mt76))
		return 0;

	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
		err = mt7915_dfs_start_rdd(dev, 1);
		if (err < 0)
			return err;

		phy->rdd_state |= BIT(1);
	}

	return 0;
}

static int
mt7915_dfs_init_radar_specs(struct mt7915_phy *phy)
{
	const struct mt7915_dfs_radar_spec *radar_specs;
	struct mt7915_dev *dev = phy->dev;
	int err, i;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		err = mt7915_mcu_set_fcc5_lpn(dev, 8);
		if (err < 0)
			return err;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7915_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7915_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}

int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	enum mt76_dfs_state dfs_state, prev_state;
	int err;

	prev_state = phy->mt76->dfs_state;
	dfs_state = mt76_phy_dfs_state(phy->mt76);

	if (prev_state == dfs_state)
		return 0;

	if (prev_state == MT_DFS_STATE_UNKNOWN)
		mt7915_dfs_stop_radar_detector(phy);

	if (dfs_state == MT_DFS_STATE_DISABLED)
		goto stop;

	if (prev_state <= MT_DFS_STATE_DISABLED) {
		err = mt7915_dfs_init_radar_specs(phy);
		if (err < 0)
			return err;

		err = mt7915_dfs_start_radar_detector(phy);
		if (err < 0)
			return err;

		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
	}

	if (dfs_state == MT_DFS_STATE_CAC)
		return 0;

	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
				      phy->band_idx, MT_RX_SEL0, 0);
	if (err < 0) {
		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
		return err;
	}

	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
	return 0;

stop:
	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START,
				      phy->band_idx, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	if (is_mt7915(&dev->mt76)) {
		err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT,
					      phy->band_idx, 0,
					      dev->dbdc_support ? 2 : 0);
		if (err < 0)
			return err;
	}
	mt7915_dfs_stop_radar_detector(phy);
	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;

	return 0;
}

/* convert the TWT wake duration from 256us units to TSF microseconds */
static int
mt7915_mac_twt_duration_align(int duration)
{
	return duration << 8;
}

/* Insert the flow into the TSF-ordered schedule list, using the first gap
 * large enough to hold its wake duration, and return the start TSF
 * assigned to it.
 */
static u64
mt7915_mac_twt_sched_list_add(struct mt7915_dev *dev,
			      struct mt7915_twt_flow *flow)
{
	struct mt7915_twt_flow *iter, *iter_next;
	u32 duration = flow->duration << 8;
	u64 start_tsf;

	iter = list_first_entry_or_null(&dev->twt_list,
					struct mt7915_twt_flow, list);
	if (!iter || !iter->sched || iter->start_tsf > duration) {
		/* add flow as first entry in the list */
		list_add(&flow->list, &dev->twt_list);
		return 0;
	}

	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
		start_tsf = iter->start_tsf +
			    mt7915_mac_twt_duration_align(iter->duration);
		if (list_is_last(&iter->list, &dev->twt_list))
			break;

		if (!iter_next->sched ||
		    iter_next->start_tsf > start_tsf + duration) {
			list_add(&flow->list, &iter->list);
			goto out;
		}
	}

	/* add flow as last entry in the list */
	list_add_tail(&flow->list, &dev->twt_list);
out:
	return start_tsf;
}

static int mt7915_mac_check_twt_req(struct ieee80211_twt_setup *twt)
{
	struct ieee80211_twt_params *twt_agrt;
	u64 interval, duration;
	u16 mantissa;
	u8 exp;

	/* only individual agreement supported */
	if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
		return -EOPNOTSUPP;

	/* only 256us unit supported */
	if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
		return -EOPNOTSUPP;

	twt_agrt = (struct ieee80211_twt_params *)twt->params;

	/* explicit agreement not supported */
	if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
		return -EOPNOTSUPP;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
			le16_to_cpu(twt_agrt->req_type));
	mantissa = le16_to_cpu(twt_agrt->mantissa);
	duration = twt_agrt->min_twt_dur << 8;

	/* the wake interval (mantissa * 2^exp) must cover the wake duration */
	interval = (u64)mantissa << exp;
	if (interval < duration)
		return -EOPNOTSUPP;

	return 0;
}

static bool
mt7915_mac_twt_param_equal(struct mt7915_sta *msta,
			   struct ieee80211_twt_params *twt_agrt)
{
	u16 type = le16_to_cpu(twt_agrt->req_type);
	u8 exp;
	int i;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
	for (i = 0; i < MT7915_MAX_STA_TWT_AGRT; i++) {
		struct mt7915_twt_flow *f;

		if (!(msta->twt.flowid_mask & BIT(i)))
			continue;

		f = &msta->twt.flow[i];
		if (f->duration == twt_agrt->min_twt_dur &&
		    f->mantissa == twt_agrt->mantissa &&
		    f->exp == exp &&
		    f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
		    f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
		    f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
			return true;
	}

	return false;
}

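/* mac80211 .add_twt_setup handler: validate the requested individual TWT
 * agreement, pick a free flow id and hardware table slot, place the flow
 * on the TSF schedule and offload it to the MCU. The setup command in the
 * response is rewritten to ACCEPT, DICTATE or REJECT accordingly.
 */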
void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta,
			      struct ieee80211_twt_setup *twt)
{
	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
	struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
	u16 req_type = le16_to_cpu(twt_agrt->req_type);
	enum ieee80211_twt_setup_cmd sta_setup_cmd;
	struct mt7915_dev *dev = mt7915_hw_dev(hw);
	struct mt7915_twt_flow *flow;
	int flowid, table_id;
	u8 exp;

	if (mt7915_mac_check_twt_req(twt))
		goto out;

	mutex_lock(&dev->mt76.mutex);

	if (dev->twt.n_agrt == MT7915_MAX_TWT_AGRT)
		goto unlock;

	if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
		goto unlock;

	if (twt_agrt->min_twt_dur < MT7915_MIN_TWT_DUR) {
		setup_cmd = TWT_SETUP_CMD_DICTATE;
		twt_agrt->min_twt_dur = MT7915_MIN_TWT_DUR;
		goto unlock;
	}

	/* pick the first free per-sta flow id and advertise it back */
	flowid = ffs(~msta->twt.flowid_mask) - 1;
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
	twt_agrt->req_type |= le16_encode_bits(flowid,
					       IEEE80211_TWT_REQTYPE_FLOWID);

	table_id = ffs(~dev->twt.table_mask) - 1;
	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);

	if (mt7915_mac_twt_param_equal(msta, twt_agrt))
		goto unlock;

	flow = &msta->twt.flow[flowid];
	memset(flow, 0, sizeof(*flow));
	INIT_LIST_HEAD(&flow->list);
	flow->wcid = msta->wcid.idx;
	flow->table_id = table_id;
	flow->id = flowid;
	flow->duration = twt_agrt->min_twt_dur;
	flow->mantissa = twt_agrt->mantissa;
	flow->exp = exp;
	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);

	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
		u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
		u64 flow_tsf, curr_tsf;
		u32 rem;

		flow->sched = true;
		flow->start_tsf = mt7915_mac_twt_sched_list_add(dev, flow);
		curr_tsf = __mt7915_get_tsf(hw, msta->vif);
		/* first wake time after the current TSF that is congruent
		 * to start_tsf modulo the wake interval
		 */
		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
		flow_tsf = curr_tsf + interval - rem;
		twt_agrt->twt = cpu_to_le64(flow_tsf);
	} else {
		list_add_tail(&flow->list, &dev->twt_list);
	}
	flow->tsf = le64_to_cpu(twt_agrt->twt);

	if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
		goto unlock;

	setup_cmd = TWT_SETUP_CMD_ACCEPT;
	dev->twt.table_mask |= BIT(table_id);
	msta->twt.flowid_mask |= BIT(flowid);
	dev->twt.n_agrt++;

unlock:
	mutex_unlock(&dev->mt76.mutex);
out:
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt_agrt->req_type |=
		le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
		       (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
}

void mt7915_mac_twt_teardown_flow(struct mt7915_dev *dev,
				  struct mt7915_sta *msta,
				  u8 flowid)
{
	struct mt7915_twt_flow *flow;

	lockdep_assert_held(&dev->mt76.mutex);

	if (flowid >= ARRAY_SIZE(msta->twt.flow))
		return;

	if (!(msta->twt.flowid_mask & BIT(flowid)))
		return;

	flow = &msta->twt.flow[flowid];
	if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow,
				       MCU_TWT_AGRT_DELETE))
		return;

	list_del_init(&flow->list);
	msta->twt.flowid_mask &= ~BIT(flowid);
	dev->twt.table_mask &= ~BIT(flow->table_id);
	dev->twt.n_agrt--;
}
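
/* Note: a teardown helper like the above is typically invoked for every
 * possible flow id when a station is removed; an illustrative (not
 * upstream-verified) call pattern would be:
 *
 *	for (i = 0; i < ARRAY_SIZE(msta->twt.flow); i++)
 *		mt7915_mac_twt_teardown_flow(dev, msta, i);
 *
 * with dev->mt76.mutex held, as required by the lockdep assertion.
 */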