1 // SPDX-License-Identifier: ISC 2 /* Copyright (C) 2020 MediaTek Inc. */ 3 4 #include <linux/etherdevice.h> 5 #include <linux/timekeeping.h> 6 #if defined(__FreeBSD__) 7 #include <linux/delay.h> 8 #include <linux/math64.h> 9 #endif 10 #include "coredump.h" 11 #include "mt7915.h" 12 #include "../dma.h" 13 #include "mac.h" 14 #include "mcu.h" 15 16 #define to_rssi(field, rcpi) ((FIELD_GET(field, rcpi) - 220) / 2) 17 18 static const struct mt7915_dfs_radar_spec etsi_radar_specs = { 19 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, 20 .radar_pattern = { 21 [5] = { 1, 0, 6, 32, 28, 0, 990, 5010, 17, 1, 1 }, 22 [6] = { 1, 0, 9, 32, 28, 0, 615, 5010, 27, 1, 1 }, 23 [7] = { 1, 0, 15, 32, 28, 0, 240, 445, 27, 1, 1 }, 24 [8] = { 1, 0, 12, 32, 28, 0, 240, 510, 42, 1, 1 }, 25 [9] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 }, 26 [10] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 }, 27 [11] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 }, 28 [12] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 }, 29 }, 30 }; 31 32 static const struct mt7915_dfs_radar_spec fcc_radar_specs = { 33 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, 34 .radar_pattern = { 35 [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 }, 36 [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 }, 37 [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 }, 38 [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 }, 39 [4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 }, 40 }, 41 }; 42 43 static const struct mt7915_dfs_radar_spec jp_radar_specs = { 44 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, 45 .radar_pattern = { 46 [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 }, 47 [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 }, 48 [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 }, 49 [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 }, 50 [4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 }, 51 [13] = { 1, 0, 7, 32, 28, 0, 3836, 3856, 14, 1, 1 }, 52 [14] = { 1, 0, 6, 32, 28, 0, 615, 5010, 110, 1, 1 }, 53 [15] = { 1, 1, 0, 0, 0, 0, 15, 5010, 110, 0, 0, 12, 32, 28 }, 54 }, 55 }; 56 57 static struct mt76_wcid *mt7915_rx_get_wcid(struct mt7915_dev *dev, 58 u16 idx, bool unicast) 59 { 60 struct mt7915_sta *sta; 61 struct mt76_wcid *wcid; 62 63 if (idx >= ARRAY_SIZE(dev->mt76.wcid)) 64 return NULL; 65 66 wcid = rcu_dereference(dev->mt76.wcid[idx]); 67 if (unicast || !wcid) 68 return wcid; 69 70 if (!wcid->sta) 71 return NULL; 72 73 sta = container_of(wcid, struct mt7915_sta, wcid); 74 if (!sta->vif) 75 return NULL; 76 77 return &sta->vif->sta.wcid; 78 } 79 80 bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask) 81 { 82 mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX, 83 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask); 84 85 return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 86 0, 5000); 87 } 88 89 u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid, u8 dw) 90 { 91 mt76_wr(dev, MT_WTBLON_TOP_WDUCR, 92 FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7))); 93 94 return MT_WTBL_LMAC_OFFS(wcid, dw); 95 } 96 97 static void mt7915_mac_sta_poll(struct mt7915_dev *dev) 98 { 99 static const u8 ac_to_tid[] = { 100 [IEEE80211_AC_BE] = 0, 101 [IEEE80211_AC_BK] = 1, 102 [IEEE80211_AC_VI] = 4, 103 [IEEE80211_AC_VO] = 6 104 }; 105 struct ieee80211_sta *sta; 106 struct mt7915_sta *msta; 107 struct rate_info *rate; 108 u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS]; 109 #if defined(__linux__) 110 LIST_HEAD(sta_poll_list); 111 #elif defined(__FreeBSD__) 112 LINUX_LIST_HEAD(sta_poll_list); 113 #endif 114 int i; 115 116 spin_lock_bh(&dev->mt76.sta_poll_lock); 117 list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list); 118 spin_unlock_bh(&dev->mt76.sta_poll_lock); 119 120 rcu_read_lock(); 121 122 while (true) { 123 bool clear = false; 124 u32 addr, val; 125 u16 idx; 126 s8 rssi[4]; 127 u8 bw; 128 129 spin_lock_bh(&dev->mt76.sta_poll_lock); 130 if (list_empty(&sta_poll_list)) { 131 spin_unlock_bh(&dev->mt76.sta_poll_lock); 132 break; 133 } 134 msta = list_first_entry(&sta_poll_list, 135 struct mt7915_sta, wcid.poll_list); 136 list_del_init(&msta->wcid.poll_list); 137 spin_unlock_bh(&dev->mt76.sta_poll_lock); 138 139 idx = msta->wcid.idx; 140 141 /* refresh peer's airtime reporting */ 142 addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 20); 143 144 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 145 u32 tx_last = msta->airtime_ac[i]; 146 u32 rx_last = msta->airtime_ac[i + 4]; 147 148 msta->airtime_ac[i] = mt76_rr(dev, addr); 149 msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4); 150 151 tx_time[i] = msta->airtime_ac[i] - tx_last; 152 rx_time[i] = msta->airtime_ac[i + 4] - rx_last; 153 154 if ((tx_last | rx_last) & BIT(30)) 155 clear = true; 156 157 addr += 8; 158 } 159 160 if (clear) { 161 mt7915_mac_wtbl_update(dev, idx, 162 MT_WTBL_UPDATE_ADM_COUNT_CLEAR); 163 memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac)); 164 } 165 166 if (!msta->wcid.sta) 167 continue; 168 169 sta = container_of((void *)msta, struct ieee80211_sta, 170 drv_priv); 171 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 172 u8 queue = mt76_connac_lmac_mapping(i); 173 u32 tx_cur = tx_time[queue]; 174 u32 rx_cur = rx_time[queue]; 175 u8 tid = ac_to_tid[i]; 176 177 if (!tx_cur && !rx_cur) 178 continue; 179 180 ieee80211_sta_register_airtime(sta, tid, tx_cur, 181 rx_cur); 182 } 183 184 /* 185 * We don't support reading GI info from txs packets. 186 * For accurate tx status reporting and AQL improvement, 187 * we need to make sure that flags match so polling GI 188 * from per-sta counters directly. 189 */ 190 rate = &msta->wcid.rate; 191 addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 7); 192 val = mt76_rr(dev, addr); 193 194 switch (rate->bw) { 195 case RATE_INFO_BW_160: 196 bw = IEEE80211_STA_RX_BW_160; 197 break; 198 case RATE_INFO_BW_80: 199 bw = IEEE80211_STA_RX_BW_80; 200 break; 201 case RATE_INFO_BW_40: 202 bw = IEEE80211_STA_RX_BW_40; 203 break; 204 default: 205 bw = IEEE80211_STA_RX_BW_20; 206 break; 207 } 208 209 if (rate->flags & RATE_INFO_FLAGS_HE_MCS) { 210 u8 offs = 24 + 2 * bw; 211 212 rate->he_gi = (val & (0x3 << offs)) >> offs; 213 } else if (rate->flags & 214 (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) { 215 if (val & BIT(12 + bw)) 216 rate->flags |= RATE_INFO_FLAGS_SHORT_GI; 217 else 218 rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI; 219 } 220 221 /* get signal strength of resp frames (CTS/BA/ACK) */ 222 addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 30); 223 val = mt76_rr(dev, addr); 224 225 rssi[0] = to_rssi(GENMASK(7, 0), val); 226 rssi[1] = to_rssi(GENMASK(15, 8), val); 227 rssi[2] = to_rssi(GENMASK(23, 16), val); 228 rssi[3] = to_rssi(GENMASK(31, 14), val); 229 230 msta->ack_signal = 231 mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi); 232 233 ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal); 234 } 235 236 rcu_read_unlock(); 237 } 238 239 void mt7915_mac_enable_rtscts(struct mt7915_dev *dev, 240 struct ieee80211_vif *vif, bool enable) 241 { 242 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 243 u32 addr; 244 245 addr = mt7915_mac_wtbl_lmac_addr(dev, mvif->sta.wcid.idx, 5); 246 if (enable) 247 mt76_set(dev, addr, BIT(5)); 248 else 249 mt76_clear(dev, addr, BIT(5)); 250 } 251 252 static void 253 mt7915_wed_check_ppe(struct mt7915_dev *dev, struct mt76_queue *q, 254 struct mt7915_sta *msta, struct sk_buff *skb, 255 u32 info) 256 { 257 struct ieee80211_vif *vif; 258 struct wireless_dev *wdev; 259 260 if (!msta || !msta->vif) 261 return; 262 263 if (!mt76_queue_is_wed_rx(q)) 264 return; 265 266 if (!(info & MT_DMA_INFO_PPE_VLD)) 267 return; 268 269 vif = container_of((void *)msta->vif, struct ieee80211_vif, 270 drv_priv); 271 wdev = ieee80211_vif_to_wdev(vif); 272 skb->dev = wdev->netdev; 273 274 mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb, 275 FIELD_GET(MT_DMA_PPE_CPU_REASON, info), 276 FIELD_GET(MT_DMA_PPE_ENTRY, info)); 277 } 278 279 static int 280 mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb, 281 enum mt76_rxq_id q, u32 *info) 282 { 283 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; 284 struct mt76_phy *mphy = &dev->mt76.phy; 285 struct mt7915_phy *phy = &dev->phy; 286 struct ieee80211_supported_band *sband; 287 __le32 *rxd = (__le32 *)skb->data; 288 __le32 *rxv = NULL; 289 u32 rxd0 = le32_to_cpu(rxd[0]); 290 u32 rxd1 = le32_to_cpu(rxd[1]); 291 u32 rxd2 = le32_to_cpu(rxd[2]); 292 u32 rxd3 = le32_to_cpu(rxd[3]); 293 u32 rxd4 = le32_to_cpu(rxd[4]); 294 u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM; 295 bool unicast, insert_ccmp_hdr = false; 296 u8 remove_pad, amsdu_info; 297 u8 mode = 0, qos_ctl = 0; 298 struct mt7915_sta *msta = NULL; 299 u32 csum_status = *(u32 *)skb->cb; 300 bool hdr_trans; 301 u16 hdr_gap; 302 u16 seq_ctrl = 0; 303 __le16 fc = 0; 304 int idx; 305 306 memset(status, 0, sizeof(*status)); 307 308 if ((rxd1 & MT_RXD1_NORMAL_BAND_IDX) && !phy->mt76->band_idx) { 309 mphy = dev->mt76.phys[MT_BAND1]; 310 if (!mphy) 311 return -EINVAL; 312 313 phy = mphy->priv; 314 status->phy_idx = 1; 315 } 316 317 if (!test_bit(MT76_STATE_RUNNING, &mphy->state)) 318 return -EINVAL; 319 320 if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR) 321 return -EINVAL; 322 323 hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS; 324 if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM)) 325 return -EINVAL; 326 327 /* ICV error or CCMP/BIP/WPI MIC error */ 328 if (rxd1 & MT_RXD1_NORMAL_ICV_ERR) 329 status->flag |= RX_FLAG_ONLY_MONITOR; 330 331 unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M; 332 idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1); 333 status->wcid = mt7915_rx_get_wcid(dev, idx, unicast); 334 335 if (status->wcid) { 336 msta = container_of(status->wcid, struct mt7915_sta, wcid); 337 spin_lock_bh(&dev->mt76.sta_poll_lock); 338 if (list_empty(&msta->wcid.poll_list)) 339 list_add_tail(&msta->wcid.poll_list, 340 &dev->mt76.sta_poll_list); 341 spin_unlock_bh(&dev->mt76.sta_poll_lock); 342 } 343 344 status->freq = mphy->chandef.chan->center_freq; 345 status->band = mphy->chandef.chan->band; 346 if (status->band == NL80211_BAND_5GHZ) 347 sband = &mphy->sband_5g.sband; 348 else if (status->band == NL80211_BAND_6GHZ) 349 sband = &mphy->sband_6g.sband; 350 else 351 sband = &mphy->sband_2g.sband; 352 353 if (!sband->channels) 354 return -EINVAL; 355 356 if ((rxd0 & csum_mask) == csum_mask && 357 !(csum_status & (BIT(0) | BIT(2) | BIT(3)))) 358 skb->ip_summed = CHECKSUM_UNNECESSARY; 359 360 if (rxd1 & MT_RXD1_NORMAL_FCS_ERR) 361 status->flag |= RX_FLAG_FAILED_FCS_CRC; 362 363 if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR) 364 status->flag |= RX_FLAG_MMIC_ERROR; 365 366 if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 && 367 !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) { 368 status->flag |= RX_FLAG_DECRYPTED; 369 status->flag |= RX_FLAG_IV_STRIPPED; 370 status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED; 371 } 372 373 remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2); 374 375 if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR) 376 return -EINVAL; 377 378 rxd += 6; 379 if (rxd1 & MT_RXD1_NORMAL_GROUP_4) { 380 u32 v0 = le32_to_cpu(rxd[0]); 381 u32 v2 = le32_to_cpu(rxd[2]); 382 383 fc = cpu_to_le16(FIELD_GET(MT_RXD6_FRAME_CONTROL, v0)); 384 qos_ctl = FIELD_GET(MT_RXD8_QOS_CTL, v2); 385 seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, v2); 386 387 rxd += 4; 388 if ((u8 *)rxd - skb->data >= skb->len) 389 return -EINVAL; 390 } 391 392 if (rxd1 & MT_RXD1_NORMAL_GROUP_1) { 393 u8 *data = (u8 *)rxd; 394 395 if (status->flag & RX_FLAG_DECRYPTED) { 396 switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) { 397 case MT_CIPHER_AES_CCMP: 398 case MT_CIPHER_CCMP_CCX: 399 case MT_CIPHER_CCMP_256: 400 insert_ccmp_hdr = 401 FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2); 402 fallthrough; 403 case MT_CIPHER_TKIP: 404 case MT_CIPHER_TKIP_NO_MIC: 405 case MT_CIPHER_GCMP: 406 case MT_CIPHER_GCMP_256: 407 status->iv[0] = data[5]; 408 status->iv[1] = data[4]; 409 status->iv[2] = data[3]; 410 status->iv[3] = data[2]; 411 status->iv[4] = data[1]; 412 status->iv[5] = data[0]; 413 break; 414 default: 415 break; 416 } 417 } 418 rxd += 4; 419 if ((u8 *)rxd - skb->data >= skb->len) 420 return -EINVAL; 421 } 422 423 if (rxd1 & MT_RXD1_NORMAL_GROUP_2) { 424 status->timestamp = le32_to_cpu(rxd[0]); 425 status->flag |= RX_FLAG_MACTIME_START; 426 427 if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) { 428 status->flag |= RX_FLAG_AMPDU_DETAILS; 429 430 /* all subframes of an A-MPDU have the same timestamp */ 431 if (phy->rx_ampdu_ts != status->timestamp) { 432 if (!++phy->ampdu_ref) 433 phy->ampdu_ref++; 434 } 435 phy->rx_ampdu_ts = status->timestamp; 436 437 status->ampdu_ref = phy->ampdu_ref; 438 } 439 440 rxd += 2; 441 if ((u8 *)rxd - skb->data >= skb->len) 442 return -EINVAL; 443 } 444 445 /* RXD Group 3 - P-RXV */ 446 if (rxd1 & MT_RXD1_NORMAL_GROUP_3) { 447 u32 v0, v1; 448 int ret; 449 450 rxv = rxd; 451 rxd += 2; 452 if ((u8 *)rxd - skb->data >= skb->len) 453 return -EINVAL; 454 455 v0 = le32_to_cpu(rxv[0]); 456 v1 = le32_to_cpu(rxv[1]); 457 458 if (v0 & MT_PRXV_HT_AD_CODE) 459 status->enc_flags |= RX_ENC_FLAG_LDPC; 460 461 status->chains = mphy->antenna_mask; 462 status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1); 463 status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1); 464 status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1); 465 status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1); 466 467 /* RXD Group 5 - C-RXV */ 468 if (rxd1 & MT_RXD1_NORMAL_GROUP_5) { 469 rxd += 18; 470 if ((u8 *)rxd - skb->data >= skb->len) 471 return -EINVAL; 472 } 473 474 if (!is_mt7915(&dev->mt76) || (rxd1 & MT_RXD1_NORMAL_GROUP_5)) { 475 ret = mt76_connac2_mac_fill_rx_rate(&dev->mt76, status, 476 sband, rxv, &mode); 477 if (ret < 0) 478 return ret; 479 } 480 } 481 482 amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4); 483 status->amsdu = !!amsdu_info; 484 if (status->amsdu) { 485 status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME; 486 status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME; 487 } 488 489 hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad; 490 if (hdr_trans && ieee80211_has_morefrags(fc)) { 491 struct ieee80211_vif *vif; 492 int err; 493 494 if (!msta || !msta->vif) 495 return -EINVAL; 496 497 vif = container_of((void *)msta->vif, struct ieee80211_vif, 498 drv_priv); 499 err = mt76_connac2_reverse_frag0_hdr_trans(vif, skb, hdr_gap); 500 if (err) 501 return err; 502 503 hdr_trans = false; 504 } else { 505 int pad_start = 0; 506 507 skb_pull(skb, hdr_gap); 508 if (!hdr_trans && status->amsdu) { 509 pad_start = ieee80211_get_hdrlen_from_skb(skb); 510 } else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) { 511 /* 512 * When header translation failure is indicated, 513 * the hardware will insert an extra 2-byte field 514 * containing the data length after the protocol 515 * type field. This happens either when the LLC-SNAP 516 * pattern did not match, or if a VLAN header was 517 * detected. 518 */ 519 pad_start = 12; 520 if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q) 521 pad_start += 4; 522 else 523 pad_start = 0; 524 } 525 526 if (pad_start) { 527 memmove(skb->data + 2, skb->data, pad_start); 528 skb_pull(skb, 2); 529 } 530 } 531 532 if (!hdr_trans) { 533 struct ieee80211_hdr *hdr; 534 535 if (insert_ccmp_hdr) { 536 u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1); 537 538 mt76_insert_ccmp_hdr(skb, key_id); 539 } 540 541 hdr = mt76_skb_get_hdr(skb); 542 fc = hdr->frame_control; 543 if (ieee80211_is_data_qos(fc)) { 544 seq_ctrl = le16_to_cpu(hdr->seq_ctrl); 545 qos_ctl = *ieee80211_get_qos_ctl(hdr); 546 } 547 } else { 548 status->flag |= RX_FLAG_8023; 549 mt7915_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb, 550 *info); 551 } 552 553 if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023)) 554 mt76_connac2_mac_decode_he_radiotap(&dev->mt76, skb, rxv, mode); 555 556 if (!status->wcid || !ieee80211_is_data_qos(fc)) 557 return 0; 558 559 status->aggr = unicast && 560 !ieee80211_is_qos_nullfunc(fc); 561 status->qos_ctl = qos_ctl; 562 status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl); 563 564 return 0; 565 } 566 567 static void 568 mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb) 569 { 570 #ifdef CONFIG_NL80211_TESTMODE 571 struct mt7915_phy *phy = &dev->phy; 572 __le32 *rxd = (__le32 *)skb->data; 573 __le32 *rxv_hdr = rxd + 2; 574 __le32 *rxv = rxd + 4; 575 u32 rcpi, ib_rssi, wb_rssi, v20, v21; 576 u8 band_idx; 577 s32 foe; 578 u8 snr; 579 int i; 580 581 band_idx = le32_get_bits(rxv_hdr[1], MT_RXV_HDR_BAND_IDX); 582 if (band_idx && !phy->mt76->band_idx) { 583 phy = mt7915_ext_phy(dev); 584 if (!phy) 585 goto out; 586 } 587 588 rcpi = le32_to_cpu(rxv[6]); 589 ib_rssi = le32_to_cpu(rxv[7]); 590 wb_rssi = le32_to_cpu(rxv[8]) >> 5; 591 592 for (i = 0; i < 4; i++, rcpi >>= 8, ib_rssi >>= 8, wb_rssi >>= 9) { 593 if (i == 3) 594 wb_rssi = le32_to_cpu(rxv[9]); 595 596 phy->test.last_rcpi[i] = rcpi & 0xff; 597 phy->test.last_ib_rssi[i] = ib_rssi & 0xff; 598 phy->test.last_wb_rssi[i] = wb_rssi & 0xff; 599 } 600 601 v20 = le32_to_cpu(rxv[20]); 602 v21 = le32_to_cpu(rxv[21]); 603 604 foe = FIELD_GET(MT_CRXV_FOE_LO, v20) | 605 (FIELD_GET(MT_CRXV_FOE_HI, v21) << MT_CRXV_FOE_SHIFT); 606 607 snr = FIELD_GET(MT_CRXV_SNR, v20) - 16; 608 609 phy->test.last_freq_offset = foe; 610 phy->test.last_snr = snr; 611 out: 612 #endif 613 dev_kfree_skb(skb); 614 } 615 616 static void 617 mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi, 618 struct sk_buff *skb) 619 { 620 #ifdef CONFIG_NL80211_TESTMODE 621 struct mt76_testmode_data *td = &phy->mt76->test; 622 const struct ieee80211_rate *r; 623 u8 bw, mode, nss = td->tx_rate_nss; 624 u8 rate_idx = td->tx_rate_idx; 625 u16 rateval = 0; 626 u32 val; 627 bool cck = false; 628 int band; 629 630 if (skb != phy->mt76->test.tx_skb) 631 return; 632 633 switch (td->tx_rate_mode) { 634 case MT76_TM_TX_MODE_HT: 635 nss = 1 + (rate_idx >> 3); 636 mode = MT_PHY_TYPE_HT; 637 break; 638 case MT76_TM_TX_MODE_VHT: 639 mode = MT_PHY_TYPE_VHT; 640 break; 641 case MT76_TM_TX_MODE_HE_SU: 642 mode = MT_PHY_TYPE_HE_SU; 643 break; 644 case MT76_TM_TX_MODE_HE_EXT_SU: 645 mode = MT_PHY_TYPE_HE_EXT_SU; 646 break; 647 case MT76_TM_TX_MODE_HE_TB: 648 mode = MT_PHY_TYPE_HE_TB; 649 break; 650 case MT76_TM_TX_MODE_HE_MU: 651 mode = MT_PHY_TYPE_HE_MU; 652 break; 653 case MT76_TM_TX_MODE_CCK: 654 cck = true; 655 fallthrough; 656 case MT76_TM_TX_MODE_OFDM: 657 band = phy->mt76->chandef.chan->band; 658 if (band == NL80211_BAND_2GHZ && !cck) 659 rate_idx += 4; 660 661 r = &phy->mt76->hw->wiphy->bands[band]->bitrates[rate_idx]; 662 val = cck ? r->hw_value_short : r->hw_value; 663 664 mode = val >> 8; 665 rate_idx = val & 0xff; 666 break; 667 default: 668 mode = MT_PHY_TYPE_OFDM; 669 break; 670 } 671 672 switch (phy->mt76->chandef.width) { 673 case NL80211_CHAN_WIDTH_40: 674 bw = 1; 675 break; 676 case NL80211_CHAN_WIDTH_80: 677 bw = 2; 678 break; 679 case NL80211_CHAN_WIDTH_80P80: 680 case NL80211_CHAN_WIDTH_160: 681 bw = 3; 682 break; 683 default: 684 bw = 0; 685 break; 686 } 687 688 if (td->tx_rate_stbc && nss == 1) { 689 nss++; 690 rateval |= MT_TX_RATE_STBC; 691 } 692 693 rateval |= FIELD_PREP(MT_TX_RATE_IDX, rate_idx) | 694 FIELD_PREP(MT_TX_RATE_MODE, mode) | 695 FIELD_PREP(MT_TX_RATE_NSS, nss - 1); 696 697 txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE); 698 699 le32p_replace_bits(&txwi[3], 1, MT_TXD3_REM_TX_COUNT); 700 if (td->tx_rate_mode < MT76_TM_TX_MODE_HT) 701 txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE); 702 703 val = MT_TXD6_FIXED_BW | 704 FIELD_PREP(MT_TXD6_BW, bw) | 705 FIELD_PREP(MT_TXD6_TX_RATE, rateval) | 706 FIELD_PREP(MT_TXD6_SGI, td->tx_rate_sgi); 707 708 /* for HE_SU/HE_EXT_SU PPDU 709 * - 1x, 2x, 4x LTF + 0.8us GI 710 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI 711 * for HE_MU PPDU 712 * - 2x, 4x LTF + 0.8us GI 713 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI 714 * for HE_TB PPDU 715 * - 1x, 2x LTF + 1.6us GI 716 * - 4x LTF + 3.2us GI 717 */ 718 if (mode >= MT_PHY_TYPE_HE_SU) 719 val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf); 720 721 if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU)) 722 val |= MT_TXD6_LDPC; 723 724 txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID); 725 txwi[6] |= cpu_to_le32(val); 726 txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX, 727 phy->test.spe_idx)); 728 #endif 729 } 730 731 void mt7915_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi, 732 struct sk_buff *skb, struct mt76_wcid *wcid, int pid, 733 struct ieee80211_key_conf *key, 734 enum mt76_txq_id qid, u32 changed) 735 { 736 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 737 u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2; 738 struct mt76_phy *mphy = &dev->phy; 739 740 if (phy_idx && dev->phys[MT_BAND1]) 741 mphy = dev->phys[MT_BAND1]; 742 743 mt76_connac2_mac_write_txwi(dev, txwi, skb, wcid, key, pid, qid, changed); 744 745 if (mt76_testmode_enabled(mphy)) 746 mt7915_mac_write_txwi_tm(mphy->priv, txwi, skb); 747 } 748 749 int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, 750 enum mt76_txq_id qid, struct mt76_wcid *wcid, 751 struct ieee80211_sta *sta, 752 struct mt76_tx_info *tx_info) 753 { 754 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data; 755 struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); 756 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); 757 struct ieee80211_key_conf *key = info->control.hw_key; 758 struct ieee80211_vif *vif = info->control.vif; 759 struct mt76_connac_fw_txp *txp; 760 struct mt76_txwi_cache *t; 761 int id, i, nbuf = tx_info->nbuf - 1; 762 u8 *txwi = (u8 *)txwi_ptr; 763 int pid; 764 765 if (unlikely(tx_info->skb->len <= ETH_HLEN)) 766 return -EINVAL; 767 768 if (!wcid) 769 wcid = &dev->mt76.global_wcid; 770 771 if (sta) { 772 struct mt7915_sta *msta; 773 774 msta = (struct mt7915_sta *)sta->drv_priv; 775 776 if (time_after(jiffies, msta->jiffies + HZ / 4)) { 777 info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; 778 msta->jiffies = jiffies; 779 } 780 } 781 782 t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); 783 t->skb = tx_info->skb; 784 785 id = mt76_token_consume(mdev, &t); 786 if (id < 0) 787 return id; 788 789 pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb); 790 mt7915_mac_write_txwi(mdev, txwi_ptr, tx_info->skb, wcid, pid, key, 791 qid, 0); 792 793 txp = (struct mt76_connac_fw_txp *)(txwi + MT_TXD_SIZE); 794 for (i = 0; i < nbuf; i++) { 795 txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr); 796 txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len); 797 } 798 txp->nbuf = nbuf; 799 800 txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD | MT_CT_INFO_FROM_HOST); 801 802 if (!key) 803 txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME); 804 805 if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) && 806 ieee80211_is_mgmt(hdr->frame_control)) 807 txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME); 808 809 if (vif) { 810 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 811 812 txp->bss_idx = mvif->mt76.idx; 813 } 814 815 txp->token = cpu_to_le16(id); 816 if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) 817 txp->rept_wds_wcid = cpu_to_le16(wcid->idx); 818 else 819 txp->rept_wds_wcid = cpu_to_le16(0x3ff); 820 tx_info->skb = DMA_DUMMY_DATA; 821 822 /* pass partial skb header to fw */ 823 tx_info->buf[1].len = MT_CT_PARSE_LEN; 824 tx_info->buf[1].skip_unmap = true; 825 tx_info->nbuf = MT_CT_DMA_BUF_NUM; 826 827 return 0; 828 } 829 830 u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id) 831 { 832 #if defined(__linux__) 833 struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE; 834 #elif defined(__FreeBSD__) 835 struct mt76_connac_fw_txp *txp = (void *)((u8 *)ptr + MT_TXD_SIZE); 836 #endif 837 __le32 *txwi = ptr; 838 u32 val; 839 840 memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp)); 841 842 val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) | 843 FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT); 844 txwi[0] = cpu_to_le32(val); 845 846 val = MT_TXD1_LONG_FORMAT | 847 FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3); 848 txwi[1] = cpu_to_le32(val); 849 850 txp->token = cpu_to_le16(token_id); 851 txp->nbuf = 1; 852 txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp)); 853 854 return MT_TXD_SIZE + sizeof(*txp); 855 } 856 857 static void 858 mt7915_mac_tx_free_prepare(struct mt7915_dev *dev) 859 { 860 struct mt76_dev *mdev = &dev->mt76; 861 struct mt76_phy *mphy_ext = mdev->phys[MT_BAND1]; 862 863 /* clean DMA queues and unmap buffers first */ 864 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false); 865 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false); 866 if (mphy_ext) { 867 mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false); 868 mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false); 869 } 870 } 871 872 static void 873 mt7915_mac_tx_free_done(struct mt7915_dev *dev, 874 struct list_head *free_list, bool wake) 875 { 876 struct sk_buff *skb, *tmp; 877 878 mt7915_mac_sta_poll(dev); 879 880 if (wake) 881 mt76_set_tx_blocked(&dev->mt76, false); 882 883 mt76_worker_schedule(&dev->mt76.tx_worker); 884 885 list_for_each_entry_safe(skb, tmp, free_list, list) { 886 skb_list_del_init(skb); 887 napi_consume_skb(skb, 1); 888 } 889 } 890 891 static void 892 mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len) 893 { 894 struct mt76_connac_tx_free *free = data; 895 #if defined(__linux__) 896 __le32 *tx_info = (__le32 *)(data + sizeof(*free)); 897 #elif defined(__FreeBSD__) 898 __le32 *tx_info = (__le32 *)((u8 *)data + sizeof(*free)); 899 #endif 900 struct mt76_dev *mdev = &dev->mt76; 901 struct mt76_txwi_cache *txwi; 902 struct ieee80211_sta *sta = NULL; 903 struct mt76_wcid *wcid = NULL; 904 #if defined(__linux__) 905 LIST_HEAD(free_list); 906 void *end = data + len; 907 #elif defined(__FreeBSD__) 908 LINUX_LIST_HEAD(free_list); 909 void *end = (u8 *)data + len; 910 #endif 911 bool v3, wake = false; 912 u16 total, count = 0; 913 u32 txd = le32_to_cpu(free->txd); 914 __le32 *cur_info; 915 916 mt7915_mac_tx_free_prepare(dev); 917 918 total = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT); 919 v3 = (FIELD_GET(MT_TX_FREE_VER, txd) == 0x4); 920 921 for (cur_info = tx_info; count < total; cur_info++) { 922 u32 msdu, info; 923 u8 i; 924 925 if (WARN_ON_ONCE((void *)cur_info >= end)) 926 return; 927 928 /* 929 * 1'b1: new wcid pair. 930 * 1'b0: msdu_id with the same 'wcid pair' as above. 931 */ 932 info = le32_to_cpu(*cur_info); 933 if (info & MT_TX_FREE_PAIR) { 934 struct mt7915_sta *msta; 935 u16 idx; 936 937 idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info); 938 wcid = rcu_dereference(dev->mt76.wcid[idx]); 939 sta = wcid_to_sta(wcid); 940 if (!sta) 941 continue; 942 943 msta = container_of(wcid, struct mt7915_sta, wcid); 944 spin_lock_bh(&mdev->sta_poll_lock); 945 if (list_empty(&msta->wcid.poll_list)) 946 list_add_tail(&msta->wcid.poll_list, 947 &mdev->sta_poll_list); 948 spin_unlock_bh(&mdev->sta_poll_lock); 949 continue; 950 } 951 952 if (!mtk_wed_device_active(&mdev->mmio.wed) && wcid) { 953 u32 tx_retries = 0, tx_failed = 0; 954 955 if (v3 && (info & MT_TX_FREE_MPDU_HEADER_V3)) { 956 tx_retries = 957 FIELD_GET(MT_TX_FREE_COUNT_V3, info) - 1; 958 tx_failed = tx_retries + 959 !!FIELD_GET(MT_TX_FREE_STAT_V3, info); 960 } else if (!v3 && (info & MT_TX_FREE_MPDU_HEADER)) { 961 tx_retries = 962 FIELD_GET(MT_TX_FREE_COUNT, info) - 1; 963 tx_failed = tx_retries + 964 !!FIELD_GET(MT_TX_FREE_STAT, info); 965 } 966 wcid->stats.tx_retries += tx_retries; 967 wcid->stats.tx_failed += tx_failed; 968 } 969 970 if (v3 && (info & MT_TX_FREE_MPDU_HEADER_V3)) 971 continue; 972 973 for (i = 0; i < 1 + v3; i++) { 974 if (v3) { 975 msdu = (info >> (15 * i)) & MT_TX_FREE_MSDU_ID_V3; 976 if (msdu == MT_TX_FREE_MSDU_ID_V3) 977 continue; 978 } else { 979 msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info); 980 } 981 count++; 982 txwi = mt76_token_release(mdev, msdu, &wake); 983 if (!txwi) 984 continue; 985 986 mt76_connac2_txwi_free(mdev, txwi, sta, &free_list); 987 } 988 } 989 990 mt7915_mac_tx_free_done(dev, &free_list, wake); 991 } 992 993 static void 994 mt7915_mac_tx_free_v0(struct mt7915_dev *dev, void *data, int len) 995 { 996 struct mt76_connac_tx_free *free = data; 997 #if defined(__linux__) 998 __le16 *info = (__le16 *)(data + sizeof(*free)); 999 #elif defined(__FreeBSD__) 1000 __le16 *info = (__le16 *)((u8 *)data + sizeof(*free)); 1001 #endif 1002 struct mt76_dev *mdev = &dev->mt76; 1003 #if defined(__linux__) 1004 void *end = data + len; 1005 LIST_HEAD(free_list); 1006 #elif defined(__FreeBSD__) 1007 void *end = (u8 *)data + len; 1008 LINUX_LIST_HEAD(free_list); 1009 #endif 1010 bool wake = false; 1011 u8 i, count; 1012 1013 mt7915_mac_tx_free_prepare(dev); 1014 1015 count = FIELD_GET(MT_TX_FREE_MSDU_CNT_V0, le16_to_cpu(free->ctrl)); 1016 if (WARN_ON_ONCE((void *)&info[count] > end)) 1017 return; 1018 1019 for (i = 0; i < count; i++) { 1020 struct mt76_txwi_cache *txwi; 1021 u16 msdu = le16_to_cpu(info[i]); 1022 1023 txwi = mt76_token_release(mdev, msdu, &wake); 1024 if (!txwi) 1025 continue; 1026 1027 mt76_connac2_txwi_free(mdev, txwi, NULL, &free_list); 1028 } 1029 1030 mt7915_mac_tx_free_done(dev, &free_list, wake); 1031 } 1032 1033 static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data) 1034 { 1035 struct mt7915_sta *msta = NULL; 1036 struct mt76_wcid *wcid; 1037 __le32 *txs_data = data; 1038 u16 wcidx; 1039 u8 pid; 1040 1041 wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID); 1042 pid = le32_get_bits(txs_data[3], MT_TXS3_PID); 1043 1044 if (pid < MT_PACKET_ID_WED) 1045 return; 1046 1047 if (wcidx >= mt7915_wtbl_size(dev)) 1048 return; 1049 1050 rcu_read_lock(); 1051 1052 wcid = rcu_dereference(dev->mt76.wcid[wcidx]); 1053 if (!wcid) 1054 goto out; 1055 1056 msta = container_of(wcid, struct mt7915_sta, wcid); 1057 1058 if (pid == MT_PACKET_ID_WED) 1059 mt76_connac2_mac_fill_txs(&dev->mt76, wcid, txs_data); 1060 else 1061 mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data); 1062 1063 if (!wcid->sta) 1064 goto out; 1065 1066 spin_lock_bh(&dev->mt76.sta_poll_lock); 1067 if (list_empty(&msta->wcid.poll_list)) 1068 list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list); 1069 spin_unlock_bh(&dev->mt76.sta_poll_lock); 1070 1071 out: 1072 rcu_read_unlock(); 1073 } 1074 1075 bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len) 1076 { 1077 struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); 1078 __le32 *rxd = (__le32 *)data; 1079 __le32 *end = (__le32 *)&rxd[len / 4]; 1080 enum rx_pkt_type type; 1081 1082 type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE); 1083 1084 switch (type) { 1085 case PKT_TYPE_TXRX_NOTIFY: 1086 mt7915_mac_tx_free(dev, data, len); 1087 return false; 1088 case PKT_TYPE_TXRX_NOTIFY_V0: 1089 mt7915_mac_tx_free_v0(dev, data, len); 1090 return false; 1091 case PKT_TYPE_TXS: 1092 for (rxd += 2; rxd + 8 <= end; rxd += 8) 1093 mt7915_mac_add_txs(dev, rxd); 1094 return false; 1095 case PKT_TYPE_RX_FW_MONITOR: 1096 #if !defined(__FreeBSD__) || defined(CONFIG_MT7915_DEBUGFS) 1097 mt7915_debugfs_rx_fw_monitor(dev, data, len); 1098 #endif 1099 return false; 1100 default: 1101 return true; 1102 } 1103 } 1104 1105 void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, 1106 struct sk_buff *skb, u32 *info) 1107 { 1108 struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); 1109 __le32 *rxd = (__le32 *)skb->data; 1110 __le32 *end = (__le32 *)&skb->data[skb->len]; 1111 enum rx_pkt_type type; 1112 1113 type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE); 1114 1115 switch (type) { 1116 case PKT_TYPE_TXRX_NOTIFY: 1117 mt7915_mac_tx_free(dev, skb->data, skb->len); 1118 napi_consume_skb(skb, 1); 1119 break; 1120 case PKT_TYPE_TXRX_NOTIFY_V0: 1121 mt7915_mac_tx_free_v0(dev, skb->data, skb->len); 1122 napi_consume_skb(skb, 1); 1123 break; 1124 case PKT_TYPE_RX_EVENT: 1125 mt7915_mcu_rx_event(dev, skb); 1126 break; 1127 case PKT_TYPE_TXRXV: 1128 mt7915_mac_fill_rx_vector(dev, skb); 1129 break; 1130 case PKT_TYPE_TXS: 1131 for (rxd += 2; rxd + 8 <= end; rxd += 8) 1132 mt7915_mac_add_txs(dev, rxd); 1133 dev_kfree_skb(skb); 1134 break; 1135 case PKT_TYPE_RX_FW_MONITOR: 1136 #if !defined(__FreeBSD__) || defined(CONFIG_MT7915_DEBUGFS) 1137 mt7915_debugfs_rx_fw_monitor(dev, skb->data, skb->len); 1138 #endif 1139 dev_kfree_skb(skb); 1140 break; 1141 case PKT_TYPE_NORMAL: 1142 if (!mt7915_mac_fill_rx(dev, skb, q, info)) { 1143 mt76_rx(&dev->mt76, q, skb); 1144 return; 1145 } 1146 fallthrough; 1147 default: 1148 dev_kfree_skb(skb); 1149 break; 1150 } 1151 } 1152 1153 void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy) 1154 { 1155 struct mt7915_dev *dev = phy->dev; 1156 u32 reg = MT_WF_PHY_RX_CTRL1(phy->mt76->band_idx); 1157 1158 mt76_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN); 1159 mt76_set(dev, reg, BIT(11) | BIT(9)); 1160 } 1161 1162 void mt7915_mac_reset_counters(struct mt7915_phy *phy) 1163 { 1164 struct mt7915_dev *dev = phy->dev; 1165 int i; 1166 1167 for (i = 0; i < 4; i++) { 1168 mt76_rr(dev, MT_TX_AGG_CNT(phy->mt76->band_idx, i)); 1169 mt76_rr(dev, MT_TX_AGG_CNT2(phy->mt76->band_idx, i)); 1170 } 1171 1172 phy->mt76->survey_time = ktime_get_boottime(); 1173 memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats)); 1174 1175 /* reset airtime counters */ 1176 mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(phy->mt76->band_idx), 1177 MT_WF_RMAC_MIB_RXTIME_CLR); 1178 1179 mt7915_mcu_get_chan_mib_info(phy, true); 1180 } 1181 1182 void mt7915_mac_set_timing(struct mt7915_phy *phy) 1183 { 1184 s16 coverage_class = phy->coverage_class; 1185 struct mt7915_dev *dev = phy->dev; 1186 struct mt7915_phy *ext_phy = mt7915_ext_phy(dev); 1187 u32 val, reg_offset; 1188 u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) | 1189 FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48); 1190 u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) | 1191 FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28); 1192 u8 band = phy->mt76->band_idx; 1193 int eifs_ofdm = 360, sifs = 10, offset; 1194 bool a_band = !(phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ); 1195 1196 if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state)) 1197 return; 1198 1199 if (ext_phy) 1200 coverage_class = max_t(s16, dev->phy.coverage_class, 1201 ext_phy->coverage_class); 1202 1203 mt76_set(dev, MT_ARB_SCR(band), 1204 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE); 1205 udelay(1); 1206 1207 offset = 3 * coverage_class; 1208 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) | 1209 FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset); 1210 1211 if (!is_mt7915(&dev->mt76)) { 1212 if (!a_band) { 1213 mt76_wr(dev, MT_TMAC_ICR1(band), 1214 FIELD_PREP(MT_IFS_EIFS_CCK, 314)); 1215 eifs_ofdm = 78; 1216 } else { 1217 eifs_ofdm = 84; 1218 } 1219 } else if (a_band) { 1220 sifs = 16; 1221 } 1222 1223 mt76_wr(dev, MT_TMAC_CDTR(band), cck + reg_offset); 1224 mt76_wr(dev, MT_TMAC_ODTR(band), ofdm + reg_offset); 1225 mt76_wr(dev, MT_TMAC_ICR0(band), 1226 FIELD_PREP(MT_IFS_EIFS_OFDM, eifs_ofdm) | 1227 FIELD_PREP(MT_IFS_RIFS, 2) | 1228 FIELD_PREP(MT_IFS_SIFS, sifs) | 1229 FIELD_PREP(MT_IFS_SLOT, phy->slottime)); 1230 1231 if (phy->slottime < 20 || a_band) 1232 val = MT7915_CFEND_RATE_DEFAULT; 1233 else 1234 val = MT7915_CFEND_RATE_11B; 1235 1236 mt76_rmw_field(dev, MT_AGG_ACR0(band), MT_AGG_ACR_CFEND_RATE, val); 1237 mt76_clear(dev, MT_ARB_SCR(band), 1238 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE); 1239 } 1240 1241 void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool band) 1242 { 1243 u32 reg; 1244 1245 reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RXTD12(band) : 1246 MT_WF_PHY_RXTD12_MT7916(band); 1247 mt76_set(dev, reg, 1248 MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY | 1249 MT_WF_PHY_RXTD12_IRPI_SW_CLR); 1250 1251 reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RX_CTRL1(band) : 1252 MT_WF_PHY_RX_CTRL1_MT7916(band); 1253 mt76_set(dev, reg, FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5)); 1254 } 1255 1256 static u8 1257 mt7915_phy_get_nf(struct mt7915_phy *phy, int idx) 1258 { 1259 static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 }; 1260 struct mt7915_dev *dev = phy->dev; 1261 u32 val, sum = 0, n = 0; 1262 int nss, i; 1263 1264 for (nss = 0; nss < hweight8(phy->mt76->chainmask); nss++) { 1265 u32 reg = is_mt7915(&dev->mt76) ? 1266 MT_WF_IRPI_NSS(0, nss + (idx << dev->dbdc_support)) : 1267 MT_WF_IRPI_NSS_MT7916(idx, nss); 1268 1269 for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) { 1270 val = mt76_rr(dev, reg); 1271 sum += val * nf_power[i]; 1272 n += val; 1273 } 1274 } 1275 1276 if (!n) 1277 return 0; 1278 1279 return sum / n; 1280 } 1281 1282 void mt7915_update_channel(struct mt76_phy *mphy) 1283 { 1284 struct mt7915_phy *phy = (struct mt7915_phy *)mphy->priv; 1285 struct mt76_channel_state *state = mphy->chan_state; 1286 int nf; 1287 1288 mt7915_mcu_get_chan_mib_info(phy, false); 1289 1290 nf = mt7915_phy_get_nf(phy, phy->mt76->band_idx); 1291 if (!phy->noise) 1292 phy->noise = nf << 4; 1293 else if (nf) 1294 phy->noise += nf - (phy->noise >> 4); 1295 1296 state->noise = -(phy->noise >> 4); 1297 } 1298 1299 static bool 1300 mt7915_wait_reset_state(struct mt7915_dev *dev, u32 state) 1301 { 1302 bool ret; 1303 1304 ret = wait_event_timeout(dev->reset_wait, 1305 (READ_ONCE(dev->recovery.state) & state), 1306 MT7915_RESET_TIMEOUT); 1307 1308 WARN(!ret, "Timeout waiting for MCU reset state %x\n", state); 1309 return ret; 1310 } 1311 1312 static void 1313 mt7915_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif) 1314 { 1315 struct ieee80211_hw *hw = priv; 1316 1317 switch (vif->type) { 1318 case NL80211_IFTYPE_MESH_POINT: 1319 case NL80211_IFTYPE_ADHOC: 1320 case NL80211_IFTYPE_AP: 1321 mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon, 1322 BSS_CHANGED_BEACON_ENABLED); 1323 break; 1324 default: 1325 break; 1326 } 1327 } 1328 1329 static void 1330 mt7915_update_beacons(struct mt7915_dev *dev) 1331 { 1332 struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1]; 1333 1334 ieee80211_iterate_active_interfaces(dev->mt76.hw, 1335 IEEE80211_IFACE_ITER_RESUME_ALL, 1336 mt7915_update_vif_beacon, dev->mt76.hw); 1337 1338 if (!mphy_ext) 1339 return; 1340 1341 ieee80211_iterate_active_interfaces(mphy_ext->hw, 1342 IEEE80211_IFACE_ITER_RESUME_ALL, 1343 mt7915_update_vif_beacon, mphy_ext->hw); 1344 } 1345 1346 static int 1347 mt7915_mac_restart(struct mt7915_dev *dev) 1348 { 1349 struct mt7915_phy *phy2; 1350 struct mt76_phy *ext_phy; 1351 struct mt76_dev *mdev = &dev->mt76; 1352 int i, ret; 1353 1354 ext_phy = dev->mt76.phys[MT_BAND1]; 1355 phy2 = ext_phy ? ext_phy->priv : NULL; 1356 1357 if (dev->hif2) { 1358 mt76_wr(dev, MT_INT1_MASK_CSR, 0x0); 1359 mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0); 1360 } 1361 1362 if (dev_is_pci(mdev->dev)) { 1363 mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0); 1364 if (dev->hif2) { 1365 if (is_mt7915(mdev)) 1366 mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0); 1367 else 1368 mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE_MT7916, 0x0); 1369 } 1370 } 1371 1372 set_bit(MT76_RESET, &dev->mphy.state); 1373 set_bit(MT76_MCU_RESET, &dev->mphy.state); 1374 wake_up(&dev->mt76.mcu.wait); 1375 if (ext_phy) { 1376 set_bit(MT76_RESET, &ext_phy->state); 1377 set_bit(MT76_MCU_RESET, &ext_phy->state); 1378 } 1379 1380 /* lock/unlock all queues to ensure that no tx is pending */ 1381 mt76_txq_schedule_all(&dev->mphy); 1382 if (ext_phy) 1383 mt76_txq_schedule_all(ext_phy); 1384 1385 /* disable all tx/rx napi */ 1386 mt76_worker_disable(&dev->mt76.tx_worker); 1387 mt76_for_each_q_rx(mdev, i) { 1388 if (mdev->q_rx[i].ndesc) 1389 napi_disable(&dev->mt76.napi[i]); 1390 } 1391 napi_disable(&dev->mt76.tx_napi); 1392 1393 /* token reinit */ 1394 mt76_connac2_tx_token_put(&dev->mt76); 1395 idr_init(&dev->mt76.token); 1396 1397 mt7915_dma_reset(dev, true); 1398 1399 local_bh_disable(); 1400 mt76_for_each_q_rx(mdev, i) { 1401 if (mdev->q_rx[i].ndesc) { 1402 napi_enable(&dev->mt76.napi[i]); 1403 napi_schedule(&dev->mt76.napi[i]); 1404 } 1405 } 1406 local_bh_enable(); 1407 clear_bit(MT76_MCU_RESET, &dev->mphy.state); 1408 clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); 1409 1410 mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask); 1411 mt76_wr(dev, MT_INT_SOURCE_CSR, ~0); 1412 1413 if (dev->hif2) { 1414 mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask); 1415 mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0); 1416 } 1417 if (dev_is_pci(mdev->dev)) { 1418 mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); 1419 if (dev->hif2) { 1420 if (is_mt7915(mdev)) 1421 mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff); 1422 else 1423 mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE_MT7916, 0xff); 1424 } 1425 } 1426 1427 /* load firmware */ 1428 ret = mt7915_mcu_init_firmware(dev); 1429 if (ret) 1430 goto out; 1431 1432 /* set the necessary init items */ 1433 ret = mt7915_mcu_set_eeprom(dev); 1434 if (ret) 1435 goto out; 1436 1437 mt7915_mac_init(dev); 1438 mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband); 1439 mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband); 1440 ret = mt7915_txbf_init(dev); 1441 1442 if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) { 1443 ret = mt7915_run(dev->mphy.hw); 1444 if (ret) 1445 goto out; 1446 } 1447 1448 if (ext_phy && test_bit(MT76_STATE_RUNNING, &ext_phy->state)) { 1449 ret = mt7915_run(ext_phy->hw); 1450 if (ret) 1451 goto out; 1452 } 1453 1454 out: 1455 /* reset done */ 1456 clear_bit(MT76_RESET, &dev->mphy.state); 1457 if (phy2) 1458 clear_bit(MT76_RESET, &phy2->mt76->state); 1459 1460 local_bh_disable(); 1461 napi_enable(&dev->mt76.tx_napi); 1462 napi_schedule(&dev->mt76.tx_napi); 1463 local_bh_enable(); 1464 1465 mt76_worker_enable(&dev->mt76.tx_worker); 1466 1467 return ret; 1468 } 1469 1470 static void 1471 mt7915_mac_full_reset(struct mt7915_dev *dev) 1472 { 1473 struct mt76_phy *ext_phy; 1474 int i; 1475 1476 ext_phy = dev->mt76.phys[MT_BAND1]; 1477 1478 dev->recovery.hw_full_reset = true; 1479 1480 wake_up(&dev->mt76.mcu.wait); 1481 ieee80211_stop_queues(mt76_hw(dev)); 1482 if (ext_phy) 1483 ieee80211_stop_queues(ext_phy->hw); 1484 1485 cancel_delayed_work_sync(&dev->mphy.mac_work); 1486 if (ext_phy) 1487 cancel_delayed_work_sync(&ext_phy->mac_work); 1488 1489 mutex_lock(&dev->mt76.mutex); 1490 for (i = 0; i < 10; i++) { 1491 if (!mt7915_mac_restart(dev)) 1492 break; 1493 } 1494 mutex_unlock(&dev->mt76.mutex); 1495 1496 if (i == 10) 1497 dev_err(dev->mt76.dev, "chip full reset failed\n"); 1498 1499 ieee80211_restart_hw(mt76_hw(dev)); 1500 if (ext_phy) 1501 ieee80211_restart_hw(ext_phy->hw); 1502 1503 ieee80211_wake_queues(mt76_hw(dev)); 1504 if (ext_phy) 1505 ieee80211_wake_queues(ext_phy->hw); 1506 1507 dev->recovery.hw_full_reset = false; 1508 ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work, 1509 MT7915_WATCHDOG_TIME); 1510 if (ext_phy) 1511 ieee80211_queue_delayed_work(ext_phy->hw, 1512 &ext_phy->mac_work, 1513 MT7915_WATCHDOG_TIME); 1514 } 1515 1516 /* system error recovery */ 1517 void mt7915_mac_reset_work(struct work_struct *work) 1518 { 1519 struct mt7915_phy *phy2; 1520 struct mt76_phy *ext_phy; 1521 struct mt7915_dev *dev; 1522 int i; 1523 1524 dev = container_of(work, struct mt7915_dev, reset_work); 1525 ext_phy = dev->mt76.phys[MT_BAND1]; 1526 phy2 = ext_phy ? ext_phy->priv : NULL; 1527 1528 /* chip full reset */ 1529 if (dev->recovery.restart) { 1530 /* disable WA/WM WDT */ 1531 mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA, 1532 MT_MCU_CMD_WDT_MASK); 1533 1534 if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT) 1535 dev->recovery.wa_reset_count++; 1536 else 1537 dev->recovery.wm_reset_count++; 1538 1539 mt7915_mac_full_reset(dev); 1540 1541 /* enable mcu irq */ 1542 mt7915_irq_enable(dev, MT_INT_MCU_CMD); 1543 mt7915_irq_disable(dev, 0); 1544 1545 /* enable WA/WM WDT */ 1546 mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK); 1547 1548 dev->recovery.state = MT_MCU_CMD_NORMAL_STATE; 1549 dev->recovery.restart = false; 1550 return; 1551 } 1552 1553 /* chip partial reset */ 1554 if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA)) 1555 return; 1556 1557 if (mtk_wed_device_active(&dev->mt76.mmio.wed)) { 1558 mtk_wed_device_stop(&dev->mt76.mmio.wed); 1559 if (!is_mt798x(&dev->mt76)) 1560 mt76_wr(dev, MT_INT_WED_MASK_CSR, 0); 1561 } 1562 1563 ieee80211_stop_queues(mt76_hw(dev)); 1564 if (ext_phy) 1565 ieee80211_stop_queues(ext_phy->hw); 1566 1567 set_bit(MT76_RESET, &dev->mphy.state); 1568 set_bit(MT76_MCU_RESET, &dev->mphy.state); 1569 wake_up(&dev->mt76.mcu.wait); 1570 cancel_delayed_work_sync(&dev->mphy.mac_work); 1571 if (phy2) { 1572 set_bit(MT76_RESET, &phy2->mt76->state); 1573 cancel_delayed_work_sync(&phy2->mt76->mac_work); 1574 } 1575 mt76_worker_disable(&dev->mt76.tx_worker); 1576 mt76_for_each_q_rx(&dev->mt76, i) 1577 napi_disable(&dev->mt76.napi[i]); 1578 napi_disable(&dev->mt76.tx_napi); 1579 1580 mutex_lock(&dev->mt76.mutex); 1581 1582 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED); 1583 1584 if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) { 1585 mt7915_dma_reset(dev, false); 1586 1587 mt76_connac2_tx_token_put(&dev->mt76); 1588 idr_init(&dev->mt76.token); 1589 1590 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT); 1591 mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE); 1592 } 1593 1594 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE); 1595 mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE); 1596 1597 /* enable DMA Tx/Rx and interrupt */ 1598 mt7915_dma_start(dev, false, false); 1599 1600 clear_bit(MT76_MCU_RESET, &dev->mphy.state); 1601 clear_bit(MT76_RESET, &dev->mphy.state); 1602 if (phy2) 1603 clear_bit(MT76_RESET, &phy2->mt76->state); 1604 1605 local_bh_disable(); 1606 mt76_for_each_q_rx(&dev->mt76, i) { 1607 napi_enable(&dev->mt76.napi[i]); 1608 napi_schedule(&dev->mt76.napi[i]); 1609 } 1610 local_bh_enable(); 1611 1612 tasklet_schedule(&dev->mt76.irq_tasklet); 1613 1614 mt76_worker_enable(&dev->mt76.tx_worker); 1615 1616 local_bh_disable(); 1617 napi_enable(&dev->mt76.tx_napi); 1618 napi_schedule(&dev->mt76.tx_napi); 1619 local_bh_enable(); 1620 1621 ieee80211_wake_queues(mt76_hw(dev)); 1622 if (ext_phy) 1623 ieee80211_wake_queues(ext_phy->hw); 1624 1625 mutex_unlock(&dev->mt76.mutex); 1626 1627 mt7915_update_beacons(dev); 1628 1629 ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work, 1630 MT7915_WATCHDOG_TIME); 1631 if (phy2) 1632 ieee80211_queue_delayed_work(ext_phy->hw, 1633 &phy2->mt76->mac_work, 1634 MT7915_WATCHDOG_TIME); 1635 } 1636 1637 /* firmware coredump */ 1638 void mt7915_mac_dump_work(struct work_struct *work) 1639 { 1640 const struct mt7915_mem_region *mem_region; 1641 struct mt7915_crash_data *crash_data; 1642 struct mt7915_dev *dev; 1643 struct mt7915_mem_hdr *hdr; 1644 size_t buf_len; 1645 int i; 1646 u32 num; 1647 u8 *buf; 1648 1649 dev = container_of(work, struct mt7915_dev, dump_work); 1650 1651 mutex_lock(&dev->dump_mutex); 1652 1653 crash_data = mt7915_coredump_new(dev); 1654 if (!crash_data) { 1655 mutex_unlock(&dev->dump_mutex); 1656 goto skip_coredump; 1657 } 1658 1659 mem_region = mt7915_coredump_get_mem_layout(dev, &num); 1660 if (!mem_region || !crash_data->memdump_buf_len) { 1661 mutex_unlock(&dev->dump_mutex); 1662 goto skip_memdump; 1663 } 1664 1665 buf = crash_data->memdump_buf; 1666 buf_len = crash_data->memdump_buf_len; 1667 1668 /* dumping memory content... */ 1669 memset(buf, 0, buf_len); 1670 for (i = 0; i < num; i++) { 1671 if (mem_region->len > buf_len) { 1672 dev_warn(dev->mt76.dev, "%s len %lu is too large\n", 1673 mem_region->name, 1674 (unsigned long)mem_region->len); 1675 break; 1676 } 1677 1678 /* reserve space for the header */ 1679 hdr = (void *)buf; 1680 buf += sizeof(*hdr); 1681 buf_len -= sizeof(*hdr); 1682 1683 mt7915_memcpy_fromio(dev, buf, mem_region->start, 1684 mem_region->len); 1685 1686 hdr->start = mem_region->start; 1687 hdr->len = mem_region->len; 1688 1689 if (!mem_region->len) 1690 /* note: the header remains, just with zero length */ 1691 break; 1692 1693 buf += mem_region->len; 1694 buf_len -= mem_region->len; 1695 1696 mem_region++; 1697 } 1698 1699 mutex_unlock(&dev->dump_mutex); 1700 1701 skip_memdump: 1702 mt7915_coredump_submit(dev); 1703 skip_coredump: 1704 queue_work(dev->mt76.wq, &dev->reset_work); 1705 } 1706 1707 void mt7915_reset(struct mt7915_dev *dev) 1708 { 1709 if (!dev->recovery.hw_init_done) 1710 return; 1711 1712 if (dev->recovery.hw_full_reset) 1713 return; 1714 1715 /* wm/wa exception: do full recovery */ 1716 if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) { 1717 dev->recovery.restart = true; 1718 dev_info(dev->mt76.dev, 1719 "%s indicated firmware crash, attempting recovery\n", 1720 wiphy_name(dev->mt76.hw->wiphy)); 1721 1722 mt7915_irq_disable(dev, MT_INT_MCU_CMD); 1723 queue_work(dev->mt76.wq, &dev->dump_work); 1724 return; 1725 } 1726 1727 queue_work(dev->mt76.wq, &dev->reset_work); 1728 wake_up(&dev->reset_wait); 1729 } 1730 1731 void mt7915_mac_update_stats(struct mt7915_phy *phy) 1732 { 1733 struct mt76_mib_stats *mib = &phy->mib; 1734 struct mt7915_dev *dev = phy->dev; 1735 int i, aggr0 = 0, aggr1, cnt; 1736 u8 band = phy->mt76->band_idx; 1737 u32 val; 1738 1739 cnt = mt76_rr(dev, MT_MIB_SDR3(band)); 1740 mib->fcs_err_cnt += is_mt7915(&dev->mt76) ? 1741 FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) : 1742 FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt); 1743 1744 cnt = mt76_rr(dev, MT_MIB_SDR4(band)); 1745 mib->rx_fifo_full_cnt += FIELD_GET(MT_MIB_SDR4_RX_FIFO_FULL_MASK, cnt); 1746 1747 cnt = mt76_rr(dev, MT_MIB_SDR5(band)); 1748 mib->rx_mpdu_cnt += cnt; 1749 1750 cnt = mt76_rr(dev, MT_MIB_SDR6(band)); 1751 mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt); 1752 1753 cnt = mt76_rr(dev, MT_MIB_SDR7(band)); 1754 mib->rx_vector_mismatch_cnt += 1755 FIELD_GET(MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK, cnt); 1756 1757 cnt = mt76_rr(dev, MT_MIB_SDR8(band)); 1758 mib->rx_delimiter_fail_cnt += 1759 FIELD_GET(MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK, cnt); 1760 1761 cnt = mt76_rr(dev, MT_MIB_SDR10(band)); 1762 mib->rx_mrdy_cnt += is_mt7915(&dev->mt76) ? 1763 FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK, cnt) : 1764 FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK_MT7916, cnt); 1765 1766 cnt = mt76_rr(dev, MT_MIB_SDR11(band)); 1767 mib->rx_len_mismatch_cnt += 1768 FIELD_GET(MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK, cnt); 1769 1770 cnt = mt76_rr(dev, MT_MIB_SDR12(band)); 1771 mib->tx_ampdu_cnt += cnt; 1772 1773 cnt = mt76_rr(dev, MT_MIB_SDR13(band)); 1774 mib->tx_stop_q_empty_cnt += 1775 FIELD_GET(MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK, cnt); 1776 1777 cnt = mt76_rr(dev, MT_MIB_SDR14(band)); 1778 mib->tx_mpdu_attempts_cnt += is_mt7915(&dev->mt76) ? 1779 FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt) : 1780 FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK_MT7916, cnt); 1781 1782 cnt = mt76_rr(dev, MT_MIB_SDR15(band)); 1783 mib->tx_mpdu_success_cnt += is_mt7915(&dev->mt76) ? 1784 FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt) : 1785 FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916, cnt); 1786 1787 cnt = mt76_rr(dev, MT_MIB_SDR16(band)); 1788 mib->primary_cca_busy_time += 1789 FIELD_GET(MT_MIB_SDR16_PRIMARY_CCA_BUSY_TIME_MASK, cnt); 1790 1791 cnt = mt76_rr(dev, MT_MIB_SDR17(band)); 1792 mib->secondary_cca_busy_time += 1793 FIELD_GET(MT_MIB_SDR17_SECONDARY_CCA_BUSY_TIME_MASK, cnt); 1794 1795 cnt = mt76_rr(dev, MT_MIB_SDR18(band)); 1796 mib->primary_energy_detect_time += 1797 FIELD_GET(MT_MIB_SDR18_PRIMARY_ENERGY_DETECT_TIME_MASK, cnt); 1798 1799 cnt = mt76_rr(dev, MT_MIB_SDR19(band)); 1800 mib->cck_mdrdy_time += FIELD_GET(MT_MIB_SDR19_CCK_MDRDY_TIME_MASK, cnt); 1801 1802 cnt = mt76_rr(dev, MT_MIB_SDR20(band)); 1803 mib->ofdm_mdrdy_time += 1804 FIELD_GET(MT_MIB_SDR20_OFDM_VHT_MDRDY_TIME_MASK, cnt); 1805 1806 cnt = mt76_rr(dev, MT_MIB_SDR21(band)); 1807 mib->green_mdrdy_time += 1808 FIELD_GET(MT_MIB_SDR21_GREEN_MDRDY_TIME_MASK, cnt); 1809 1810 cnt = mt76_rr(dev, MT_MIB_SDR22(band)); 1811 mib->rx_ampdu_cnt += cnt; 1812 1813 cnt = mt76_rr(dev, MT_MIB_SDR23(band)); 1814 mib->rx_ampdu_bytes_cnt += cnt; 1815 1816 cnt = mt76_rr(dev, MT_MIB_SDR24(band)); 1817 mib->rx_ampdu_valid_subframe_cnt += is_mt7915(&dev->mt76) ? 1818 FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt) : 1819 FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK_MT7916, cnt); 1820 1821 cnt = mt76_rr(dev, MT_MIB_SDR25(band)); 1822 mib->rx_ampdu_valid_subframe_bytes_cnt += cnt; 1823 1824 cnt = mt76_rr(dev, MT_MIB_SDR27(band)); 1825 mib->tx_rwp_fail_cnt += 1826 FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK, cnt); 1827 1828 cnt = mt76_rr(dev, MT_MIB_SDR28(band)); 1829 mib->tx_rwp_need_cnt += 1830 FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK, cnt); 1831 1832 cnt = mt76_rr(dev, MT_MIB_SDR29(band)); 1833 mib->rx_pfdrop_cnt += is_mt7915(&dev->mt76) ? 1834 FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt) : 1835 FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK_MT7916, cnt); 1836 1837 cnt = mt76_rr(dev, MT_MIB_SDRVEC(band)); 1838 mib->rx_vec_queue_overflow_drop_cnt += is_mt7915(&dev->mt76) ? 1839 FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt) : 1840 FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK_MT7916, cnt); 1841 1842 cnt = mt76_rr(dev, MT_MIB_SDR31(band)); 1843 mib->rx_ba_cnt += cnt; 1844 1845 cnt = mt76_rr(dev, MT_MIB_SDRMUBF(band)); 1846 mib->tx_bf_cnt += FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt); 1847 1848 cnt = mt76_rr(dev, MT_MIB_DR8(band)); 1849 mib->tx_mu_mpdu_cnt += cnt; 1850 1851 cnt = mt76_rr(dev, MT_MIB_DR9(band)); 1852 mib->tx_mu_acked_mpdu_cnt += cnt; 1853 1854 cnt = mt76_rr(dev, MT_MIB_DR11(band)); 1855 mib->tx_su_acked_mpdu_cnt += cnt; 1856 1857 cnt = mt76_rr(dev, MT_ETBF_PAR_RPT0(band)); 1858 mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_PAR_RPT0_FB_BW, cnt); 1859 mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NC, cnt); 1860 mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NR, cnt); 1861 1862 for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) { 1863 cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i)); 1864 mib->tx_amsdu[i] += cnt; 1865 mib->tx_amsdu_cnt += cnt; 1866 } 1867 1868 if (is_mt7915(&dev->mt76)) { 1869 for (i = 0, aggr1 = aggr0 + 8; i < 4; i++) { 1870 val = mt76_rr(dev, MT_MIB_MB_SDR1(band, (i << 4))); 1871 mib->ba_miss_cnt += 1872 FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val); 1873 mib->ack_fail_cnt += 1874 FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val); 1875 1876 val = mt76_rr(dev, MT_MIB_MB_SDR0(band, (i << 4))); 1877 mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val); 1878 mib->rts_retries_cnt += 1879 FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val); 1880 1881 val = mt76_rr(dev, MT_TX_AGG_CNT(band, i)); 1882 phy->mt76->aggr_stats[aggr0++] += val & 0xffff; 1883 phy->mt76->aggr_stats[aggr0++] += val >> 16; 1884 1885 val = mt76_rr(dev, MT_TX_AGG_CNT2(band, i)); 1886 phy->mt76->aggr_stats[aggr1++] += val & 0xffff; 1887 phy->mt76->aggr_stats[aggr1++] += val >> 16; 1888 } 1889 1890 cnt = mt76_rr(dev, MT_MIB_SDR32(band)); 1891 mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt); 1892 1893 cnt = mt76_rr(dev, MT_MIB_SDR33(band)); 1894 mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR33_TX_PKT_IBF_CNT, cnt); 1895 1896 cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(band)); 1897 mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt); 1898 mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt); 1899 1900 cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(band)); 1901 mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_ETBF_TX_FB_CPL, cnt); 1902 mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_ETBF_TX_FB_TRI, cnt); 1903 1904 cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(band)); 1905 mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, cnt); 1906 mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, cnt); 1907 mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, cnt); 1908 mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, cnt); 1909 } else { 1910 for (i = 0; i < 2; i++) { 1911 /* rts count */ 1912 val = mt76_rr(dev, MT_MIB_MB_SDR0(band, (i << 2))); 1913 mib->rts_cnt += FIELD_GET(GENMASK(15, 0), val); 1914 mib->rts_cnt += FIELD_GET(GENMASK(31, 16), val); 1915 1916 /* rts retry count */ 1917 val = mt76_rr(dev, MT_MIB_MB_SDR1(band, (i << 2))); 1918 mib->rts_retries_cnt += FIELD_GET(GENMASK(15, 0), val); 1919 mib->rts_retries_cnt += FIELD_GET(GENMASK(31, 16), val); 1920 1921 /* ba miss count */ 1922 val = mt76_rr(dev, MT_MIB_MB_SDR2(band, (i << 2))); 1923 mib->ba_miss_cnt += FIELD_GET(GENMASK(15, 0), val); 1924 mib->ba_miss_cnt += FIELD_GET(GENMASK(31, 16), val); 1925 1926 /* ack fail count */ 1927 val = mt76_rr(dev, MT_MIB_MB_BFTF(band, (i << 2))); 1928 mib->ack_fail_cnt += FIELD_GET(GENMASK(15, 0), val); 1929 mib->ack_fail_cnt += FIELD_GET(GENMASK(31, 16), val); 1930 } 1931 1932 for (i = 0; i < 8; i++) { 1933 val = mt76_rr(dev, MT_TX_AGG_CNT(band, i)); 1934 phy->mt76->aggr_stats[aggr0++] += FIELD_GET(GENMASK(15, 0), val); 1935 phy->mt76->aggr_stats[aggr0++] += FIELD_GET(GENMASK(31, 16), val); 1936 } 1937 1938 cnt = mt76_rr(dev, MT_MIB_SDR32(band)); 1939 mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt); 1940 mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt); 1941 mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt); 1942 mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt); 1943 1944 cnt = mt76_rr(dev, MT_MIB_BFCR7(band)); 1945 mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_MIB_BFCR7_BFEE_TX_FB_CPL, cnt); 1946 1947 cnt = mt76_rr(dev, MT_MIB_BFCR2(band)); 1948 mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_MIB_BFCR2_BFEE_TX_FB_TRIG, cnt); 1949 1950 cnt = mt76_rr(dev, MT_MIB_BFCR0(band)); 1951 mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt); 1952 mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt); 1953 mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt); 1954 mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt); 1955 1956 cnt = mt76_rr(dev, MT_MIB_BFCR1(band)); 1957 mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt); 1958 mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt); 1959 } 1960 } 1961 1962 static void mt7915_mac_severe_check(struct mt7915_phy *phy) 1963 { 1964 struct mt7915_dev *dev = phy->dev; 1965 u32 trb; 1966 1967 if (!phy->omac_mask) 1968 return; 1969 1970 /* In rare cases, TRB pointers might be out of sync leads to RMAC 1971 * stopping Rx, so check status periodically to see if TRB hardware 1972 * requires minimal recovery. 1973 */ 1974 trb = mt76_rr(dev, MT_TRB_RXPSR0(phy->mt76->band_idx)); 1975 1976 if ((FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, trb) != 1977 FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, trb)) && 1978 (FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, phy->trb_ts) != 1979 FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, phy->trb_ts)) && 1980 trb == phy->trb_ts) 1981 mt7915_mcu_set_ser(dev, SER_RECOVER, SER_SET_RECOVER_L3_RX_ABORT, 1982 phy->mt76->band_idx); 1983 1984 phy->trb_ts = trb; 1985 } 1986 1987 void mt7915_mac_sta_rc_work(struct work_struct *work) 1988 { 1989 struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work); 1990 struct ieee80211_sta *sta; 1991 struct ieee80211_vif *vif; 1992 struct mt7915_sta *msta; 1993 u32 changed; 1994 #if defined(__linux__) 1995 LIST_HEAD(list); 1996 #elif defined(__FreeBSD__) 1997 LINUX_LIST_HEAD(list); 1998 #endif 1999 2000 spin_lock_bh(&dev->mt76.sta_poll_lock); 2001 list_splice_init(&dev->sta_rc_list, &list); 2002 2003 while (!list_empty(&list)) { 2004 msta = list_first_entry(&list, struct mt7915_sta, rc_list); 2005 list_del_init(&msta->rc_list); 2006 changed = msta->changed; 2007 msta->changed = 0; 2008 spin_unlock_bh(&dev->mt76.sta_poll_lock); 2009 2010 sta = container_of((void *)msta, struct ieee80211_sta, drv_priv); 2011 vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv); 2012 2013 if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED | 2014 IEEE80211_RC_NSS_CHANGED | 2015 IEEE80211_RC_BW_CHANGED)) 2016 mt7915_mcu_add_rate_ctrl(dev, vif, sta, true); 2017 2018 if (changed & IEEE80211_RC_SMPS_CHANGED) 2019 mt7915_mcu_add_smps(dev, vif, sta); 2020 2021 spin_lock_bh(&dev->mt76.sta_poll_lock); 2022 } 2023 2024 spin_unlock_bh(&dev->mt76.sta_poll_lock); 2025 } 2026 2027 void mt7915_mac_work(struct work_struct *work) 2028 { 2029 struct mt7915_phy *phy; 2030 struct mt76_phy *mphy; 2031 2032 mphy = (struct mt76_phy *)container_of(work, struct mt76_phy, 2033 mac_work.work); 2034 phy = mphy->priv; 2035 2036 mutex_lock(&mphy->dev->mutex); 2037 2038 mt76_update_survey(mphy); 2039 if (++mphy->mac_work_count == 5) { 2040 mphy->mac_work_count = 0; 2041 2042 mt7915_mac_update_stats(phy); 2043 mt7915_mac_severe_check(phy); 2044 2045 if (phy->dev->muru_debug) 2046 mt7915_mcu_muru_debug_get(phy); 2047 } 2048 2049 mutex_unlock(&mphy->dev->mutex); 2050 2051 mt76_tx_status_check(mphy->dev, false); 2052 2053 ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, 2054 MT7915_WATCHDOG_TIME); 2055 } 2056 2057 static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy) 2058 { 2059 struct mt7915_dev *dev = phy->dev; 2060 2061 if (phy->rdd_state & BIT(0)) 2062 mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0, 2063 MT_RX_SEL0, 0); 2064 if (phy->rdd_state & BIT(1)) 2065 mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1, 2066 MT_RX_SEL0, 0); 2067 } 2068 2069 static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain) 2070 { 2071 int err, region; 2072 2073 switch (dev->mt76.region) { 2074 case NL80211_DFS_ETSI: 2075 region = 0; 2076 break; 2077 case NL80211_DFS_JP: 2078 region = 2; 2079 break; 2080 case NL80211_DFS_FCC: 2081 default: 2082 region = 1; 2083 break; 2084 } 2085 2086 err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain, 2087 MT_RX_SEL0, region); 2088 if (err < 0) 2089 return err; 2090 2091 if (is_mt7915(&dev->mt76)) { 2092 err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT, chain, 2093 0, dev->dbdc_support ? 2 : 0); 2094 if (err < 0) 2095 return err; 2096 } 2097 2098 return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain, 2099 MT_RX_SEL0, 1); 2100 } 2101 2102 static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy) 2103 { 2104 struct cfg80211_chan_def *chandef = &phy->mt76->chandef; 2105 struct mt7915_dev *dev = phy->dev; 2106 int err; 2107 2108 /* start CAC */ 2109 err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, 2110 phy->mt76->band_idx, MT_RX_SEL0, 0); 2111 if (err < 0) 2112 return err; 2113 2114 err = mt7915_dfs_start_rdd(dev, phy->mt76->band_idx); 2115 if (err < 0) 2116 return err; 2117 2118 phy->rdd_state |= BIT(phy->mt76->band_idx); 2119 2120 if (!is_mt7915(&dev->mt76)) 2121 return 0; 2122 2123 if (chandef->width == NL80211_CHAN_WIDTH_160 || 2124 chandef->width == NL80211_CHAN_WIDTH_80P80) { 2125 err = mt7915_dfs_start_rdd(dev, 1); 2126 if (err < 0) 2127 return err; 2128 2129 phy->rdd_state |= BIT(1); 2130 } 2131 2132 return 0; 2133 } 2134 2135 static int 2136 mt7915_dfs_init_radar_specs(struct mt7915_phy *phy) 2137 { 2138 const struct mt7915_dfs_radar_spec *radar_specs; 2139 struct mt7915_dev *dev = phy->dev; 2140 int err, i; 2141 2142 switch (dev->mt76.region) { 2143 case NL80211_DFS_FCC: 2144 radar_specs = &fcc_radar_specs; 2145 err = mt7915_mcu_set_fcc5_lpn(dev, 8); 2146 if (err < 0) 2147 return err; 2148 break; 2149 case NL80211_DFS_ETSI: 2150 radar_specs = &etsi_radar_specs; 2151 break; 2152 case NL80211_DFS_JP: 2153 radar_specs = &jp_radar_specs; 2154 break; 2155 default: 2156 return -EINVAL; 2157 } 2158 2159 for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) { 2160 err = mt7915_mcu_set_radar_th(dev, i, 2161 &radar_specs->radar_pattern[i]); 2162 if (err < 0) 2163 return err; 2164 } 2165 2166 return mt7915_mcu_set_pulse_th(dev, &radar_specs->pulse_th); 2167 } 2168 2169 int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy) 2170 { 2171 struct mt7915_dev *dev = phy->dev; 2172 enum mt76_dfs_state dfs_state, prev_state; 2173 int err; 2174 2175 prev_state = phy->mt76->dfs_state; 2176 dfs_state = mt76_phy_dfs_state(phy->mt76); 2177 2178 if (prev_state == dfs_state) 2179 return 0; 2180 2181 if (prev_state == MT_DFS_STATE_UNKNOWN) 2182 mt7915_dfs_stop_radar_detector(phy); 2183 2184 if (dfs_state == MT_DFS_STATE_DISABLED) 2185 goto stop; 2186 2187 if (prev_state <= MT_DFS_STATE_DISABLED) { 2188 err = mt7915_dfs_init_radar_specs(phy); 2189 if (err < 0) 2190 return err; 2191 2192 err = mt7915_dfs_start_radar_detector(phy); 2193 if (err < 0) 2194 return err; 2195 2196 phy->mt76->dfs_state = MT_DFS_STATE_CAC; 2197 } 2198 2199 if (dfs_state == MT_DFS_STATE_CAC) 2200 return 0; 2201 2202 err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END, 2203 phy->mt76->band_idx, MT_RX_SEL0, 0); 2204 if (err < 0) { 2205 phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN; 2206 return err; 2207 } 2208 2209 phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE; 2210 return 0; 2211 2212 stop: 2213 err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START, 2214 phy->mt76->band_idx, MT_RX_SEL0, 0); 2215 if (err < 0) 2216 return err; 2217 2218 if (is_mt7915(&dev->mt76)) { 2219 err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT, 2220 phy->mt76->band_idx, 0, 2221 dev->dbdc_support ? 2 : 0); 2222 if (err < 0) 2223 return err; 2224 } 2225 2226 mt7915_dfs_stop_radar_detector(phy); 2227 phy->mt76->dfs_state = MT_DFS_STATE_DISABLED; 2228 2229 return 0; 2230 } 2231 2232 static int 2233 mt7915_mac_twt_duration_align(int duration) 2234 { 2235 return duration << 8; 2236 } 2237 2238 static u64 2239 mt7915_mac_twt_sched_list_add(struct mt7915_dev *dev, 2240 struct mt7915_twt_flow *flow) 2241 { 2242 struct mt7915_twt_flow *iter, *iter_next; 2243 u32 duration = flow->duration << 8; 2244 u64 start_tsf; 2245 2246 iter = list_first_entry_or_null(&dev->twt_list, 2247 struct mt7915_twt_flow, list); 2248 if (!iter || !iter->sched || iter->start_tsf > duration) { 2249 /* add flow as first entry in the list */ 2250 list_add(&flow->list, &dev->twt_list); 2251 return 0; 2252 } 2253 2254 list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) { 2255 start_tsf = iter->start_tsf + 2256 mt7915_mac_twt_duration_align(iter->duration); 2257 if (list_is_last(&iter->list, &dev->twt_list)) 2258 break; 2259 2260 if (!iter_next->sched || 2261 iter_next->start_tsf > start_tsf + duration) { 2262 list_add(&flow->list, &iter->list); 2263 goto out; 2264 } 2265 } 2266 2267 /* add flow as last entry in the list */ 2268 list_add_tail(&flow->list, &dev->twt_list); 2269 out: 2270 return start_tsf; 2271 } 2272 2273 static int mt7915_mac_check_twt_req(struct ieee80211_twt_setup *twt) 2274 { 2275 struct ieee80211_twt_params *twt_agrt; 2276 u64 interval, duration; 2277 u16 mantissa; 2278 u8 exp; 2279 2280 /* only individual agreement supported */ 2281 if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST) 2282 return -EOPNOTSUPP; 2283 2284 /* only 256us unit supported */ 2285 if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) 2286 return -EOPNOTSUPP; 2287 2288 twt_agrt = (struct ieee80211_twt_params *)twt->params; 2289 2290 /* explicit agreement not supported */ 2291 if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT))) 2292 return -EOPNOTSUPP; 2293 2294 exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, 2295 le16_to_cpu(twt_agrt->req_type)); 2296 mantissa = le16_to_cpu(twt_agrt->mantissa); 2297 duration = twt_agrt->min_twt_dur << 8; 2298 2299 interval = (u64)mantissa << exp; 2300 if (interval < duration) 2301 return -EOPNOTSUPP; 2302 2303 return 0; 2304 } 2305 2306 static bool 2307 mt7915_mac_twt_param_equal(struct mt7915_sta *msta, 2308 struct ieee80211_twt_params *twt_agrt) 2309 { 2310 u16 type = le16_to_cpu(twt_agrt->req_type); 2311 u8 exp; 2312 int i; 2313 2314 exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type); 2315 for (i = 0; i < MT7915_MAX_STA_TWT_AGRT; i++) { 2316 struct mt7915_twt_flow *f; 2317 2318 if (!(msta->twt.flowid_mask & BIT(i))) 2319 continue; 2320 2321 f = &msta->twt.flow[i]; 2322 if (f->duration == twt_agrt->min_twt_dur && 2323 f->mantissa == twt_agrt->mantissa && 2324 f->exp == exp && 2325 f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) && 2326 f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) && 2327 f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER)) 2328 return true; 2329 } 2330 2331 return false; 2332 } 2333 2334 void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw, 2335 struct ieee80211_sta *sta, 2336 struct ieee80211_twt_setup *twt) 2337 { 2338 enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT; 2339 struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; 2340 struct ieee80211_twt_params *twt_agrt = (void *)twt->params; 2341 u16 req_type = le16_to_cpu(twt_agrt->req_type); 2342 enum ieee80211_twt_setup_cmd sta_setup_cmd; 2343 struct mt7915_dev *dev = mt7915_hw_dev(hw); 2344 struct mt7915_twt_flow *flow; 2345 int flowid, table_id; 2346 u8 exp; 2347 2348 if (mt7915_mac_check_twt_req(twt)) 2349 goto out; 2350 2351 mutex_lock(&dev->mt76.mutex); 2352 2353 if (dev->twt.n_agrt == MT7915_MAX_TWT_AGRT) 2354 goto unlock; 2355 2356 if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow)) 2357 goto unlock; 2358 2359 if (twt_agrt->min_twt_dur < MT7915_MIN_TWT_DUR) { 2360 setup_cmd = TWT_SETUP_CMD_DICTATE; 2361 twt_agrt->min_twt_dur = MT7915_MIN_TWT_DUR; 2362 goto unlock; 2363 } 2364 2365 flowid = ffs(~msta->twt.flowid_mask) - 1; 2366 twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID); 2367 twt_agrt->req_type |= le16_encode_bits(flowid, 2368 IEEE80211_TWT_REQTYPE_FLOWID); 2369 2370 table_id = ffs(~dev->twt.table_mask) - 1; 2371 exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type); 2372 sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type); 2373 2374 if (mt7915_mac_twt_param_equal(msta, twt_agrt)) 2375 goto unlock; 2376 2377 flow = &msta->twt.flow[flowid]; 2378 memset(flow, 0, sizeof(*flow)); 2379 INIT_LIST_HEAD(&flow->list); 2380 flow->wcid = msta->wcid.idx; 2381 flow->table_id = table_id; 2382 flow->id = flowid; 2383 flow->duration = twt_agrt->min_twt_dur; 2384 flow->mantissa = twt_agrt->mantissa; 2385 flow->exp = exp; 2386 flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION); 2387 flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE); 2388 flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER); 2389 2390 if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST || 2391 sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) { 2392 u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp; 2393 u64 flow_tsf, curr_tsf; 2394 u32 rem; 2395 2396 flow->sched = true; 2397 flow->start_tsf = mt7915_mac_twt_sched_list_add(dev, flow); 2398 curr_tsf = __mt7915_get_tsf(hw, msta->vif); 2399 div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem); 2400 flow_tsf = curr_tsf + interval - rem; 2401 twt_agrt->twt = cpu_to_le64(flow_tsf); 2402 } else { 2403 list_add_tail(&flow->list, &dev->twt_list); 2404 } 2405 flow->tsf = le64_to_cpu(twt_agrt->twt); 2406 2407 if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD)) 2408 goto unlock; 2409 2410 setup_cmd = TWT_SETUP_CMD_ACCEPT; 2411 dev->twt.table_mask |= BIT(table_id); 2412 msta->twt.flowid_mask |= BIT(flowid); 2413 dev->twt.n_agrt++; 2414 2415 unlock: 2416 mutex_unlock(&dev->mt76.mutex); 2417 out: 2418 twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD); 2419 twt_agrt->req_type |= 2420 le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD); 2421 twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) | 2422 (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED); 2423 } 2424 2425 void mt7915_mac_twt_teardown_flow(struct mt7915_dev *dev, 2426 struct mt7915_sta *msta, 2427 u8 flowid) 2428 { 2429 struct mt7915_twt_flow *flow; 2430 2431 lockdep_assert_held(&dev->mt76.mutex); 2432 2433 if (flowid >= ARRAY_SIZE(msta->twt.flow)) 2434 return; 2435 2436 if (!(msta->twt.flowid_mask & BIT(flowid))) 2437 return; 2438 2439 flow = &msta->twt.flow[flowid]; 2440 if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow, 2441 MCU_TWT_AGRT_DELETE)) 2442 return; 2443 2444 list_del_init(&flow->list); 2445 msta->twt.flowid_mask &= ~BIT(flowid); 2446 dev->twt.table_mask &= ~BIT(flow->table_id); 2447 dev->twt.n_agrt--; 2448 } 2449