// SPDX-License-Identifier: ISC
/* Copyright (C) 2019 MediaTek Inc.
 *
 * Author: Ryder Lee <ryder.lee@mediatek.com>
 *         Roy Luo <royluo@google.com>
 *         Felix Fietkau <nbd@nbd.name>
 *         Lorenzo Bianconi <lorenzo@kernel.org>
 */

#include <linux/devcoredump.h>
#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7615.h"
#include "../trace.h"
#include "../dma.h"
#include "mt7615_trace.h"
#include "mac.h"
#include "mcu.h"

/* Convert an RCPI field from the RX vector into signal strength (dBm).
 * NOTE(review): RCPI-to-dBm formula ((rcpi - 220) / 2) taken as-is from the
 * hardware convention used throughout this file — confirm against datasheet.
 */
#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2)

/* DFS radar pulse thresholds and pattern table for the ETSI domain.
 * Pattern slot indices and the per-entry field meanings are defined by
 * struct mt7615_dfs_radar_spec (see mt7615.h).
 */
static const struct mt7615_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0,  6, 32, 28, 0, 17,  990, 5010, 1, 1 },
		[6] =  { 1, 0,  9, 32, 28, 0, 27,  615, 5010, 1, 1 },
		[7] =  { 1, 0, 15, 32, 28, 0, 27,  240,  445, 1, 1 },
		[8] =  { 1, 0, 12, 32, 28, 0, 42,  240,  510, 1, 1 },
		[9] =  { 1, 1, 0, 0, 0, 0, 14, 2490, 3343, 0, 0, 12, 32, 28 },
		[10] = { 1, 1, 0, 0, 0, 0, 14, 2490, 3343, 0, 0, 15, 32, 24 },
		[11] = { 1, 1, 0, 0, 0, 0, 14,  823, 2510, 0, 0, 18, 32, 28 },
		[12] = { 1, 1, 0, 0, 0, 0, 14,  823, 2510, 0, 0, 27, 32, 24 },
	},
};

/* DFS radar pulse thresholds and pattern table for the FCC domain */
static const struct mt7615_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0,  9,  32, 28, 0, 13, 508, 3076, 1,  1 },
		[1] = { 1, 0, 12,  32, 28, 0, 17, 140,  240, 1,  1 },
		[2] = { 1, 0,  8,  32, 28, 0, 22, 190,  510, 1,  1 },
		[3] = { 1, 0,  6,  32, 28, 0, 32, 190,  510, 1,  1 },
		[4] = { 1, 0,  9, 255, 28, 0, 13, 323,  343, 1, 32 },
	},
};

/* DFS radar pulse thresholds and pattern table for the JP domain */
static const struct mt7615_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
	.radar_pattern = {
		[0] =  { 1, 0,  8,  32, 28, 0, 13,  508, 3076, 1,  1 },
		[1] =  { 1, 0, 12,  32, 28, 0, 17,  140,  240, 1,  1 },
		[2] =  { 1, 0,  8,  32, 28, 0, 22,  190,  510, 1,  1 },
		[3] =  { 1, 0,  6,  32, 28, 0, 32,  190,  510, 1,  1 },
		[4] =  { 1, 0,  9,  32, 28, 0,
		       13,  323,  343, 1, 32 },
		[13] = { 1, 0,  8,  32, 28, 0, 14, 3836, 3856, 1,  1 },
		[14] = { 1, 0,  8,  32, 28, 0, 14, 3990, 4010, 1,  1 },
	},
};

/* Resolve the mt76_wcid for a received frame from its WTBL index.
 * For unicast frames the WTBL entry is returned directly; for
 * multicast/broadcast frames received from a known station, return the
 * per-interface (vif) broadcast wcid instead so accounting is attributed
 * to the interface. Returns NULL when the index is out of range or the
 * entry cannot be attributed.
 */
static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
					    u8 idx, bool unicast)
{
	struct mt7615_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= MT7615_WTBL_SIZE)
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7615_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}

/* Reset software and hardware statistics counters for both PHYs.
 * NOTE(review): the MIB/AGG registers appear to be clear-on-read — the
 * discarded mt76_rr() reads below are what performs the reset; confirm
 * against the datasheet.
 */
void mt7615_mac_reset_counters(struct mt7615_dev *dev)
{
	int i;

	for (i = 0; i < 4; i++) {
		mt76_rr(dev, MT_TX_AGG_CNT(0, i));
		mt76_rr(dev, MT_TX_AGG_CNT(1, i));
	}

	memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
	dev->mt76.phy.survey_time = ktime_get_boottime();
	if (dev->mt76.phy2)
		dev->mt76.phy2->survey_time = ktime_get_boottime();

	/* reset airtime counters */
	mt76_rr(dev, MT_MIB_SDR9(0));
	mt76_rr(dev, MT_MIB_SDR9(1));

	mt76_rr(dev, MT_MIB_SDR36(0));
	mt76_rr(dev, MT_MIB_SDR36(1));

	mt76_rr(dev, MT_MIB_SDR37(0));
	mt76_rr(dev, MT_MIB_SDR37(1));

	mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
}

/* Program MAC timing parameters (SIFS/SLOT/EIFS, CCA/PLCP timeouts and
 * CF-End rate) for @phy, taking the configured coverage class into
 * account. Both PHYs share the coverage class: the larger of the two
 * values is applied. TX/RX for the PHY being updated is temporarily
 * disabled via MT_ARB_SCR while the timing registers are rewritten.
 */
void mt7615_mac_set_timing(struct mt7615_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	u32 val, reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	int sifs, offset;
	bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	/* SIFS: 16us on 5 GHz (OFDM), 10us on 2.4 GHz */
	if (is_5ghz)
		sifs =
		       16;
	else
		sifs = 10;

	if (ext_phy) {
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       coverage_class);
		mt76_set(dev, MT_ARB_SCR,
			 MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE);
	} else {
		struct mt7615_phy *phy_ext = mt7615_ext_phy(dev);

		if (phy_ext)
			coverage_class = max_t(s16, phy_ext->coverage_class,
					       coverage_class);
		mt76_set(dev, MT_ARB_SCR,
			 MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE);
	}
	udelay(1);

	/* extend CCA/PLCP timeouts by 3us per coverage class step */
	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
	mt76_wr(dev, MT_TMAC_CDTR, cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR, ofdm + reg_offset);

	mt76_wr(dev, MT_TMAC_ICR(ext_phy),
		FIELD_PREP(MT_IFS_EIFS, 360) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, sifs) |
		FIELD_PREP(MT_IFS_SLOT, phy->slottime));

	/* short slot or 5 GHz: OFDM CF-End rate, otherwise 11b */
	if (phy->slottime < 20 || is_5ghz)
		val = MT7615_CFEND_RATE_DEFAULT;
	else
		val = MT7615_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_ACR(ext_phy), MT_AGG_ACR_CFEND_RATE, val);
	if (ext_phy)
		mt76_clear(dev, MT_ARB_SCR,
			   MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE);
	else
		mt76_clear(dev, MT_ARB_SCR,
			   MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE);

}

/* Fill in status->band/freq for a received frame. While operating on the
 * configured channel the chandef is authoritative; during scan/ROC the
 * channel number reported by the hardware (chfreq) is used instead.
 */
static void
mt7615_get_status_freq_info(struct mt7615_dev *dev, struct mt76_phy *mphy,
			    struct mt76_rx_status *status, u8 chfreq)
{
	if (!test_bit(MT76_HW_SCANNING, &mphy->state) &&
	    !test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) &&
	    !test_bit(MT76_STATE_ROC, &mphy->state)) {
		status->freq = mphy->chandef.chan->center_freq;
		status->band = mphy->chandef.chan->band;
		return;
	}

	/* channel numbers 1-14 are 2.4 GHz; everything else is 5 GHz */
	status->band = chfreq <= 14 ?
		       NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
	status->freq = ieee80211_channel_to_frequency(chfreq, status->band);
}

/* Record per-frame RX vector diagnostics (frequency offset, RCPI and
 * RSSI values) for the nl80211 testmode interface. Compiled out unless
 * CONFIG_NL80211_TESTMODE is enabled.
 */
static void mt7615_mac_fill_tm_rx(struct mt7615_phy *phy, __le32 *rxv)
{
#ifdef CONFIG_NL80211_TESTMODE
	u32 rxv1 = le32_to_cpu(rxv[0]);
	u32 rxv3 = le32_to_cpu(rxv[2]);
	u32 rxv4 = le32_to_cpu(rxv[3]);
	u32 rxv5 = le32_to_cpu(rxv[4]);
	u8 cbw = FIELD_GET(MT_RXV1_FRAME_MODE, rxv1);
	u8 mode = FIELD_GET(MT_RXV1_TX_MODE, rxv1);
	s16 foe = FIELD_GET(MT_RXV5_FOE, rxv5);
	/* scale factor derived from the channel bandwidth field */
	u32 foe_const = (BIT(cbw + 1) & 0xf) * 10000;

	if (!mode) {
		/* CCK */
		foe &= ~BIT(11);
		foe *= 1000;
		foe >>= 11;
	} else {
		/* sign-extend the 12-bit OFDM frequency offset */
		if (foe > 2048)
			foe -= 4096;

		foe = (foe * foe_const) >> 15;
	}

	phy->test.last_freq_offset = foe;
	phy->test.last_rcpi[0] = FIELD_GET(MT_RXV4_RCPI0, rxv4);
	phy->test.last_rcpi[1] = FIELD_GET(MT_RXV4_RCPI1, rxv4);
	phy->test.last_rcpi[2] = FIELD_GET(MT_RXV4_RCPI2, rxv4);
	phy->test.last_rcpi[3] = FIELD_GET(MT_RXV4_RCPI3, rxv4);
	phy->test.last_ib_rssi[0] = FIELD_GET(MT_RXV3_IB_RSSI, rxv3);
	phy->test.last_wb_rssi[0] = FIELD_GET(MT_RXV3_WB_RSSI, rxv3);
#endif
}

/* Parse the hardware RX descriptor in front of a received frame, fill in
 * the mt76_rx_status in skb->cb (rate, signal, crypto flags, PHY
 * attribution for DBDC) and strip the descriptor from the skb.
 * Returns 0 on success or -EINVAL for malformed/unattributable frames.
 */
static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7615_phy *phy = &dev->phy;
	struct mt7615_phy *phy2 = dev->mt76.phy2 ?
				  dev->mt76.phy2->priv : NULL;
	struct ieee80211_supported_band *sband;
	struct ieee80211_hdr *hdr;
	__le32 *rxd = (__le32 *)skb->data;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	__le32 rxd12 = rxd[12];
	bool unicast, remove_pad, insert_ccmp_hdr = false;
	int phy_idx;
	int i, idx;
	u8 chfreq;

	memset(status, 0, sizeof(*status));

	/* attribute the frame to a PHY by channel number when possible;
	 * -1 means "undecided", resolved later from the RX vector
	 */
	chfreq = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
	if (!phy2)
		phy_idx = 0;
	else if (phy2->chfreq == phy->chfreq)
		phy_idx = -1;
	else if (phy->chfreq == chfreq)
		phy_idx = 0;
	else if (phy2->chfreq == chfreq)
		phy_idx = 1;
	else
		phy_idx = -1;

	unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
	status->wcid = mt7615_rx_get_wcid(dev, idx, unicast);

	/* schedule the station for airtime polling */
	if (status->wcid) {
		struct mt7615_sta *msta;

		msta = container_of(status->wcid, struct mt7615_sta, wcid);
		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&msta->poll_list))
			list_add_tail(&msta->poll_list, &dev->sta_poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);
	}

	if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* hardware decrypted the frame and stripped IV/MIC */
	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	/* walk the optional descriptor groups, bounds-checking each */
	rxd += 4;
	if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if
		   (status->flag & RX_FLAG_DECRYPTED) {
			/* preserve the stripped IV (byte-reversed in the
			 * descriptor) so mac80211 can reconstruct the PN
			 */
			status->iv[0] = data[5];
			status->iv[1] = data[4];
			status->iv[2] = data[3];
			status->iv[3] = data[2];
			status->iv[4] = data[1];
			status->iv[5] = data[0];

			insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
		u32 rxdg5 = le32_to_cpu(rxd[5]);

		/*
		 * If both PHYs are on the same channel and we don't have a WCID,
		 * we need to figure out which PHY this packet was received on.
		 * On the primary PHY, the noise value for the chains belonging to the
		 * second PHY will be set to the noise value of the last packet from
		 * that PHY.
		 */
		if (phy_idx < 0) {
			int first_chain = ffs(phy2->mt76->chainmask) - 1;

			phy_idx = ((rxdg5 >> (first_chain * 8)) & 0xff) == 0;
		}
	}

	if (phy_idx == 1 && phy2) {
		mphy = dev->mt76.phy2;
		phy = phy2;
		status->ext_phy = true;
	}

	if (!mt7615_firmware_offload(dev) && chfreq != phy->chfreq)
		return -EINVAL;

	mt7615_get_status_freq_info(dev, mphy, status, chfreq);
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (!sband->channels)
		return -EINVAL;

	if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
		      MT_RXD2_NORMAL_NON_AMPDU))) {
		status->flag |= RX_FLAG_AMPDU_DETAILS;

		/* all subframes of an A-MPDU have the same timestamp */
		if (phy->rx_ampdu_ts != rxd12) {
			/* bump the reference, skipping 0 on wrap-around */
			if (!++phy->ampdu_ref)
				phy->ampdu_ref++;
		}
		phy->rx_ampdu_ts = rxd12;

		status->ampdu_ref = phy->ampdu_ref;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
		u32 rxdg0 =
			    le32_to_cpu(rxd[0]);
		u32 rxdg1 = le32_to_cpu(rxd[1]);
		u32 rxdg3 = le32_to_cpu(rxd[3]);
		u8 stbc = FIELD_GET(MT_RXV1_HT_STBC, rxdg0);
		bool cck = false;

		/* decode rate/encoding from the RX vector (group 3) */
		i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
		switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
		case MT_PHY_TYPE_CCK:
			cck = true;
			fallthrough;
		case MT_PHY_TYPE_OFDM:
			i = mt76_get_rate(&dev->mt76, sband, i, cck);
			break;
		case MT_PHY_TYPE_HT_GF:
		case MT_PHY_TYPE_HT:
			status->encoding = RX_ENC_HT;
			if (i > 31)
				return -EINVAL;
			break;
		case MT_PHY_TYPE_VHT:
			status->nss = FIELD_GET(MT_RXV2_NSTS, rxdg1) + 1;
			status->encoding = RX_ENC_VHT;
			break;
		default:
			return -EINVAL;
		}
		status->rate_idx = i;

		switch (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0)) {
		case MT_PHY_BW_20:
			break;
		case MT_PHY_BW_40:
			status->bw = RATE_INFO_BW_40;
			break;
		case MT_PHY_BW_80:
			status->bw = RATE_INFO_BW_80;
			break;
		case MT_PHY_BW_160:
			status->bw = RATE_INFO_BW_160;
			break;
		default:
			return -EINVAL;
		}

		if (rxdg0 & MT_RXV1_HT_SHORT_GI)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (rxdg0 & MT_RXV1_HT_AD_CODE)
			status->enc_flags |= RX_ENC_FLAG_LDPC;

		status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;

		/* per-chain RCPI -> dBm; overall signal is the max chain */
		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_RXV4_RCPI0, rxdg3);
		status->chain_signal[1] = to_rssi(MT_RXV4_RCPI1, rxdg3);
		status->chain_signal[2] = to_rssi(MT_RXV4_RCPI2, rxdg3);
		status->chain_signal[3] = to_rssi(MT_RXV4_RCPI3, rxdg3);
		status->signal = status->chain_signal[0];

		for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
			if (!(status->chains & BIT(i)))
				continue;

			status->signal = max(status->signal,
					     status->chain_signal[i]);
		}

		mt7615_mac_fill_tm_rx(mphy->priv, rxd);

		rxd += 6;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* drop the RX descriptor and any header padding */
	skb_pull(skb, (u8 *)rxd -
		      skb->data + 2 * remove_pad);

	if (insert_ccmp_hdr) {
		u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

		mt76_insert_ccmp_hdr(skb, key_id);
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(hdr->frame_control);
	status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
	status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));

	return 0;
}

/* mac80211 sta_ps callback; powersave handling is done elsewhere
 * (presumably firmware/hardware), so this is intentionally a no-op.
 */
void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
}
EXPORT_SYMBOL_GPL(mt7615_sta_ps);

/* Encode a mac80211 tx_rate into the hardware rate value (mode, MCS/idx,
 * NSS, STBC) and report the corresponding bandwidth index via @bw
 * (0 = 20 MHz, 1 = 40, 2 = 80, 3 = 160).
 */
static u16
mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
		       struct mt76_phy *mphy,
		       const struct ieee80211_tx_rate *rate,
		       bool stbc, u8 *bw)
{
	u8 phy, nss, rate_idx;
	u16 rateval = 0;

	*bw = 0;

	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
		rate_idx = ieee80211_rate_get_vht_mcs(rate);
		nss = ieee80211_rate_get_vht_nss(rate);
		phy = MT_PHY_TYPE_VHT;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			*bw = 1;
		else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
			*bw = 2;
		else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
			*bw = 3;
	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 3);
		phy = MT_PHY_TYPE_HT;
		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
			phy = MT_PHY_TYPE_HT_GF;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			*bw = 1;
	} else {
		const struct ieee80211_rate *r;
		int band = mphy->chandef.chan->band;
		u16 val;

		nss = 1;
		r = &mphy->hw->wiphy->bands[band]->bitrates[rate->idx];
		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			val = r->hw_value_short;
		else
			val = r->hw_value;

		/* legacy hw_value packs PHY mode in the high byte */
		phy = val >> 8;
		rate_idx = val & 0xff;
	}

	/* STBC doubles the spatial streams for single-stream rates */
	if (stbc && nss == 1) {
		nss++;
		rateval |=
			   MT_TX_RATE_STBC;
	}

	rateval |= (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
		    FIELD_PREP(MT_TX_RATE_MODE, phy) |
		    FIELD_PREP(MT_TX_RATE_NSS, nss - 1));

	return rateval;
}

/* Build the hardware TX descriptor (TXWI) for @skb in @txwi.
 * @pid: packet id used to match TX status reports.
 * @beacon: frame is a beacon (sent via firmware queue, max retry count).
 * Returns 0.
 */
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
			  struct sk_buff *skb, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta, int pid,
			  struct ieee80211_key_conf *key, bool beacon)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	struct ieee80211_vif *vif = info->control.vif;
	bool is_mmio = mt76_is_mmio(&dev->mt76);
	u32 val, sz_txd = is_mmio ? MT_TXD_SIZE : MT_USB_TXD_SIZE;
	struct mt76_phy *mphy = &dev->mphy;
	__le16 fc = hdr->frame_control;
	int tx_count = 8;
	u16 seqno = 0;

	if (vif) {
		struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;

		omac_idx = mvif->omac_idx;
		wmm_idx = mvif->wmm_idx;
	}

	if (sta) {
		struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;

		tx_count = msta->rate_count;
	}

	if (ext_phy && dev->mt76.phy2)
		mphy = dev->mt76.phy2;

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	/* pick the LMAC queue and packet format */
	if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = ext_phy ? MT_LMAC_BCN1 : MT_LMAC_BCN0;
	} else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
		p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
		q_idx = ext_phy ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0;
	} else {
		p_fmt = is_mmio ?
			MT_TX_TYPE_CT : MT_TX_TYPE_SF;
		q_idx = wmm_idx * MT7615_MAX_WMM_SETS +
			mt7615_lmac_mapping(dev, skb_get_queue_mapping(skb));
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
	      FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_LMAC) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID,
			 skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
	      FIELD_PREP(MT_TXD1_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
	txwi[1] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
	      FIELD_PREP(MT_TXD2_MULTICAST, multicast);
	if (key) {
		/* BIP-protected robust group management frames use the
		 * dedicated BIP flag instead of per-frame protection
		 */
		if (multicast && ieee80211_is_robust_mgmt_frame(skb) &&
		    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
			val |= MT_TXD2_BIP;
			txwi[3] = 0;
		} else {
			txwi[3] = cpu_to_le32(MT_TXD3_PROTECT_FRAME);
		}
	} else {
		txwi[3] = 0;
	}
	txwi[2] = cpu_to_le32(val);

	if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
		txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

	txwi[4] = 0;
	txwi[6] = 0;

	/* fixed-rate transmission requested by mac80211 */
	if (rate->idx >= 0 && rate->count &&
	    !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
		bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
		u8 bw;
		u16 rateval = mt7615_mac_tx_rate_val(dev, mphy, rate, stbc,
						     &bw);

		txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);

		val = MT_TXD6_FIXED_BW |
		      FIELD_PREP(MT_TXD6_BW, bw) |
		      FIELD_PREP(MT_TXD6_TX_RATE, rateval);
		txwi[6] |= cpu_to_le32(val);

		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
			txwi[6] |= cpu_to_le32(MT_TXD6_SGI);

		if (info->flags & IEEE80211_TX_CTL_LDPC)
			txwi[6] |= cpu_to_le32(MT_TXD6_LDPC);

		if (!(rate->flags & (IEEE80211_TX_RC_MCS |
IEEE80211_TX_RC_VHT_MCS))) 639 txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE); 640 641 tx_count = rate->count; 642 } 643 644 if (!ieee80211_is_beacon(fc)) { 645 struct ieee80211_hw *hw = mt76_hw(dev); 646 647 val = MT_TXD5_TX_STATUS_HOST | FIELD_PREP(MT_TXD5_PID, pid); 648 if (!ieee80211_hw_check(hw, SUPPORTS_PS)) 649 val |= MT_TXD5_SW_POWER_MGMT; 650 txwi[5] = cpu_to_le32(val); 651 } else { 652 txwi[5] = 0; 653 /* use maximum tx count for beacons */ 654 tx_count = 0x1f; 655 } 656 657 val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count); 658 if (info->flags & IEEE80211_TX_CTL_INJECTED) { 659 seqno = le16_to_cpu(hdr->seq_ctrl); 660 661 if (ieee80211_is_back_req(hdr->frame_control)) { 662 struct ieee80211_bar *bar; 663 664 bar = (struct ieee80211_bar *)skb->data; 665 seqno = le16_to_cpu(bar->start_seq_num); 666 } 667 668 val |= MT_TXD3_SN_VALID | 669 FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno)); 670 } 671 672 txwi[3] |= cpu_to_le32(val); 673 674 if (info->flags & IEEE80211_TX_CTL_NO_ACK) 675 txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK); 676 677 txwi[7] = FIELD_PREP(MT_TXD7_TYPE, fc_type) | 678 FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype) | 679 FIELD_PREP(MT_TXD7_SPE_IDX, 0x18); 680 if (!is_mmio) 681 txwi[8] = FIELD_PREP(MT_TXD8_L_TYPE, fc_type) | 682 FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype); 683 684 return 0; 685 } 686 EXPORT_SYMBOL_GPL(mt7615_mac_write_txwi); 687 688 static void 689 mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp) 690 { 691 int i; 692 693 for (i = 1; i < txp->nbuf; i++) 694 dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]), 695 le16_to_cpu(txp->len[i]), DMA_TO_DEVICE); 696 } 697 698 static void 699 mt7615_txp_skb_unmap_hw(struct mt76_dev *dev, struct mt7615_hw_txp *txp) 700 { 701 u32 last_mask; 702 int i; 703 704 last_mask = is_mt7663(dev) ? 
		    MT_TXD_LEN_LAST : MT_TXD_LEN_MSDU_LAST;

	for (i = 0; i < ARRAY_SIZE(txp->ptr); i++) {
		struct mt7615_txp_ptr *ptr = &txp->ptr[i];
		bool last;
		u16 len;

		len = le16_to_cpu(ptr->len0);
		last = len & last_mask;
		len &= MT_TXD_LEN_MASK;
		dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
				 DMA_TO_DEVICE);
		if (last)
			break;

		len = le16_to_cpu(ptr->len1);
		last = len & last_mask;
		len &= MT_TXD_LEN_MASK;
		dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
				 DMA_TO_DEVICE);
		if (last)
			break;
	}
}

/* Unmap all DMA buffers of a TX frame, dispatching on the descriptor
 * format used by the chip (firmware format on mt7615, hw format
 * otherwise).
 */
void mt7615_txp_skb_unmap(struct mt76_dev *dev,
			  struct mt76_txwi_cache *t)
{
	struct mt7615_txp_common *txp;

	txp = mt7615_txwi_to_txp(dev, t);
	if (is_mt7615(dev))
		mt7615_txp_skb_unmap_fw(dev, &txp->fw);
	else
		mt7615_txp_skb_unmap_hw(dev, &txp->hw);
}
EXPORT_SYMBOL_GPL(mt7615_txp_skb_unmap);

/* Trigger a WTBL update for entry @idx with the flags in @mask and wait
 * for the hardware to finish. Returns true on success, false on poll
 * timeout.
 */
bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}

/* Read per-AC TX/RX airtime deltas from the WTBL for every station queued
 * on sta_poll_list and report them to mac80211. Counters approaching
 * overflow (bit 30 set) are cleared in hardware.
 */
void mt7615_mac_sta_poll(struct mt7615_dev *dev)
{
	static const u8 ac_to_tid[4] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	static const u8 hw_queue_map[] = {
		[IEEE80211_AC_BK] = 0,
		[IEEE80211_AC_BE] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};
	struct ieee80211_sta *sta;
	struct mt7615_sta *msta;
	u32 addr, tx_time[4], rx_time[4];
	struct list_head sta_poll_list;
	int i;

	INIT_LIST_HEAD(&sta_poll_list);
	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&dev->sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

	while (!list_empty(&sta_poll_list)) {
		bool clear = false;

		msta =
		       list_first_entry(&sta_poll_list, struct mt7615_sta,
					poll_list);
		list_del_init(&msta->poll_list);

		/* airtime counters live at WTBL entry offset 19 * 4 */
		addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4;

		for (i = 0; i < 4; i++, addr += 8) {
			u32 tx_last = msta->airtime_ac[i];
			u32 rx_last = msta->airtime_ac[i + 4];

			msta->airtime_ac[i] = mt76_rr(dev, addr);
			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
			tx_time[i] = msta->airtime_ac[i] - tx_last;
			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;

			/* clear hw counters before they overflow */
			if ((tx_last | rx_last) & BIT(30))
				clear = true;
		}

		if (clear) {
			mt7615_mac_wtbl_update(dev, msta->wcid.idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < 4; i++) {
			u32 tx_cur = tx_time[i];
			u32 rx_cur = rx_time[hw_queue_map[i]];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur,
						       rx_cur);
		}
	}
}
EXPORT_SYMBOL_GPL(mt7615_mac_sta_poll);

/* Build a mt7615_rate_desc from the mac80211 rate table for @sta.
 * The four rates (plus optional probe rate) are stored into the inactive
 * half of the station's double-buffered rateset, hardware rate values are
 * computed and the bandwidth change index (bw_idx) selected.
 */
static void
mt7615_mac_update_rate_desc(struct mt7615_phy *phy, struct mt7615_sta *sta,
			    struct ieee80211_tx_rate *probe_rate,
			    struct ieee80211_tx_rate *rates,
			    struct mt7615_rate_desc *rd)
{
	struct mt7615_dev *dev = phy->dev;
	struct mt76_phy *mphy = phy->mt76;
	struct ieee80211_tx_rate *ref;
	bool rateset, stbc = false;
	int n_rates = sta->n_rates;
	u8 bw, bw_prev;
	int i, j;

	/* pad the table by repeating the last valid rate */
	for (i = n_rates; i < 4; i++)
		rates[i] = rates[n_rates - 1];

	/* write into the rateset half not currently in use (bit 0 of
	 * rate_set_tsf tracks the active half)
	 */
	rateset = !(sta->rate_set_tsf & BIT(0));
	memcpy(sta->rateset[rateset].rates, rates,
	       sizeof(sta->rateset[rateset].rates));
	if (probe_rate) {
		sta->rateset[rateset].probe_rate = *probe_rate;
		ref = &sta->rateset[rateset].probe_rate;
	} else {
		sta->rateset[rateset].probe_rate.idx = -1;
		ref =
		      &sta->rateset[rateset].rates[0];
	}

	rates = sta->rateset[rateset].rates;
	for (i = 0; i < ARRAY_SIZE(sta->rateset[rateset].rates); i++) {
		/*
		 * We don't support switching between short and long GI
		 * within the rate set. For accurate tx status reporting, we
		 * need to make sure that flags match.
		 * For improved performance, avoid duplicate entries by
		 * decrementing the MCS index if necessary
		 */
		if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI)
			rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI;

		for (j = 0; j < i; j++) {
			if (rates[i].idx != rates[j].idx)
				continue;
			if ((rates[i].flags ^ rates[j].flags) &
			    (IEEE80211_TX_RC_40_MHZ_WIDTH |
			     IEEE80211_TX_RC_80_MHZ_WIDTH |
			     IEEE80211_TX_RC_160_MHZ_WIDTH))
				continue;

			if (!rates[i].idx)
				continue;

			rates[i].idx--;
		}
	}

	rd->val[0] = mt7615_mac_tx_rate_val(dev, mphy, &rates[0], stbc, &bw);
	bw_prev = bw;

	if (probe_rate) {
		rd->probe_val = mt7615_mac_tx_rate_val(dev, mphy, probe_rate,
						       stbc, &bw);
		if (bw)
			rd->bw_idx = 1;
		else
			bw_prev = 0;
	} else {
		rd->probe_val = rd->val[0];
	}

	/* bw_idx marks the first rate position where the bandwidth
	 * changes relative to the previous one
	 */
	rd->val[1] = mt7615_mac_tx_rate_val(dev, mphy, &rates[1], stbc, &bw);
	if (bw_prev) {
		rd->bw_idx = 3;
		bw_prev = bw;
	}

	rd->val[2] = mt7615_mac_tx_rate_val(dev, mphy, &rates[2], stbc, &bw);
	if (bw_prev) {
		rd->bw_idx = 5;
		bw_prev = bw;
	}

	rd->val[3] = mt7615_mac_tx_rate_val(dev, mphy, &rates[3], stbc, &bw);
	if (bw_prev)
		rd->bw_idx = 7;

	rd->rateset = rateset;
	rd->bw = bw;
}

/* Queue an asynchronous WTBL rate update (used on non-MMIO buses where
 * registers cannot be touched in atomic context). Returns 0 on success,
 * -EBUSY while a previous update is still pending, -ENOMEM on allocation
 * failure.
 */
static int
mt7615_mac_queue_rate_update(struct mt7615_phy *phy, struct mt7615_sta *sta,
			     struct ieee80211_tx_rate *probe_rate,
			     struct ieee80211_tx_rate *rates)
{
	struct mt7615_dev *dev = phy->dev;
	struct mt7615_wtbl_rate_desc *wrd;

	if (work_pending(&dev->rate_work))
		return -EBUSY;

	wrd =
kzalloc(sizeof(*wrd), GFP_ATOMIC); 926 if (!wrd) 927 return -ENOMEM; 928 929 wrd->sta = sta; 930 mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates, 931 &wrd->rate); 932 list_add_tail(&wrd->node, &dev->wrd_head); 933 queue_work(dev->mt76.wq, &dev->rate_work); 934 935 return 0; 936 } 937 938 u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid) 939 { 940 u32 addr, val, val2; 941 u8 offset; 942 943 addr = mt7615_mac_wtbl_addr(dev, wcid) + 11 * 4; 944 945 offset = tid * 12; 946 addr += 4 * (offset / 32); 947 offset %= 32; 948 949 val = mt76_rr(dev, addr); 950 val >>= (tid % 32); 951 952 if (offset > 20) { 953 addr += 4; 954 val2 = mt76_rr(dev, addr); 955 val |= val2 << (32 - offset); 956 } 957 958 return val & GENMASK(11, 0); 959 } 960 961 void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta, 962 struct ieee80211_tx_rate *probe_rate, 963 struct ieee80211_tx_rate *rates) 964 { 965 int wcid = sta->wcid.idx, n_rates = sta->n_rates; 966 struct mt7615_dev *dev = phy->dev; 967 struct mt7615_rate_desc rd; 968 u32 w5, w27, addr; 969 970 if (!mt76_is_mmio(&dev->mt76)) { 971 mt7615_mac_queue_rate_update(phy, sta, probe_rate, rates); 972 return; 973 } 974 975 if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000)) 976 return; 977 978 memset(&rd, 0, sizeof(struct mt7615_rate_desc)); 979 mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates, &rd); 980 981 addr = mt7615_mac_wtbl_addr(dev, wcid); 982 w27 = mt76_rr(dev, addr + 27 * 4); 983 w27 &= ~MT_WTBL_W27_CC_BW_SEL; 984 w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, rd.bw); 985 986 w5 = mt76_rr(dev, addr + 5 * 4); 987 w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE | 988 MT_WTBL_W5_MPDU_OK_COUNT | 989 MT_WTBL_W5_MPDU_FAIL_COUNT | 990 MT_WTBL_W5_RATE_IDX); 991 w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, rd.bw) | 992 FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE, 993 rd.bw_idx ? 
			 rd.bw_idx - 1 : 7);

	mt76_wr(dev, MT_WTBL_RIUCR0, w5);

	mt76_wr(dev, MT_WTBL_RIUCR1,
		FIELD_PREP(MT_WTBL_RIUCR1_RATE0, rd.probe_val) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE1, rd.val[0]) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, rd.val[1]));

	mt76_wr(dev, MT_WTBL_RIUCR2,
		FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, rd.val[1] >> 8) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE3, rd.val[1]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE4, rd.val[2]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, rd.val[2]));

	mt76_wr(dev, MT_WTBL_RIUCR3,
		FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, rd.val[2] >> 4) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE6, rd.val[3]) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE7, rd.val[3]));

	mt76_wr(dev, MT_WTBL_UPDATE,
		FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
		MT_WTBL_UPDATE_RATE_UPDATE |
		MT_WTBL_UPDATE_TX_COUNT_CLEAR);

	mt76_wr(dev, addr + 27 * 4, w27);

	/* latch the TSF together with the active rateset index */
	mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
	sta->rate_set_tsf = mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0);
	sta->rate_set_tsf |= rd.rateset;

	if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
		mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	sta->rate_count = 2 * MT7615_RATE_RETRY * n_rates;
	sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
	sta->rate_probe = !!probe_rate;
}
EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);

/* Write (or clear) the key material in the WTBL security section
 * (entry offset 30 * 4). Pairwise and BIP keys share the 32-byte area:
 * the pairwise key occupies bytes 0-15 and the BIP key bytes 16-31 when
 * both are present. Returns 0 on success, -EINVAL on oversized keys.
 */
static int
mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
			   struct ieee80211_key_conf *key,
			   enum mt7615_cipher_type cipher,
			   enum set_key_cmd cmd)
{
	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
	u8 data[32] = {};

	if (key->keylen > sizeof(data))
		return -EINVAL;

	mt76_rr_copy(dev, addr, data, sizeof(data));
	if (cmd == SET_KEY) {
		if (cipher == MT_CIPHER_TKIP) {
			/* Rx/Tx MIC keys are swapped */
			memcpy(data, key->key, 16);
			memcpy(data + 16, key->key + 24,
			       8);
			memcpy(data + 24, key->key + 16, 8);
		} else {
			/* shift an existing pairwise key up before adding
			 * a second one alongside a BIP key
			 */
			if (cipher != MT_CIPHER_BIP_CMAC_128 && wcid->cipher)
				memmove(data + 16, data, 16);
			if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
				memcpy(data, key->key, key->keylen);
			else if (cipher == MT_CIPHER_BIP_CMAC_128)
				memcpy(data + 16, key->key, 16);
		}
	} else {
		if (wcid->cipher & ~BIT(cipher)) {
			/* another key remains: keep it in the low half */
			if (cipher != MT_CIPHER_BIP_CMAC_128)
				memmove(data, data + 16, 16);
			memset(data + 16, 0, 16);
		} else {
			memset(data, 0, sizeof(data));
		}
	}
	mt76_wr_copy(dev, addr, data, sizeof(data));

	return 0;
}

/* Update the RX key-valid / key-index bits in WTBL words 0/1 for a key
 * add or delete. Returns 0 on success, -ETIMEDOUT if the WTBL stays
 * busy.
 */
static int
mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
			  enum mt7615_cipher_type cipher,
			  int keyidx, enum set_key_cmd cmd)
{
	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;

	if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
		return -ETIMEDOUT;

	w0 = mt76_rr(dev, addr);
	w1 = mt76_rr(dev, addr + 4);
	if (cmd == SET_KEY) {
		w0 |= MT_WTBL_W0_RX_KEY_VALID |
		      FIELD_PREP(MT_WTBL_W0_RX_IK_VALID,
				 cipher == MT_CIPHER_BIP_CMAC_128);
		if (cipher != MT_CIPHER_BIP_CMAC_128 ||
		    !wcid->cipher)
			w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
	} else {
		if (!(wcid->cipher & ~BIT(cipher)))
			w0 &= ~(MT_WTBL_W0_RX_KEY_VALID |
				MT_WTBL_W0_KEY_IDX);
		if (cipher == MT_CIPHER_BIP_CMAC_128)
			w0 &= ~MT_WTBL_W0_RX_IK_VALID;
	}
	mt76_wr(dev, MT_WTBL_RICR0, w0);
	mt76_wr(dev, MT_WTBL_RICR1, w1);

	if (!mt7615_mac_wtbl_update(dev, wcid->idx,
				    MT_WTBL_UPDATE_RXINFO_UPDATE))
		return -ETIMEDOUT;

	return 0;
}

/* Update the cipher type field in WTBL word 2 for a key add or delete,
 * keeping the pairwise cipher in place while a BIP key coexists.
 */
static void
mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
			      enum mt7615_cipher_type cipher,
			      enum set_key_cmd cmd)
{
	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);

	if (cmd
== SET_KEY) { 1118 if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher) 1119 mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE, 1120 FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher)); 1121 } else { 1122 if (cipher != MT_CIPHER_BIP_CMAC_128 && 1123 wcid->cipher & BIT(MT_CIPHER_BIP_CMAC_128)) 1124 mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE, 1125 FIELD_PREP(MT_WTBL_W2_KEY_TYPE, 1126 MT_CIPHER_BIP_CMAC_128)); 1127 else if (!(wcid->cipher & ~BIT(cipher))) 1128 mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE); 1129 } 1130 } 1131 1132 int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, 1133 struct mt76_wcid *wcid, 1134 struct ieee80211_key_conf *key, 1135 enum set_key_cmd cmd) 1136 { 1137 enum mt7615_cipher_type cipher; 1138 int err; 1139 1140 cipher = mt7615_mac_get_cipher(key->cipher); 1141 if (cipher == MT_CIPHER_NONE) 1142 return -EOPNOTSUPP; 1143 1144 mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cmd); 1145 err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cmd); 1146 if (err < 0) 1147 return err; 1148 1149 err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx, cmd); 1150 if (err < 0) 1151 return err; 1152 1153 if (cmd == SET_KEY) 1154 wcid->cipher |= BIT(cipher); 1155 else 1156 wcid->cipher &= ~BIT(cipher); 1157 1158 return 0; 1159 } 1160 1161 int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, 1162 struct mt76_wcid *wcid, 1163 struct ieee80211_key_conf *key, 1164 enum set_key_cmd cmd) 1165 { 1166 int err; 1167 1168 spin_lock_bh(&dev->mt76.lock); 1169 err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd); 1170 spin_unlock_bh(&dev->mt76.lock); 1171 1172 return err; 1173 } 1174 1175 static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta, 1176 struct ieee80211_tx_info *info, __le32 *txs_data) 1177 { 1178 struct ieee80211_supported_band *sband; 1179 struct mt7615_rate_set *rs; 1180 struct mt76_phy *mphy; 1181 int first_idx = 0, last_idx; 1182 int i, idx, count; 1183 bool fixed_rate, ack_timeout; 1184 bool probe, ampdu, cck = false; 
1185 bool rs_idx; 1186 u32 rate_set_tsf; 1187 u32 final_rate, final_rate_flags, final_nss, txs; 1188 1189 fixed_rate = info->status.rates[0].count; 1190 probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE); 1191 1192 txs = le32_to_cpu(txs_data[1]); 1193 ampdu = !fixed_rate && (txs & MT_TXS1_AMPDU); 1194 1195 txs = le32_to_cpu(txs_data[3]); 1196 count = FIELD_GET(MT_TXS3_TX_COUNT, txs); 1197 last_idx = FIELD_GET(MT_TXS3_LAST_TX_RATE, txs); 1198 1199 txs = le32_to_cpu(txs_data[0]); 1200 final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs); 1201 ack_timeout = txs & MT_TXS0_ACK_TIMEOUT; 1202 1203 if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT)) 1204 return false; 1205 1206 if (txs & MT_TXS0_QUEUE_TIMEOUT) 1207 return false; 1208 1209 if (!ack_timeout) 1210 info->flags |= IEEE80211_TX_STAT_ACK; 1211 1212 info->status.ampdu_len = 1; 1213 info->status.ampdu_ack_len = !!(info->flags & 1214 IEEE80211_TX_STAT_ACK); 1215 1216 if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU)) 1217 info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU; 1218 1219 first_idx = max_t(int, 0, last_idx - (count - 1) / MT7615_RATE_RETRY); 1220 1221 if (fixed_rate && !probe) { 1222 info->status.rates[0].count = count; 1223 i = 0; 1224 goto out; 1225 } 1226 1227 rate_set_tsf = READ_ONCE(sta->rate_set_tsf); 1228 rs_idx = !((u32)(FIELD_GET(MT_TXS4_F0_TIMESTAMP, le32_to_cpu(txs_data[4])) - 1229 rate_set_tsf) < 1000000); 1230 rs_idx ^= rate_set_tsf & BIT(0); 1231 rs = &sta->rateset[rs_idx]; 1232 1233 if (!first_idx && rs->probe_rate.idx >= 0) { 1234 info->status.rates[0] = rs->probe_rate; 1235 1236 spin_lock_bh(&dev->mt76.lock); 1237 if (sta->rate_probe) { 1238 struct mt7615_phy *phy = &dev->phy; 1239 1240 if (sta->wcid.ext_phy && dev->mt76.phy2) 1241 phy = dev->mt76.phy2->priv; 1242 1243 mt7615_mac_set_rates(phy, sta, NULL, sta->rates); 1244 } 1245 spin_unlock_bh(&dev->mt76.lock); 1246 } else { 1247 info->status.rates[0] = rs->rates[first_idx / 2]; 1248 } 1249 info->status.rates[0].count = 0; 1250 
1251 for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) { 1252 struct ieee80211_tx_rate *cur_rate; 1253 int cur_count; 1254 1255 cur_rate = &rs->rates[idx / 2]; 1256 cur_count = min_t(int, MT7615_RATE_RETRY, count); 1257 count -= cur_count; 1258 1259 if (idx && (cur_rate->idx != info->status.rates[i].idx || 1260 cur_rate->flags != info->status.rates[i].flags)) { 1261 i++; 1262 if (i == ARRAY_SIZE(info->status.rates)) { 1263 i--; 1264 break; 1265 } 1266 1267 info->status.rates[i] = *cur_rate; 1268 info->status.rates[i].count = 0; 1269 } 1270 1271 info->status.rates[i].count += cur_count; 1272 } 1273 1274 out: 1275 final_rate_flags = info->status.rates[i].flags; 1276 1277 switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) { 1278 case MT_PHY_TYPE_CCK: 1279 cck = true; 1280 fallthrough; 1281 case MT_PHY_TYPE_OFDM: 1282 mphy = &dev->mphy; 1283 if (sta->wcid.ext_phy && dev->mt76.phy2) 1284 mphy = dev->mt76.phy2; 1285 1286 if (mphy->chandef.chan->band == NL80211_BAND_5GHZ) 1287 sband = &mphy->sband_5g.sband; 1288 else 1289 sband = &mphy->sband_2g.sband; 1290 final_rate &= MT_TX_RATE_IDX; 1291 final_rate = mt76_get_rate(&dev->mt76, sband, final_rate, 1292 cck); 1293 final_rate_flags = 0; 1294 break; 1295 case MT_PHY_TYPE_HT_GF: 1296 case MT_PHY_TYPE_HT: 1297 final_rate_flags |= IEEE80211_TX_RC_MCS; 1298 final_rate &= MT_TX_RATE_IDX; 1299 if (final_rate > 31) 1300 return false; 1301 break; 1302 case MT_PHY_TYPE_VHT: 1303 final_nss = FIELD_GET(MT_TX_RATE_NSS, final_rate); 1304 1305 if ((final_rate & MT_TX_RATE_STBC) && final_nss) 1306 final_nss--; 1307 1308 final_rate_flags |= IEEE80211_TX_RC_VHT_MCS; 1309 final_rate = (final_rate & MT_TX_RATE_IDX) | (final_nss << 4); 1310 break; 1311 default: 1312 return false; 1313 } 1314 1315 info->status.rates[i].idx = final_rate; 1316 info->status.rates[i].flags = final_rate_flags; 1317 1318 return true; 1319 } 1320 1321 static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev, 1322 struct mt7615_sta *sta, int pid, 1323 __le32 
*txs_data) 1324 { 1325 struct mt76_dev *mdev = &dev->mt76; 1326 struct sk_buff_head list; 1327 struct sk_buff *skb; 1328 1329 if (pid < MT_PACKET_ID_FIRST) 1330 return false; 1331 1332 trace_mac_txdone(mdev, sta->wcid.idx, pid); 1333 1334 mt76_tx_status_lock(mdev, &list); 1335 skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list); 1336 if (skb) { 1337 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1338 1339 if (!mt7615_fill_txs(dev, sta, info, txs_data)) { 1340 ieee80211_tx_info_clear_status(info); 1341 info->status.rates[0].idx = -1; 1342 } 1343 1344 mt76_tx_status_skb_done(mdev, skb, &list); 1345 } 1346 mt76_tx_status_unlock(mdev, &list); 1347 1348 return !!skb; 1349 } 1350 1351 static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data) 1352 { 1353 struct ieee80211_tx_info info = {}; 1354 struct ieee80211_sta *sta = NULL; 1355 struct mt7615_sta *msta = NULL; 1356 struct mt76_wcid *wcid; 1357 struct mt76_phy *mphy = &dev->mt76.phy; 1358 __le32 *txs_data = data; 1359 u32 txs; 1360 u8 wcidx; 1361 u8 pid; 1362 1363 txs = le32_to_cpu(txs_data[0]); 1364 pid = FIELD_GET(MT_TXS0_PID, txs); 1365 txs = le32_to_cpu(txs_data[2]); 1366 wcidx = FIELD_GET(MT_TXS2_WCID, txs); 1367 1368 if (pid == MT_PACKET_ID_NO_ACK) 1369 return; 1370 1371 if (wcidx >= MT7615_WTBL_SIZE) 1372 return; 1373 1374 rcu_read_lock(); 1375 1376 wcid = rcu_dereference(dev->mt76.wcid[wcidx]); 1377 if (!wcid) 1378 goto out; 1379 1380 msta = container_of(wcid, struct mt7615_sta, wcid); 1381 sta = wcid_to_sta(wcid); 1382 1383 spin_lock_bh(&dev->sta_poll_lock); 1384 if (list_empty(&msta->poll_list)) 1385 list_add_tail(&msta->poll_list, &dev->sta_poll_list); 1386 spin_unlock_bh(&dev->sta_poll_lock); 1387 1388 if (mt7615_mac_add_txs_skb(dev, msta, pid, txs_data)) 1389 goto out; 1390 1391 if (wcidx >= MT7615_WTBL_STA || !sta) 1392 goto out; 1393 1394 if (wcid->ext_phy && dev->mt76.phy2) 1395 mphy = dev->mt76.phy2; 1396 1397 if (mt7615_fill_txs(dev, msta, &info, txs_data)) 1398 
ieee80211_tx_status_noskb(mphy->hw, sta, &info); 1399 1400 out: 1401 rcu_read_unlock(); 1402 } 1403 1404 static void 1405 mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token) 1406 { 1407 struct mt76_dev *mdev = &dev->mt76; 1408 struct mt76_txwi_cache *txwi; 1409 __le32 *txwi_data; 1410 u32 val; 1411 u8 wcid; 1412 1413 trace_mac_tx_free(dev, token); 1414 1415 spin_lock_bh(&dev->token_lock); 1416 txwi = idr_remove(&dev->token, token); 1417 spin_unlock_bh(&dev->token_lock); 1418 1419 if (!txwi) 1420 return; 1421 1422 txwi_data = (__le32 *)mt76_get_txwi_ptr(mdev, txwi); 1423 val = le32_to_cpu(txwi_data[1]); 1424 wcid = FIELD_GET(MT_TXD1_WLAN_IDX, val); 1425 1426 mt7615_txp_skb_unmap(mdev, txwi); 1427 if (txwi->skb) { 1428 mt76_tx_complete_skb(mdev, wcid, txwi->skb); 1429 txwi->skb = NULL; 1430 } 1431 1432 mt76_put_txwi(mdev, txwi); 1433 } 1434 1435 static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb) 1436 { 1437 struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data; 1438 u8 i, count; 1439 1440 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false); 1441 if (is_mt7615(&dev->mt76)) { 1442 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false); 1443 } else { 1444 for (i = 0; i < IEEE80211_NUM_ACS; i++) 1445 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false); 1446 } 1447 1448 count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl)); 1449 if (is_mt7615(&dev->mt76)) { 1450 __le16 *token = &free->token[0]; 1451 1452 for (i = 0; i < count; i++) 1453 mt7615_mac_tx_free_token(dev, le16_to_cpu(token[i])); 1454 } else { 1455 __le32 *token = (__le32 *)&free->token[0]; 1456 1457 for (i = 0; i < count; i++) 1458 mt7615_mac_tx_free_token(dev, le32_to_cpu(token[i])); 1459 } 1460 1461 dev_kfree_skb(skb); 1462 1463 if (test_bit(MT76_STATE_PM, &dev->phy.mt76->state)) 1464 return; 1465 1466 rcu_read_lock(); 1467 mt7615_mac_sta_poll(dev); 1468 rcu_read_unlock(); 1469 1470 mt76_connac_power_save_sched(&dev->mphy, &dev->pm); 
1471 mt76_worker_schedule(&dev->mt76.tx_worker); 1472 } 1473 1474 void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, 1475 struct sk_buff *skb) 1476 { 1477 struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); 1478 __le32 *rxd = (__le32 *)skb->data; 1479 __le32 *end = (__le32 *)&skb->data[skb->len]; 1480 enum rx_pkt_type type; 1481 u16 flag; 1482 1483 type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0])); 1484 flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0])); 1485 if (type == PKT_TYPE_RX_EVENT && flag == 0x1) 1486 type = PKT_TYPE_NORMAL_MCU; 1487 1488 switch (type) { 1489 case PKT_TYPE_TXS: 1490 for (rxd++; rxd + 7 <= end; rxd += 7) 1491 mt7615_mac_add_txs(dev, rxd); 1492 dev_kfree_skb(skb); 1493 break; 1494 case PKT_TYPE_TXRX_NOTIFY: 1495 mt7615_mac_tx_free(dev, skb); 1496 break; 1497 case PKT_TYPE_RX_EVENT: 1498 mt7615_mcu_rx_event(dev, skb); 1499 break; 1500 case PKT_TYPE_NORMAL_MCU: 1501 case PKT_TYPE_NORMAL: 1502 if (!mt7615_mac_fill_rx(dev, skb)) { 1503 mt76_rx(&dev->mt76, q, skb); 1504 return; 1505 } 1506 fallthrough; 1507 default: 1508 dev_kfree_skb(skb); 1509 break; 1510 } 1511 } 1512 EXPORT_SYMBOL_GPL(mt7615_queue_rx_skb); 1513 1514 static void 1515 mt7615_mac_set_sensitivity(struct mt7615_phy *phy, int val, bool ofdm) 1516 { 1517 struct mt7615_dev *dev = phy->dev; 1518 bool ext_phy = phy != &dev->phy; 1519 1520 if (is_mt7663(&dev->mt76)) { 1521 if (ofdm) 1522 mt76_rmw(dev, MT7663_WF_PHY_MIN_PRI_PWR(ext_phy), 1523 MT_WF_PHY_PD_OFDM_MASK(0), 1524 MT_WF_PHY_PD_OFDM(0, val)); 1525 else 1526 mt76_rmw(dev, MT7663_WF_PHY_RXTD_CCK_PD(ext_phy), 1527 MT_WF_PHY_PD_CCK_MASK(ext_phy), 1528 MT_WF_PHY_PD_CCK(ext_phy, val)); 1529 return; 1530 } 1531 1532 if (ofdm) 1533 mt76_rmw(dev, MT_WF_PHY_MIN_PRI_PWR(ext_phy), 1534 MT_WF_PHY_PD_OFDM_MASK(ext_phy), 1535 MT_WF_PHY_PD_OFDM(ext_phy, val)); 1536 else 1537 mt76_rmw(dev, MT_WF_PHY_RXTD_CCK_PD(ext_phy), 1538 MT_WF_PHY_PD_CCK_MASK(ext_phy), 1539 MT_WF_PHY_PD_CCK(ext_phy, val)); 1540 } 
1541 1542 static void 1543 mt7615_mac_set_default_sensitivity(struct mt7615_phy *phy) 1544 { 1545 /* ofdm */ 1546 mt7615_mac_set_sensitivity(phy, 0x13c, true); 1547 /* cck */ 1548 mt7615_mac_set_sensitivity(phy, 0x92, false); 1549 1550 phy->ofdm_sensitivity = -98; 1551 phy->cck_sensitivity = -110; 1552 phy->last_cca_adj = jiffies; 1553 } 1554 1555 void mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable) 1556 { 1557 struct mt7615_dev *dev = phy->dev; 1558 bool ext_phy = phy != &dev->phy; 1559 u32 reg, mask; 1560 1561 mt7615_mutex_acquire(dev); 1562 1563 if (phy->scs_en == enable) 1564 goto out; 1565 1566 if (is_mt7663(&dev->mt76)) { 1567 reg = MT7663_WF_PHY_MIN_PRI_PWR(ext_phy); 1568 mask = MT_WF_PHY_PD_BLK(0); 1569 } else { 1570 reg = MT_WF_PHY_MIN_PRI_PWR(ext_phy); 1571 mask = MT_WF_PHY_PD_BLK(ext_phy); 1572 } 1573 1574 if (enable) { 1575 mt76_set(dev, reg, mask); 1576 if (is_mt7622(&dev->mt76)) { 1577 mt76_set(dev, MT_MIB_M0_MISC_CR(0), 0x7 << 8); 1578 mt76_set(dev, MT_MIB_M0_MISC_CR(0), 0x7); 1579 } 1580 } else { 1581 mt76_clear(dev, reg, mask); 1582 } 1583 1584 mt7615_mac_set_default_sensitivity(phy); 1585 phy->scs_en = enable; 1586 1587 out: 1588 mt7615_mutex_release(dev); 1589 } 1590 1591 void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy) 1592 { 1593 u32 rxtd, reg; 1594 1595 if (is_mt7663(&dev->mt76)) 1596 reg = MT7663_WF_PHY_R0_PHYMUX_5; 1597 else 1598 reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy); 1599 1600 if (ext_phy) 1601 rxtd = MT_WF_PHY_RXTD2(10); 1602 else 1603 rxtd = MT_WF_PHY_RXTD(12); 1604 1605 mt76_set(dev, rxtd, BIT(18) | BIT(29)); 1606 mt76_set(dev, reg, 0x5 << 12); 1607 } 1608 1609 void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy) 1610 { 1611 struct mt7615_dev *dev = phy->dev; 1612 bool ext_phy = phy != &dev->phy; 1613 u32 reg; 1614 1615 if (is_mt7663(&dev->mt76)) 1616 reg = MT7663_WF_PHY_R0_PHYMUX_5; 1617 else 1618 reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy); 1619 1620 /* reset PD and MDRDY counters */ 1621 mt76_clear(dev, reg, 
GENMASK(22, 20)); 1622 mt76_set(dev, reg, BIT(22) | BIT(20)); 1623 } 1624 1625 static void 1626 mt7615_mac_adjust_sensitivity(struct mt7615_phy *phy, 1627 u32 rts_err_rate, bool ofdm) 1628 { 1629 struct mt7615_dev *dev = phy->dev; 1630 int false_cca = ofdm ? phy->false_cca_ofdm : phy->false_cca_cck; 1631 bool ext_phy = phy != &dev->phy; 1632 u16 def_th = ofdm ? -98 : -110; 1633 bool update = false; 1634 s8 *sensitivity; 1635 int signal; 1636 1637 sensitivity = ofdm ? &phy->ofdm_sensitivity : &phy->cck_sensitivity; 1638 signal = mt76_get_min_avg_rssi(&dev->mt76, ext_phy); 1639 if (!signal) { 1640 mt7615_mac_set_default_sensitivity(phy); 1641 return; 1642 } 1643 1644 signal = min(signal, -72); 1645 if (false_cca > 500) { 1646 if (rts_err_rate > MT_FRAC(40, 100)) 1647 return; 1648 1649 /* decrease coverage */ 1650 if (*sensitivity == def_th && signal > -90) { 1651 *sensitivity = -90; 1652 update = true; 1653 } else if (*sensitivity + 2 < signal) { 1654 *sensitivity += 2; 1655 update = true; 1656 } 1657 } else if ((false_cca > 0 && false_cca < 50) || 1658 rts_err_rate > MT_FRAC(60, 100)) { 1659 /* increase coverage */ 1660 if (*sensitivity - 2 >= def_th) { 1661 *sensitivity -= 2; 1662 update = true; 1663 } 1664 } 1665 1666 if (*sensitivity > signal) { 1667 *sensitivity = signal; 1668 update = true; 1669 } 1670 1671 if (update) { 1672 u16 val = ofdm ? 
*sensitivity * 2 + 512 : *sensitivity + 256; 1673 1674 mt7615_mac_set_sensitivity(phy, val, ofdm); 1675 phy->last_cca_adj = jiffies; 1676 } 1677 } 1678 1679 static void 1680 mt7615_mac_scs_check(struct mt7615_phy *phy) 1681 { 1682 struct mt7615_dev *dev = phy->dev; 1683 struct mib_stats *mib = &phy->mib; 1684 u32 val, rts_err_rate = 0; 1685 u32 mdrdy_cck, mdrdy_ofdm, pd_cck, pd_ofdm; 1686 bool ext_phy = phy != &dev->phy; 1687 1688 if (!phy->scs_en) 1689 return; 1690 1691 if (is_mt7663(&dev->mt76)) 1692 val = mt76_rr(dev, MT7663_WF_PHY_R0_PHYCTRL_STS0(ext_phy)); 1693 else 1694 val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS0(ext_phy)); 1695 pd_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_CCK, val); 1696 pd_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_OFDM, val); 1697 1698 if (is_mt7663(&dev->mt76)) 1699 val = mt76_rr(dev, MT7663_WF_PHY_R0_PHYCTRL_STS5(ext_phy)); 1700 else 1701 val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS5(ext_phy)); 1702 mdrdy_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_CCK, val); 1703 mdrdy_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_OFDM, val); 1704 1705 phy->false_cca_ofdm = pd_ofdm - mdrdy_ofdm; 1706 phy->false_cca_cck = pd_cck - mdrdy_cck; 1707 mt7615_mac_cca_stats_reset(phy); 1708 1709 if (mib->rts_cnt + mib->rts_retries_cnt) 1710 rts_err_rate = MT_FRAC(mib->rts_retries_cnt, 1711 mib->rts_cnt + mib->rts_retries_cnt); 1712 1713 /* cck */ 1714 mt7615_mac_adjust_sensitivity(phy, rts_err_rate, false); 1715 /* ofdm */ 1716 mt7615_mac_adjust_sensitivity(phy, rts_err_rate, true); 1717 1718 if (time_after(jiffies, phy->last_cca_adj + 10 * HZ)) 1719 mt7615_mac_set_default_sensitivity(phy); 1720 } 1721 1722 static u8 1723 mt7615_phy_get_nf(struct mt7615_dev *dev, int idx) 1724 { 1725 static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 }; 1726 u32 reg, val, sum = 0, n = 0; 1727 int i; 1728 1729 if (is_mt7663(&dev->mt76)) 1730 reg = MT7663_WF_PHY_RXTD(20); 1731 else 1732 reg = idx ? 
MT_WF_PHY_RXTD2(17) : MT_WF_PHY_RXTD(20); 1733 1734 for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) { 1735 val = mt76_rr(dev, reg); 1736 sum += val * nf_power[i]; 1737 n += val; 1738 } 1739 1740 if (!n) 1741 return 0; 1742 1743 return sum / n; 1744 } 1745 1746 static void 1747 mt7615_phy_update_channel(struct mt76_phy *mphy, int idx) 1748 { 1749 struct mt7615_dev *dev = container_of(mphy->dev, struct mt7615_dev, mt76); 1750 struct mt7615_phy *phy = mphy->priv; 1751 struct mt76_channel_state *state; 1752 u64 busy_time, tx_time, rx_time, obss_time; 1753 u32 obss_reg = idx ? MT_WF_RMAC_MIB_TIME6 : MT_WF_RMAC_MIB_TIME5; 1754 int nf; 1755 1756 busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx), 1757 MT_MIB_SDR9_BUSY_MASK); 1758 tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx), 1759 MT_MIB_SDR36_TXTIME_MASK); 1760 rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx), 1761 MT_MIB_SDR37_RXTIME_MASK); 1762 obss_time = mt76_get_field(dev, obss_reg, MT_MIB_OBSSTIME_MASK); 1763 1764 nf = mt7615_phy_get_nf(dev, idx); 1765 if (!phy->noise) 1766 phy->noise = nf << 4; 1767 else if (nf) 1768 phy->noise += nf - (phy->noise >> 4); 1769 1770 state = mphy->chan_state; 1771 state->cc_busy += busy_time; 1772 state->cc_tx += tx_time; 1773 state->cc_rx += rx_time + obss_time; 1774 state->cc_bss_rx += rx_time; 1775 state->noise = -(phy->noise >> 4); 1776 } 1777 1778 static void __mt7615_update_channel(struct mt7615_dev *dev) 1779 { 1780 struct mt76_dev *mdev = &dev->mt76; 1781 1782 mt7615_phy_update_channel(&mdev->phy, 0); 1783 if (mdev->phy2) 1784 mt7615_phy_update_channel(mdev->phy2, 1); 1785 1786 /* reset obss airtime */ 1787 mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR); 1788 } 1789 1790 void mt7615_update_channel(struct mt76_dev *mdev) 1791 { 1792 struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); 1793 1794 if (mt76_connac_pm_wake(&dev->mphy, &dev->pm)) 1795 return; 1796 1797 __mt7615_update_channel(dev); 1798 mt76_connac_power_save_sched(&dev->mphy, 
&dev->pm); 1799 } 1800 EXPORT_SYMBOL_GPL(mt7615_update_channel); 1801 1802 static void mt7615_update_survey(struct mt7615_dev *dev) 1803 { 1804 struct mt76_dev *mdev = &dev->mt76; 1805 ktime_t cur_time; 1806 1807 __mt7615_update_channel(dev); 1808 cur_time = ktime_get_boottime(); 1809 1810 mt76_update_survey_active_time(&mdev->phy, cur_time); 1811 if (mdev->phy2) 1812 mt76_update_survey_active_time(mdev->phy2, cur_time); 1813 } 1814 1815 static void 1816 mt7615_mac_update_mib_stats(struct mt7615_phy *phy) 1817 { 1818 struct mt7615_dev *dev = phy->dev; 1819 struct mib_stats *mib = &phy->mib; 1820 bool ext_phy = phy != &dev->phy; 1821 int i, aggr; 1822 u32 val, val2; 1823 1824 memset(mib, 0, sizeof(*mib)); 1825 1826 mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy), 1827 MT_MIB_SDR3_FCS_ERR_MASK); 1828 1829 val = mt76_get_field(dev, MT_MIB_SDR14(ext_phy), 1830 MT_MIB_AMPDU_MPDU_COUNT); 1831 if (val) { 1832 val2 = mt76_get_field(dev, MT_MIB_SDR15(ext_phy), 1833 MT_MIB_AMPDU_ACK_COUNT); 1834 mib->aggr_per = 1000 * (val - val2) / val; 1835 } 1836 1837 aggr = ext_phy ? 
ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0; 1838 for (i = 0; i < 4; i++) { 1839 val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i)); 1840 1841 val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val); 1842 if (val2 > mib->ack_fail_cnt) 1843 mib->ack_fail_cnt = val2; 1844 1845 val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val); 1846 if (val2 > mib->ba_miss_cnt) 1847 mib->ba_miss_cnt = val2; 1848 1849 val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i)); 1850 val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val); 1851 if (val2 > mib->rts_retries_cnt) { 1852 mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val); 1853 mib->rts_retries_cnt = val2; 1854 } 1855 1856 val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i)); 1857 1858 dev->mt76.aggr_stats[aggr++] += val & 0xffff; 1859 dev->mt76.aggr_stats[aggr++] += val >> 16; 1860 } 1861 } 1862 1863 void mt7615_pm_wake_work(struct work_struct *work) 1864 { 1865 struct mt7615_dev *dev; 1866 struct mt76_phy *mphy; 1867 1868 dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev, 1869 pm.wake_work); 1870 mphy = dev->phy.mt76; 1871 1872 if (!mt7615_mcu_set_drv_ctrl(dev)) 1873 mt76_connac_pm_dequeue_skbs(mphy, &dev->pm); 1874 else 1875 dev_err(mphy->dev->dev, "failed to wake device\n"); 1876 1877 ieee80211_wake_queues(mphy->hw); 1878 complete_all(&dev->pm.wake_cmpl); 1879 } 1880 1881 void mt7615_pm_power_save_work(struct work_struct *work) 1882 { 1883 struct mt7615_dev *dev; 1884 unsigned long delta; 1885 1886 dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev, 1887 pm.ps_work.work); 1888 1889 delta = dev->pm.idle_timeout; 1890 if (time_is_after_jiffies(dev->pm.last_activity + delta)) { 1891 delta = dev->pm.last_activity + delta - jiffies; 1892 goto out; 1893 } 1894 1895 if (!mt7615_mcu_set_fw_ctrl(dev)) 1896 return; 1897 out: 1898 queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta); 1899 } 1900 1901 void mt7615_mac_work(struct work_struct *work) 1902 { 1903 struct mt7615_phy *phy; 1904 struct mt76_phy *mphy; 1905 1906 mphy = (struct 
mt76_phy *)container_of(work, struct mt76_phy, 1907 mac_work.work); 1908 phy = mphy->priv; 1909 1910 mt7615_mutex_acquire(phy->dev); 1911 1912 mt7615_update_survey(phy->dev); 1913 if (++mphy->mac_work_count == 5) { 1914 mphy->mac_work_count = 0; 1915 1916 mt7615_mac_update_mib_stats(phy); 1917 mt7615_mac_scs_check(phy); 1918 } 1919 1920 mt7615_mutex_release(phy->dev); 1921 1922 mt76_tx_status_check(mphy->dev, NULL, false); 1923 ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, 1924 MT7615_WATCHDOG_TIME); 1925 } 1926 1927 static bool 1928 mt7615_wait_reset_state(struct mt7615_dev *dev, u32 state) 1929 { 1930 bool ret; 1931 1932 ret = wait_event_timeout(dev->reset_wait, 1933 (READ_ONCE(dev->reset_state) & state), 1934 MT7615_RESET_TIMEOUT); 1935 WARN(!ret, "Timeout waiting for MCU reset state %x\n", state); 1936 return ret; 1937 } 1938 1939 static void 1940 mt7615_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif) 1941 { 1942 struct ieee80211_hw *hw = priv; 1943 struct mt7615_dev *dev = mt7615_hw_dev(hw); 1944 1945 switch (vif->type) { 1946 case NL80211_IFTYPE_MESH_POINT: 1947 case NL80211_IFTYPE_ADHOC: 1948 case NL80211_IFTYPE_AP: 1949 mt7615_mcu_add_beacon(dev, hw, vif, 1950 vif->bss_conf.enable_beacon); 1951 break; 1952 default: 1953 break; 1954 } 1955 } 1956 1957 static void 1958 mt7615_update_beacons(struct mt7615_dev *dev) 1959 { 1960 ieee80211_iterate_active_interfaces(dev->mt76.hw, 1961 IEEE80211_IFACE_ITER_RESUME_ALL, 1962 mt7615_update_vif_beacon, dev->mt76.hw); 1963 1964 if (!dev->mt76.phy2) 1965 return; 1966 1967 ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw, 1968 IEEE80211_IFACE_ITER_RESUME_ALL, 1969 mt7615_update_vif_beacon, dev->mt76.phy2->hw); 1970 } 1971 1972 void mt7615_dma_reset(struct mt7615_dev *dev) 1973 { 1974 int i; 1975 1976 mt76_clear(dev, MT_WPDMA_GLO_CFG, 1977 MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN | 1978 MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); 1979 usleep_range(1000, 2000); 1980 1981 
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true); 1982 for (i = 0; i < __MT_TXQ_MAX; i++) 1983 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true); 1984 1985 mt76_for_each_q_rx(&dev->mt76, i) { 1986 mt76_queue_rx_reset(dev, i); 1987 } 1988 1989 mt76_set(dev, MT_WPDMA_GLO_CFG, 1990 MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN | 1991 MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); 1992 } 1993 EXPORT_SYMBOL_GPL(mt7615_dma_reset); 1994 1995 void mt7615_tx_token_put(struct mt7615_dev *dev) 1996 { 1997 struct mt76_txwi_cache *txwi; 1998 int id; 1999 2000 spin_lock_bh(&dev->token_lock); 2001 idr_for_each_entry(&dev->token, txwi, id) { 2002 mt7615_txp_skb_unmap(&dev->mt76, txwi); 2003 if (txwi->skb) 2004 dev_kfree_skb_any(txwi->skb); 2005 mt76_put_txwi(&dev->mt76, txwi); 2006 } 2007 spin_unlock_bh(&dev->token_lock); 2008 idr_destroy(&dev->token); 2009 } 2010 EXPORT_SYMBOL_GPL(mt7615_tx_token_put); 2011 2012 void mt7615_mac_reset_work(struct work_struct *work) 2013 { 2014 struct mt7615_phy *phy2; 2015 struct mt76_phy *ext_phy; 2016 struct mt7615_dev *dev; 2017 2018 dev = container_of(work, struct mt7615_dev, reset_work); 2019 ext_phy = dev->mt76.phy2; 2020 phy2 = ext_phy ? 
ext_phy->priv : NULL; 2021 2022 if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_PDMA)) 2023 return; 2024 2025 ieee80211_stop_queues(mt76_hw(dev)); 2026 if (ext_phy) 2027 ieee80211_stop_queues(ext_phy->hw); 2028 2029 set_bit(MT76_RESET, &dev->mphy.state); 2030 set_bit(MT76_MCU_RESET, &dev->mphy.state); 2031 wake_up(&dev->mt76.mcu.wait); 2032 cancel_delayed_work_sync(&dev->mphy.mac_work); 2033 del_timer_sync(&dev->phy.roc_timer); 2034 cancel_work_sync(&dev->phy.roc_work); 2035 if (phy2) { 2036 cancel_delayed_work_sync(&phy2->mt76->mac_work); 2037 del_timer_sync(&phy2->roc_timer); 2038 cancel_work_sync(&phy2->roc_work); 2039 } 2040 2041 /* lock/unlock all queues to ensure that no tx is pending */ 2042 mt76_txq_schedule_all(&dev->mphy); 2043 if (ext_phy) 2044 mt76_txq_schedule_all(ext_phy); 2045 2046 mt76_worker_disable(&dev->mt76.tx_worker); 2047 napi_disable(&dev->mt76.napi[0]); 2048 napi_disable(&dev->mt76.napi[1]); 2049 napi_disable(&dev->mt76.tx_napi); 2050 2051 mt7615_mutex_acquire(dev); 2052 2053 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_STOPPED); 2054 2055 mt7615_tx_token_put(dev); 2056 idr_init(&dev->token); 2057 2058 if (mt7615_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) { 2059 mt7615_dma_reset(dev); 2060 2061 mt76_wr(dev, MT_WPDMA_MEM_RNG_ERR, 0); 2062 2063 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_INIT); 2064 mt7615_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE); 2065 } 2066 2067 clear_bit(MT76_MCU_RESET, &dev->mphy.state); 2068 clear_bit(MT76_RESET, &dev->mphy.state); 2069 2070 mt76_worker_enable(&dev->mt76.tx_worker); 2071 napi_enable(&dev->mt76.tx_napi); 2072 napi_schedule(&dev->mt76.tx_napi); 2073 2074 napi_enable(&dev->mt76.napi[0]); 2075 napi_schedule(&dev->mt76.napi[0]); 2076 2077 napi_enable(&dev->mt76.napi[1]); 2078 napi_schedule(&dev->mt76.napi[1]); 2079 2080 ieee80211_wake_queues(mt76_hw(dev)); 2081 if (ext_phy) 2082 ieee80211_wake_queues(ext_phy->hw); 2083 2084 mt76_wr(dev, MT_MCU_INT_EVENT, 
MT_MCU_INT_EVENT_RESET_DONE); 2085 mt7615_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE); 2086 2087 mt7615_update_beacons(dev); 2088 2089 mt7615_mutex_release(dev); 2090 2091 ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work, 2092 MT7615_WATCHDOG_TIME); 2093 if (phy2) 2094 ieee80211_queue_delayed_work(ext_phy->hw, 2095 &phy2->mt76->mac_work, 2096 MT7615_WATCHDOG_TIME); 2097 2098 } 2099 2100 static void mt7615_dfs_stop_radar_detector(struct mt7615_phy *phy) 2101 { 2102 struct mt7615_dev *dev = phy->dev; 2103 2104 if (phy->rdd_state & BIT(0)) 2105 mt7615_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0); 2106 if (phy->rdd_state & BIT(1)) 2107 mt7615_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0); 2108 } 2109 2110 static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain) 2111 { 2112 int err; 2113 2114 err = mt7615_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0); 2115 if (err < 0) 2116 return err; 2117 2118 return mt7615_mcu_rdd_cmd(dev, RDD_DET_MODE, chain, 2119 MT_RX_SEL0, 1); 2120 } 2121 2122 static int mt7615_dfs_start_radar_detector(struct mt7615_phy *phy) 2123 { 2124 struct cfg80211_chan_def *chandef = &phy->mt76->chandef; 2125 struct mt7615_dev *dev = phy->dev; 2126 bool ext_phy = phy != &dev->phy; 2127 int err; 2128 2129 /* start CAC */ 2130 err = mt7615_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0); 2131 if (err < 0) 2132 return err; 2133 2134 err = mt7615_dfs_start_rdd(dev, ext_phy); 2135 if (err < 0) 2136 return err; 2137 2138 phy->rdd_state |= BIT(ext_phy); 2139 2140 if (chandef->width == NL80211_CHAN_WIDTH_160 || 2141 chandef->width == NL80211_CHAN_WIDTH_80P80) { 2142 err = mt7615_dfs_start_rdd(dev, 1); 2143 if (err < 0) 2144 return err; 2145 2146 phy->rdd_state |= BIT(1); 2147 } 2148 2149 return 0; 2150 } 2151 2152 static int 2153 mt7615_dfs_init_radar_specs(struct mt7615_phy *phy) 2154 { 2155 const struct mt7615_dfs_radar_spec *radar_specs; 2156 struct mt7615_dev *dev = phy->dev; 2157 int err, i; 2158 2159 switch 
(dev->mt76.region) { 2160 case NL80211_DFS_FCC: 2161 radar_specs = &fcc_radar_specs; 2162 err = mt7615_mcu_set_fcc5_lpn(dev, 8); 2163 if (err < 0) 2164 return err; 2165 break; 2166 case NL80211_DFS_ETSI: 2167 radar_specs = &etsi_radar_specs; 2168 break; 2169 case NL80211_DFS_JP: 2170 radar_specs = &jp_radar_specs; 2171 break; 2172 default: 2173 return -EINVAL; 2174 } 2175 2176 for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) { 2177 err = mt7615_mcu_set_radar_th(dev, i, 2178 &radar_specs->radar_pattern[i]); 2179 if (err < 0) 2180 return err; 2181 } 2182 2183 return mt7615_mcu_set_pulse_th(dev, &radar_specs->pulse_th); 2184 } 2185 2186 int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy) 2187 { 2188 struct cfg80211_chan_def *chandef = &phy->mt76->chandef; 2189 struct mt7615_dev *dev = phy->dev; 2190 bool ext_phy = phy != &dev->phy; 2191 int err; 2192 2193 if (is_mt7663(&dev->mt76)) 2194 return 0; 2195 2196 if (dev->mt76.region == NL80211_DFS_UNSET) { 2197 phy->dfs_state = -1; 2198 if (phy->rdd_state) 2199 goto stop; 2200 2201 return 0; 2202 } 2203 2204 if (test_bit(MT76_SCANNING, &phy->mt76->state)) 2205 return 0; 2206 2207 if (phy->dfs_state == chandef->chan->dfs_state) 2208 return 0; 2209 2210 err = mt7615_dfs_init_radar_specs(phy); 2211 if (err < 0) { 2212 phy->dfs_state = -1; 2213 goto stop; 2214 } 2215 2216 phy->dfs_state = chandef->chan->dfs_state; 2217 2218 if (chandef->chan->flags & IEEE80211_CHAN_RADAR) { 2219 if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE) 2220 return mt7615_dfs_start_radar_detector(phy); 2221 2222 return mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy, 2223 MT_RX_SEL0, 0); 2224 } 2225 2226 stop: 2227 err = mt7615_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy, MT_RX_SEL0, 0); 2228 if (err < 0) 2229 return err; 2230 2231 mt7615_dfs_stop_radar_detector(phy); 2232 return 0; 2233 } 2234 2235 int mt7615_mac_set_beacon_filter(struct mt7615_phy *phy, 2236 struct ieee80211_vif *vif, 2237 bool enable) 2238 { 2239 struct mt7615_dev 
*dev = phy->dev; 2240 bool ext_phy = phy != &dev->phy; 2241 int err; 2242 2243 if (!mt7615_firmware_offload(dev)) 2244 return -EOPNOTSUPP; 2245 2246 switch (vif->type) { 2247 case NL80211_IFTYPE_MONITOR: 2248 return 0; 2249 case NL80211_IFTYPE_MESH_POINT: 2250 case NL80211_IFTYPE_ADHOC: 2251 case NL80211_IFTYPE_AP: 2252 if (enable) 2253 phy->n_beacon_vif++; 2254 else 2255 phy->n_beacon_vif--; 2256 fallthrough; 2257 default: 2258 break; 2259 } 2260 2261 err = mt7615_mcu_set_bss_pm(dev, vif, !phy->n_beacon_vif); 2262 if (err) 2263 return err; 2264 2265 if (phy->n_beacon_vif) { 2266 vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER; 2267 mt76_clear(dev, MT_WF_RFCR(ext_phy), 2268 MT_WF_RFCR_DROP_OTHER_BEACON); 2269 } else { 2270 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; 2271 mt76_set(dev, MT_WF_RFCR(ext_phy), 2272 MT_WF_RFCR_DROP_OTHER_BEACON); 2273 } 2274 2275 return 0; 2276 } 2277 2278 void mt7615_coredump_work(struct work_struct *work) 2279 { 2280 struct mt7615_dev *dev; 2281 char *dump, *data; 2282 2283 dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev, 2284 coredump.work.work); 2285 2286 if (time_is_after_jiffies(dev->coredump.last_activity + 2287 4 * MT76_CONNAC_COREDUMP_TIMEOUT)) { 2288 queue_delayed_work(dev->mt76.wq, &dev->coredump.work, 2289 MT76_CONNAC_COREDUMP_TIMEOUT); 2290 return; 2291 } 2292 2293 dump = vzalloc(MT76_CONNAC_COREDUMP_SZ); 2294 data = dump; 2295 2296 while (true) { 2297 struct sk_buff *skb; 2298 2299 spin_lock_bh(&dev->mt76.lock); 2300 skb = __skb_dequeue(&dev->coredump.msg_list); 2301 spin_unlock_bh(&dev->mt76.lock); 2302 2303 if (!skb) 2304 break; 2305 2306 skb_pull(skb, sizeof(struct mt7615_mcu_rxd)); 2307 if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) 2308 break; 2309 2310 memcpy(data, skb->data, skb->len); 2311 data += skb->len; 2312 2313 dev_kfree_skb(skb); 2314 } 2315 dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ, 2316 GFP_KERNEL); 2317 } 2318