1 // SPDX-License-Identifier: ISC 2 /* Copyright (C) 2019 MediaTek Inc. 3 * 4 * Author: Ryder Lee <ryder.lee@mediatek.com> 5 * Roy Luo <royluo@google.com> 6 * Felix Fietkau <nbd@nbd.name> 7 * Lorenzo Bianconi <lorenzo@kernel.org> 8 */ 9 10 #include <linux/devcoredump.h> 11 #include <linux/etherdevice.h> 12 #include <linux/timekeeping.h> 13 #include "mt7615.h" 14 #include "../trace.h" 15 #include "../dma.h" 16 #include "mt7615_trace.h" 17 #include "mac.h" 18 #include "mcu.h" 19 20 #define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2) 21 22 static const struct mt7615_dfs_radar_spec etsi_radar_specs = { 23 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, 24 .radar_pattern = { 25 [5] = { 1, 0, 6, 32, 28, 0, 17, 990, 5010, 1, 1 }, 26 [6] = { 1, 0, 9, 32, 28, 0, 27, 615, 5010, 1, 1 }, 27 [7] = { 1, 0, 15, 32, 28, 0, 27, 240, 445, 1, 1 }, 28 [8] = { 1, 0, 12, 32, 28, 0, 42, 240, 510, 1, 1 }, 29 [9] = { 1, 1, 0, 0, 0, 0, 14, 2490, 3343, 0, 0, 12, 32, 28 }, 30 [10] = { 1, 1, 0, 0, 0, 0, 14, 2490, 3343, 0, 0, 15, 32, 24 }, 31 [11] = { 1, 1, 0, 0, 0, 0, 14, 823, 2510, 0, 0, 18, 32, 28 }, 32 [12] = { 1, 1, 0, 0, 0, 0, 14, 823, 2510, 0, 0, 27, 32, 24 }, 33 }, 34 }; 35 36 static const struct mt7615_dfs_radar_spec fcc_radar_specs = { 37 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, 38 .radar_pattern = { 39 [0] = { 1, 0, 9, 32, 28, 0, 13, 508, 3076, 1, 1 }, 40 [1] = { 1, 0, 12, 32, 28, 0, 17, 140, 240, 1, 1 }, 41 [2] = { 1, 0, 8, 32, 28, 0, 22, 190, 510, 1, 1 }, 42 [3] = { 1, 0, 6, 32, 28, 0, 32, 190, 510, 1, 1 }, 43 [4] = { 1, 0, 9, 255, 28, 0, 13, 323, 343, 1, 32 }, 44 }, 45 }; 46 47 static const struct mt7615_dfs_radar_spec jp_radar_specs = { 48 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, 49 .radar_pattern = { 50 [0] = { 1, 0, 8, 32, 28, 0, 13, 508, 3076, 1, 1 }, 51 [1] = { 1, 0, 12, 32, 28, 0, 17, 140, 240, 1, 1 }, 52 [2] = { 1, 0, 8, 32, 28, 0, 22, 190, 510, 1, 1 }, 53 [3] = { 1, 0, 6, 32, 28, 0, 32, 190, 510, 1, 1 }, 54 [4] = { 1, 0, 9, 32, 28, 0, 13, 323, 343, 1, 32 }, 55 [13] = { 1, 0, 8, 32, 28, 0, 14, 3836, 3856, 1, 1 }, 56 [14] = { 1, 0, 8, 32, 28, 0, 14, 3990, 4010, 1, 1 }, 57 }, 58 }; 59 60 static enum mt76_cipher_type 61 mt7615_mac_get_cipher(int cipher) 62 { 63 switch (cipher) { 64 case WLAN_CIPHER_SUITE_WEP40: 65 return MT_CIPHER_WEP40; 66 case WLAN_CIPHER_SUITE_WEP104: 67 return MT_CIPHER_WEP104; 68 case WLAN_CIPHER_SUITE_TKIP: 69 return MT_CIPHER_TKIP; 70 case WLAN_CIPHER_SUITE_AES_CMAC: 71 return MT_CIPHER_BIP_CMAC_128; 72 case WLAN_CIPHER_SUITE_CCMP: 73 return MT_CIPHER_AES_CCMP; 74 case WLAN_CIPHER_SUITE_CCMP_256: 75 return MT_CIPHER_CCMP_256; 76 case WLAN_CIPHER_SUITE_GCMP: 77 return MT_CIPHER_GCMP; 78 case WLAN_CIPHER_SUITE_GCMP_256: 79 return MT_CIPHER_GCMP_256; 80 case WLAN_CIPHER_SUITE_SMS4: 81 return MT_CIPHER_WAPI; 82 default: 83 return MT_CIPHER_NONE; 84 } 85 } 86 87 static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev, 88 u8 idx, bool unicast) 89 { 90 struct mt7615_sta *sta; 91 struct mt76_wcid *wcid; 92 93 if (idx >= MT7615_WTBL_SIZE) 94 return NULL; 95 96 wcid = rcu_dereference(dev->mt76.wcid[idx]); 97 if (unicast || !wcid) 98 return wcid; 99 100 if (!wcid->sta) 101 return NULL; 102 103 sta = container_of(wcid, struct mt7615_sta, wcid); 104 if (!sta->vif) 105 return NULL; 106 107 return &sta->vif->sta.wcid; 108 } 109 110 void mt7615_mac_reset_counters(struct mt7615_dev *dev) 111 { 112 int i; 113 114 for (i = 0; i < 4; i++) { 115 mt76_rr(dev, MT_TX_AGG_CNT(0, i)); 116 mt76_rr(dev, MT_TX_AGG_CNT(1, i)); 117 } 118 119 memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats)); 120 dev->mt76.phy.survey_time = ktime_get_boottime(); 121 if (dev->mt76.phy2) 122 dev->mt76.phy2->survey_time = ktime_get_boottime(); 123 124 /* reset airtime counters */ 125 mt76_rr(dev, MT_MIB_SDR9(0)); 126 mt76_rr(dev, MT_MIB_SDR9(1)); 127 128 mt76_rr(dev, MT_MIB_SDR36(0)); 129 mt76_rr(dev, MT_MIB_SDR36(1)); 130 131 mt76_rr(dev, MT_MIB_SDR37(0)); 132 mt76_rr(dev, MT_MIB_SDR37(1)); 133 134 mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR); 135 mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_CLR); 136 } 137 138 void mt7615_mac_set_timing(struct mt7615_phy *phy) 139 { 140 s16 coverage_class = phy->coverage_class; 141 struct mt7615_dev *dev = phy->dev; 142 bool ext_phy = phy != &dev->phy; 143 u32 val, reg_offset; 144 u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) | 145 FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48); 146 u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) | 147 FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28); 148 int sifs, offset; 149 bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ; 150 151 if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state)) 152 return; 153 154 if (is_5ghz) 155 sifs = 16; 156 else 157 sifs = 10; 158 159 if (ext_phy) { 160 coverage_class = max_t(s16, dev->phy.coverage_class, 161 coverage_class); 162 mt76_set(dev, MT_ARB_SCR, 163 MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE); 164 } else { 165 struct mt7615_phy *phy_ext = mt7615_ext_phy(dev); 166 167 if (phy_ext) 168 coverage_class = max_t(s16, phy_ext->coverage_class, 169 coverage_class); 170 mt76_set(dev, MT_ARB_SCR, 171 MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE); 172 } 173 udelay(1); 174 175 offset = 3 * coverage_class; 176 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) | 177 FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset); 178 mt76_wr(dev, MT_TMAC_CDTR, cck + reg_offset); 179 mt76_wr(dev, MT_TMAC_ODTR, ofdm + reg_offset); 180 181 mt76_wr(dev, MT_TMAC_ICR(ext_phy), 182 FIELD_PREP(MT_IFS_EIFS, 360) | 183 FIELD_PREP(MT_IFS_RIFS, 2) | 184 FIELD_PREP(MT_IFS_SIFS, sifs) | 185 FIELD_PREP(MT_IFS_SLOT, phy->slottime)); 186 187 if (phy->slottime < 20 || is_5ghz) 188 val = MT7615_CFEND_RATE_DEFAULT; 189 else 190 val = MT7615_CFEND_RATE_11B; 191 192 mt76_rmw_field(dev, MT_AGG_ACR(ext_phy), MT_AGG_ACR_CFEND_RATE, val); 193 if (ext_phy) 194 mt76_clear(dev, MT_ARB_SCR, 195 MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE); 196 else 197 mt76_clear(dev, MT_ARB_SCR, 198 MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE); 199 200 } 201 202 static void 203 mt7615_get_status_freq_info(struct mt7615_dev *dev, struct mt76_phy *mphy, 204 struct mt76_rx_status *status, u8 chfreq) 205 { 206 if (!test_bit(MT76_HW_SCANNING, &mphy->state) && 207 !test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) && 208 !test_bit(MT76_STATE_ROC, &mphy->state)) { 209 status->freq = mphy->chandef.chan->center_freq; 210 status->band = mphy->chandef.chan->band; 211 return; 212 } 213 214 status->band = chfreq <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; 215 status->freq = ieee80211_channel_to_frequency(chfreq, status->band); 216 } 217 218 static void mt7615_mac_fill_tm_rx(struct mt7615_phy *phy, __le32 *rxv) 219 { 220 #ifdef CONFIG_NL80211_TESTMODE 221 u32 rxv1 = le32_to_cpu(rxv[0]); 222 u32 rxv3 = le32_to_cpu(rxv[2]); 223 u32 rxv4 = le32_to_cpu(rxv[3]); 224 u32 rxv5 = le32_to_cpu(rxv[4]); 225 u8 cbw = FIELD_GET(MT_RXV1_FRAME_MODE, rxv1); 226 u8 mode = FIELD_GET(MT_RXV1_TX_MODE, rxv1); 227 s16 foe = FIELD_GET(MT_RXV5_FOE, rxv5); 228 u32 foe_const = (BIT(cbw + 1) & 0xf) * 10000; 229 230 if (!mode) { 231 /* CCK */ 232 foe &= ~BIT(11); 233 foe *= 1000; 234 foe >>= 11; 235 } else { 236 if (foe > 2048) 237 foe -= 4096; 238 239 foe = (foe * foe_const) >> 15; 240 } 241 242 phy->test.last_freq_offset = foe; 243 phy->test.last_rcpi[0] = FIELD_GET(MT_RXV4_RCPI0, rxv4); 244 phy->test.last_rcpi[1] = FIELD_GET(MT_RXV4_RCPI1, rxv4); 245 phy->test.last_rcpi[2] = FIELD_GET(MT_RXV4_RCPI2, rxv4); 246 phy->test.last_rcpi[3] = FIELD_GET(MT_RXV4_RCPI3, rxv4); 247 phy->test.last_ib_rssi[0] = FIELD_GET(MT_RXV3_IB_RSSI, rxv3); 248 phy->test.last_wb_rssi[0] = FIELD_GET(MT_RXV3_WB_RSSI, rxv3); 249 #endif 250 } 251 252 /* The HW does not translate the mac header to 802.3 for mesh point */ 253 static int mt7615_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap) 254 { 255 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; 256 struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap); 257 struct mt7615_sta *msta = (struct mt7615_sta *)status->wcid; 258 __le32 *rxd = (__le32 *)skb->data; 259 struct ieee80211_sta *sta; 260 struct ieee80211_vif *vif; 261 struct ieee80211_hdr hdr; 262 __le32 qos_ctrl, ht_ctrl; 263 264 if (FIELD_GET(MT_RXD1_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[1])) != 265 MT_RXD1_NORMAL_U2M) 266 return -EINVAL; 267 268 if (!(le32_to_cpu(rxd[0]) & MT_RXD0_NORMAL_GROUP_4)) 269 return -EINVAL; 270 271 if (!msta || !msta->vif) 272 return -EINVAL; 273 274 sta = container_of((void *)msta, struct ieee80211_sta, drv_priv); 275 vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv); 276 277 /* store the info from RXD and ethhdr to avoid being overridden */ 278 hdr.frame_control = FIELD_GET(MT_RXD4_FRAME_CONTROL, rxd[4]); 279 hdr.seq_ctrl = FIELD_GET(MT_RXD6_SEQ_CTRL, rxd[6]); 280 qos_ctrl = FIELD_GET(MT_RXD6_QOS_CTL, rxd[6]); 281 ht_ctrl = FIELD_GET(MT_RXD7_HT_CONTROL, rxd[7]); 282 283 hdr.duration_id = 0; 284 ether_addr_copy(hdr.addr1, vif->addr); 285 ether_addr_copy(hdr.addr2, sta->addr); 286 switch (le16_to_cpu(hdr.frame_control) & 287 (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { 288 case 0: 289 ether_addr_copy(hdr.addr3, vif->bss_conf.bssid); 290 break; 291 case IEEE80211_FCTL_FROMDS: 292 ether_addr_copy(hdr.addr3, eth_hdr->h_source); 293 break; 294 case IEEE80211_FCTL_TODS: 295 ether_addr_copy(hdr.addr3, eth_hdr->h_dest); 296 break; 297 case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS: 298 ether_addr_copy(hdr.addr3, eth_hdr->h_dest); 299 ether_addr_copy(hdr.addr4, eth_hdr->h_source); 300 break; 301 default: 302 break; 303 } 304 305 skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2); 306 if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) || 307 eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX)) 308 ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header); 309 else if (eth_hdr->h_proto >= cpu_to_be16(ETH_P_802_3_MIN)) 310 ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header); 311 else 312 skb_pull(skb, 2); 313 314 if (ieee80211_has_order(hdr.frame_control)) 315 memcpy(skb_push(skb, 2), &ht_ctrl, 2); 316 if (ieee80211_is_data_qos(hdr.frame_control)) 317 memcpy(skb_push(skb, 2), &qos_ctrl, 2); 318 if (ieee80211_has_a4(hdr.frame_control)) 319 memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr)); 320 else 321 memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6); 322 323 status->flag &= ~(RX_FLAG_RADIOTAP_HE | RX_FLAG_RADIOTAP_HE_MU); 324 return 0; 325 } 326 327 static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) 328 { 329 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; 330 struct mt76_phy *mphy = &dev->mt76.phy; 331 struct mt7615_phy *phy = &dev->phy; 332 struct mt7615_phy *phy2 = dev->mt76.phy2 ? dev->mt76.phy2->priv : NULL; 333 struct ieee80211_supported_band *sband; 334 struct ieee80211_hdr *hdr; 335 __le32 *rxd = (__le32 *)skb->data; 336 u32 rxd0 = le32_to_cpu(rxd[0]); 337 u32 rxd1 = le32_to_cpu(rxd[1]); 338 u32 rxd2 = le32_to_cpu(rxd[2]); 339 u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM; 340 bool unicast, hdr_trans, remove_pad, insert_ccmp_hdr = false; 341 u16 hdr_gap; 342 int phy_idx; 343 int i, idx; 344 u8 chfreq, amsdu_info, qos_ctl = 0; 345 u16 seq_ctrl = 0; 346 __le16 fc = 0; 347 348 memset(status, 0, sizeof(*status)); 349 350 chfreq = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1); 351 if (!phy2) 352 phy_idx = 0; 353 else if (phy2->chfreq == phy->chfreq) 354 phy_idx = -1; 355 else if (phy->chfreq == chfreq) 356 phy_idx = 0; 357 else if (phy2->chfreq == chfreq) 358 phy_idx = 1; 359 else 360 phy_idx = -1; 361 362 if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR) 363 return -EINVAL; 364 365 hdr_trans = rxd1 & MT_RXD1_NORMAL_HDR_TRANS; 366 if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_CM)) 367 return -EINVAL; 368 369 /* ICV error or CCMP/BIP/WPI MIC error */ 370 if (rxd2 & MT_RXD2_NORMAL_ICV_ERR) 371 status->flag |= RX_FLAG_ONLY_MONITOR; 372 373 unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M; 374 idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2); 375 status->wcid = mt7615_rx_get_wcid(dev, idx, unicast); 376 377 if (status->wcid) { 378 struct mt7615_sta *msta; 379 380 msta = container_of(status->wcid, struct mt7615_sta, wcid); 381 spin_lock_bh(&dev->sta_poll_lock); 382 if (list_empty(&msta->poll_list)) 383 list_add_tail(&msta->poll_list, &dev->sta_poll_list); 384 spin_unlock_bh(&dev->sta_poll_lock); 385 } 386 387 if ((rxd0 & csum_mask) == csum_mask) 388 skb->ip_summed = CHECKSUM_UNNECESSARY; 389 390 if (rxd2 & MT_RXD2_NORMAL_FCS_ERR) 391 status->flag |= RX_FLAG_FAILED_FCS_CRC; 392 393 if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR) 394 status->flag |= RX_FLAG_MMIC_ERROR; 395 396 if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 && 397 !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) { 398 status->flag |= RX_FLAG_DECRYPTED; 399 status->flag |= RX_FLAG_IV_STRIPPED; 400 status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED; 401 } 402 403 remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET; 404 405 if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR) 406 return -EINVAL; 407 408 rxd += 4; 409 if (rxd0 & MT_RXD0_NORMAL_GROUP_4) { 410 u32 v0 = le32_to_cpu(rxd[0]); 411 u32 v2 = le32_to_cpu(rxd[2]); 412 413 fc = cpu_to_le16(FIELD_GET(MT_RXD4_FRAME_CONTROL, v0)); 414 qos_ctl = FIELD_GET(MT_RXD6_QOS_CTL, v2); 415 seq_ctrl = FIELD_GET(MT_RXD6_SEQ_CTRL, v2); 416 417 rxd += 4; 418 if ((u8 *)rxd - skb->data >= skb->len) 419 return -EINVAL; 420 } 421 422 if (rxd0 & MT_RXD0_NORMAL_GROUP_1) { 423 u8 *data = (u8 *)rxd; 424 425 if (status->flag & RX_FLAG_DECRYPTED) { 426 switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) { 427 case MT_CIPHER_AES_CCMP: 428 case MT_CIPHER_CCMP_CCX: 429 case MT_CIPHER_CCMP_256: 430 insert_ccmp_hdr = 431 FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2); 432 fallthrough; 433 case MT_CIPHER_TKIP: 434 case MT_CIPHER_TKIP_NO_MIC: 435 case MT_CIPHER_GCMP: 436 case MT_CIPHER_GCMP_256: 437 status->iv[0] = data[5]; 438 status->iv[1] = data[4]; 439 status->iv[2] = data[3]; 440 status->iv[3] = data[2]; 441 status->iv[4] = data[1]; 442 status->iv[5] = data[0]; 443 break; 444 default: 445 break; 446 } 447 } 448 rxd += 4; 449 if ((u8 *)rxd - skb->data >= skb->len) 450 return -EINVAL; 451 } 452 453 if (rxd0 & MT_RXD0_NORMAL_GROUP_2) { 454 status->timestamp = le32_to_cpu(rxd[0]); 455 status->flag |= RX_FLAG_MACTIME_START; 456 457 if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB | 458 MT_RXD2_NORMAL_NON_AMPDU))) { 459 status->flag |= RX_FLAG_AMPDU_DETAILS; 460 461 /* all subframes of an A-MPDU have the same timestamp */ 462 if (phy->rx_ampdu_ts != status->timestamp) { 463 if (!++phy->ampdu_ref) 464 phy->ampdu_ref++; 465 } 466 phy->rx_ampdu_ts = status->timestamp; 467 468 status->ampdu_ref = phy->ampdu_ref; 469 } 470 471 rxd += 2; 472 if ((u8 *)rxd - skb->data >= skb->len) 473 return -EINVAL; 474 } 475 476 if (rxd0 & MT_RXD0_NORMAL_GROUP_3) { 477 u32 rxdg5 = le32_to_cpu(rxd[5]); 478 479 /* 480 * If both PHYs are on the same channel and we don't have a WCID, 481 * we need to figure out which PHY this packet was received on. 482 * On the primary PHY, the noise value for the chains belonging to the 483 * second PHY will be set to the noise value of the last packet from 484 * that PHY. 485 */ 486 if (phy_idx < 0) { 487 int first_chain = ffs(phy2->mt76->chainmask) - 1; 488 489 phy_idx = ((rxdg5 >> (first_chain * 8)) & 0xff) == 0; 490 } 491 } 492 493 if (phy_idx == 1 && phy2) { 494 mphy = dev->mt76.phy2; 495 phy = phy2; 496 status->ext_phy = true; 497 } 498 499 if (!mt7615_firmware_offload(dev) && chfreq != phy->chfreq) 500 return -EINVAL; 501 502 mt7615_get_status_freq_info(dev, mphy, status, chfreq); 503 if (status->band == NL80211_BAND_5GHZ) 504 sband = &mphy->sband_5g.sband; 505 else 506 sband = &mphy->sband_2g.sband; 507 508 if (!test_bit(MT76_STATE_RUNNING, &mphy->state)) 509 return -EINVAL; 510 511 if (!sband->channels) 512 return -EINVAL; 513 514 if (rxd0 & MT_RXD0_NORMAL_GROUP_3) { 515 u32 rxdg0 = le32_to_cpu(rxd[0]); 516 u32 rxdg1 = le32_to_cpu(rxd[1]); 517 u32 rxdg3 = le32_to_cpu(rxd[3]); 518 u8 stbc = FIELD_GET(MT_RXV1_HT_STBC, rxdg0); 519 bool cck = false; 520 521 i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0); 522 switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) { 523 case MT_PHY_TYPE_CCK: 524 cck = true; 525 fallthrough; 526 case MT_PHY_TYPE_OFDM: 527 i = mt76_get_rate(&dev->mt76, sband, i, cck); 528 break; 529 case MT_PHY_TYPE_HT_GF: 530 case MT_PHY_TYPE_HT: 531 status->encoding = RX_ENC_HT; 532 if (i > 31) 533 return -EINVAL; 534 break; 535 case MT_PHY_TYPE_VHT: 536 status->nss = FIELD_GET(MT_RXV2_NSTS, rxdg1) + 1; 537 status->encoding = RX_ENC_VHT; 538 break; 539 default: 540 return -EINVAL; 541 } 542 status->rate_idx = i; 543 544 switch (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0)) { 545 case MT_PHY_BW_20: 546 break; 547 case MT_PHY_BW_40: 548 status->bw = RATE_INFO_BW_40; 549 break; 550 case MT_PHY_BW_80: 551 status->bw = RATE_INFO_BW_80; 552 break; 553 case MT_PHY_BW_160: 554 status->bw = RATE_INFO_BW_160; 555 break; 556 default: 557 return -EINVAL; 558 } 559 560 if (rxdg0 & MT_RXV1_HT_SHORT_GI) 561 status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 562 if (rxdg0 & MT_RXV1_HT_AD_CODE) 563 status->enc_flags |= RX_ENC_FLAG_LDPC; 564 565 status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc; 566 567 status->chains = mphy->antenna_mask; 568 status->chain_signal[0] = to_rssi(MT_RXV4_RCPI0, rxdg3); 569 status->chain_signal[1] = to_rssi(MT_RXV4_RCPI1, rxdg3); 570 status->chain_signal[2] = to_rssi(MT_RXV4_RCPI2, rxdg3); 571 status->chain_signal[3] = to_rssi(MT_RXV4_RCPI3, rxdg3); 572 status->signal = status->chain_signal[0]; 573 574 for (i = 1; i < hweight8(mphy->antenna_mask); i++) { 575 if (!(status->chains & BIT(i))) 576 continue; 577 578 status->signal = max(status->signal, 579 status->chain_signal[i]); 580 } 581 582 mt7615_mac_fill_tm_rx(mphy->priv, rxd); 583 584 rxd += 6; 585 if ((u8 *)rxd - skb->data >= skb->len) 586 return -EINVAL; 587 } 588 589 amsdu_info = FIELD_GET(MT_RXD1_NORMAL_PAYLOAD_FORMAT, rxd1); 590 status->amsdu = !!amsdu_info; 591 if (status->amsdu) { 592 status->first_amsdu = amsdu_info == MT_RXD1_FIRST_AMSDU_FRAME; 593 status->last_amsdu = amsdu_info == MT_RXD1_LAST_AMSDU_FRAME; 594 } 595 596 hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad; 597 if (hdr_trans && ieee80211_has_morefrags(fc)) { 598 if (mt7615_reverse_frag0_hdr_trans(skb, hdr_gap)) 599 return -EINVAL; 600 hdr_trans = false; 601 } else { 602 int pad_start = 0; 603 604 skb_pull(skb, hdr_gap); 605 if (!hdr_trans && status->amsdu) { 606 pad_start = ieee80211_get_hdrlen_from_skb(skb); 607 } else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) { 608 /* 609 * When header translation failure is indicated, 610 * the hardware will insert an extra 2-byte field 611 * containing the data length after the protocol 612 * type field. 613 */ 614 pad_start = 12; 615 if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q) 616 pad_start += 4; 617 618 if (get_unaligned_be16(skb->data + pad_start) != 619 skb->len - pad_start - 2) 620 pad_start = 0; 621 } 622 623 if (pad_start) { 624 memmove(skb->data + 2, skb->data, pad_start); 625 skb_pull(skb, 2); 626 } 627 } 628 629 if (insert_ccmp_hdr && !hdr_trans) { 630 u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1); 631 632 mt76_insert_ccmp_hdr(skb, key_id); 633 } 634 635 if (!hdr_trans) { 636 hdr = (struct ieee80211_hdr *)skb->data; 637 fc = hdr->frame_control; 638 if (ieee80211_is_data_qos(fc)) { 639 seq_ctrl = le16_to_cpu(hdr->seq_ctrl); 640 qos_ctl = *ieee80211_get_qos_ctl(hdr); 641 } 642 } else { 643 status->flag |= RX_FLAG_8023; 644 } 645 646 if (!status->wcid || !ieee80211_is_data_qos(fc)) 647 return 0; 648 649 status->aggr = unicast && 650 !ieee80211_is_qos_nullfunc(fc); 651 status->qos_ctl = qos_ctl; 652 status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl); 653 654 return 0; 655 } 656 657 void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) 658 { 659 } 660 EXPORT_SYMBOL_GPL(mt7615_sta_ps); 661 662 static u16 663 mt7615_mac_tx_rate_val(struct mt7615_dev *dev, 664 struct mt76_phy *mphy, 665 const struct ieee80211_tx_rate *rate, 666 bool stbc, u8 *bw) 667 { 668 u8 phy, nss, rate_idx; 669 u16 rateval = 0; 670 671 *bw = 0; 672 673 if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { 674 rate_idx = ieee80211_rate_get_vht_mcs(rate); 675 nss = ieee80211_rate_get_vht_nss(rate); 676 phy = MT_PHY_TYPE_VHT; 677 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) 678 *bw = 1; 679 else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) 680 *bw = 2; 681 else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH) 682 *bw = 3; 683 } else if (rate->flags & IEEE80211_TX_RC_MCS) { 684 rate_idx = rate->idx; 685 nss = 1 + (rate->idx >> 3); 686 phy = MT_PHY_TYPE_HT; 687 if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD) 688 phy = MT_PHY_TYPE_HT_GF; 689 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) 690 *bw = 1; 691 } else { 692 const struct ieee80211_rate *r; 693 int band = mphy->chandef.chan->band; 694 u16 val; 695 696 nss = 1; 697 r = &mphy->hw->wiphy->bands[band]->bitrates[rate->idx]; 698 if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 699 val = r->hw_value_short; 700 else 701 val = r->hw_value; 702 703 phy = val >> 8; 704 rate_idx = val & 0xff; 705 } 706 707 if (stbc && nss == 1) { 708 nss++; 709 rateval |= MT_TX_RATE_STBC; 710 } 711 712 rateval |= (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) | 713 FIELD_PREP(MT_TX_RATE_MODE, phy) | 714 FIELD_PREP(MT_TX_RATE_NSS, nss - 1)); 715 716 return rateval; 717 } 718 719 int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, 720 struct sk_buff *skb, struct mt76_wcid *wcid, 721 struct ieee80211_sta *sta, int pid, 722 struct ieee80211_key_conf *key, bool beacon) 723 { 724 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 725 u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0; 726 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 727 struct ieee80211_tx_rate *rate = &info->control.rates[0]; 728 bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY; 729 bool multicast = is_multicast_ether_addr(hdr->addr1); 730 struct ieee80211_vif *vif = info->control.vif; 731 bool is_mmio = mt76_is_mmio(&dev->mt76); 732 u32 val, sz_txd = is_mmio ? MT_TXD_SIZE : MT_USB_TXD_SIZE; 733 struct mt76_phy *mphy = &dev->mphy; 734 __le16 fc = hdr->frame_control; 735 int tx_count = 8; 736 u16 seqno = 0; 737 738 if (vif) { 739 struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; 740 741 omac_idx = mvif->omac_idx; 742 wmm_idx = mvif->wmm_idx; 743 } 744 745 if (sta) { 746 struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv; 747 748 tx_count = msta->rate_count; 749 } 750 751 if (ext_phy && dev->mt76.phy2) 752 mphy = dev->mt76.phy2; 753 754 fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2; 755 fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4; 756 757 if (beacon) { 758 p_fmt = MT_TX_TYPE_FW; 759 q_idx = ext_phy ? MT_LMAC_BCN1 : MT_LMAC_BCN0; 760 } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) { 761 p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF; 762 q_idx = ext_phy ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0; 763 } else { 764 p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF; 765 q_idx = wmm_idx * MT7615_MAX_WMM_SETS + 766 mt7615_lmac_mapping(dev, skb_get_queue_mapping(skb)); 767 } 768 769 val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) | 770 FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_LMAC) | 771 FIELD_PREP(MT_TXD0_Q_IDX, q_idx); 772 txwi[0] = cpu_to_le32(val); 773 774 val = MT_TXD1_LONG_FORMAT | 775 FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) | 776 FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) | 777 FIELD_PREP(MT_TXD1_HDR_INFO, 778 ieee80211_get_hdrlen_from_skb(skb) / 2) | 779 FIELD_PREP(MT_TXD1_TID, 780 skb->priority & IEEE80211_QOS_CTL_TID_MASK) | 781 FIELD_PREP(MT_TXD1_PKT_FMT, p_fmt) | 782 FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx); 783 txwi[1] = cpu_to_le32(val); 784 785 val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) | 786 FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) | 787 FIELD_PREP(MT_TXD2_MULTICAST, multicast); 788 if (key) { 789 if (multicast && ieee80211_is_robust_mgmt_frame(skb) && 790 key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) { 791 val |= MT_TXD2_BIP; 792 txwi[3] = 0; 793 } else { 794 txwi[3] = cpu_to_le32(MT_TXD3_PROTECT_FRAME); 795 } 796 } else { 797 txwi[3] = 0; 798 } 799 txwi[2] = cpu_to_le32(val); 800 801 if (!(info->flags & IEEE80211_TX_CTL_AMPDU)) 802 txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE); 803 804 txwi[4] = 0; 805 txwi[6] = 0; 806 807 if (rate->idx >= 0 && rate->count && 808 !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) { 809 bool stbc = info->flags & IEEE80211_TX_CTL_STBC; 810 u8 bw; 811 u16 rateval = mt7615_mac_tx_rate_val(dev, mphy, rate, stbc, 812 &bw); 813 814 txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE); 815 816 val = MT_TXD6_FIXED_BW | 817 FIELD_PREP(MT_TXD6_BW, bw) | 818 FIELD_PREP(MT_TXD6_TX_RATE, rateval); 819 txwi[6] |= cpu_to_le32(val); 820 821 if (rate->flags & IEEE80211_TX_RC_SHORT_GI) 822 txwi[6] |= cpu_to_le32(MT_TXD6_SGI); 823 824 if (info->flags & IEEE80211_TX_CTL_LDPC) 825 txwi[6] |= cpu_to_le32(MT_TXD6_LDPC); 826 827 if (!(rate->flags & (IEEE80211_TX_RC_MCS | 828 IEEE80211_TX_RC_VHT_MCS))) 829 txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE); 830 831 tx_count = rate->count; 832 } 833 834 if (!ieee80211_is_beacon(fc)) { 835 struct ieee80211_hw *hw = mt76_hw(dev); 836 837 val = MT_TXD5_TX_STATUS_HOST | FIELD_PREP(MT_TXD5_PID, pid); 838 if (!ieee80211_hw_check(hw, SUPPORTS_PS)) 839 val |= MT_TXD5_SW_POWER_MGMT; 840 txwi[5] = cpu_to_le32(val); 841 } else { 842 txwi[5] = 0; 843 /* use maximum tx count for beacons */ 844 tx_count = 0x1f; 845 } 846 847 val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count); 848 if (info->flags & IEEE80211_TX_CTL_INJECTED) { 849 seqno = le16_to_cpu(hdr->seq_ctrl); 850 851 if (ieee80211_is_back_req(hdr->frame_control)) { 852 struct ieee80211_bar *bar; 853 854 bar = (struct ieee80211_bar *)skb->data; 855 seqno = le16_to_cpu(bar->start_seq_num); 856 } 857 858 val |= MT_TXD3_SN_VALID | 859 FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno)); 860 } 861 862 txwi[3] |= cpu_to_le32(val); 863 864 if (info->flags & IEEE80211_TX_CTL_NO_ACK) 865 txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK); 866 867 val = FIELD_PREP(MT_TXD7_TYPE, fc_type) | 868 FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype) | 869 FIELD_PREP(MT_TXD7_SPE_IDX, 0x18); 870 txwi[7] = cpu_to_le32(val); 871 if (!is_mmio) { 872 val = FIELD_PREP(MT_TXD8_L_TYPE, fc_type) | 873 FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype); 874 txwi[8] = cpu_to_le32(val); 875 } 876 877 return 0; 878 } 879 EXPORT_SYMBOL_GPL(mt7615_mac_write_txwi); 880 881 static void 882 mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp) 883 { 884 int i; 885 886 for (i = 0; i < txp->nbuf; i++) 887 dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]), 888 le16_to_cpu(txp->len[i]), DMA_TO_DEVICE); 889 } 890 891 static void 892 mt7615_txp_skb_unmap_hw(struct mt76_dev *dev, struct mt7615_hw_txp *txp) 893 { 894 u32 last_mask; 895 int i; 896 897 last_mask = is_mt7663(dev) ? MT_TXD_LEN_LAST : MT_TXD_LEN_MSDU_LAST; 898 899 for (i = 0; i < ARRAY_SIZE(txp->ptr); i++) { 900 struct mt7615_txp_ptr *ptr = &txp->ptr[i]; 901 bool last; 902 u16 len; 903 904 len = le16_to_cpu(ptr->len0); 905 last = len & last_mask; 906 len &= MT_TXD_LEN_MASK; 907 dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len, 908 DMA_TO_DEVICE); 909 if (last) 910 break; 911 912 len = le16_to_cpu(ptr->len1); 913 last = len & last_mask; 914 len &= MT_TXD_LEN_MASK; 915 dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len, 916 DMA_TO_DEVICE); 917 if (last) 918 break; 919 } 920 } 921 922 void mt7615_txp_skb_unmap(struct mt76_dev *dev, 923 struct mt76_txwi_cache *t) 924 { 925 struct mt7615_txp_common *txp; 926 927 txp = mt7615_txwi_to_txp(dev, t); 928 if (is_mt7615(dev)) 929 mt7615_txp_skb_unmap_fw(dev, &txp->fw); 930 else 931 mt7615_txp_skb_unmap_hw(dev, &txp->hw); 932 } 933 EXPORT_SYMBOL_GPL(mt7615_txp_skb_unmap); 934 935 bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask) 936 { 937 mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX, 938 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask); 939 940 return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 941 0, 5000); 942 } 943 944 void mt7615_mac_sta_poll(struct mt7615_dev *dev) 945 { 946 static const u8 ac_to_tid[4] = { 947 [IEEE80211_AC_BE] = 0, 948 [IEEE80211_AC_BK] = 1, 949 [IEEE80211_AC_VI] = 4, 950 [IEEE80211_AC_VO] = 6 951 }; 952 static const u8 hw_queue_map[] = { 953 [IEEE80211_AC_BK] = 0, 954 [IEEE80211_AC_BE] = 1, 955 [IEEE80211_AC_VI] = 2, 956 [IEEE80211_AC_VO] = 3, 957 }; 958 struct ieee80211_sta *sta; 959 struct mt7615_sta *msta; 960 u32 addr, tx_time[4], rx_time[4]; 961 struct list_head sta_poll_list; 962 int i; 963 964 INIT_LIST_HEAD(&sta_poll_list); 965 spin_lock_bh(&dev->sta_poll_lock); 966 list_splice_init(&dev->sta_poll_list, &sta_poll_list); 967 spin_unlock_bh(&dev->sta_poll_lock); 968 969 while (!list_empty(&sta_poll_list)) { 970 bool clear = false; 971 972 msta = list_first_entry(&sta_poll_list, struct mt7615_sta, 973 poll_list); 974 list_del_init(&msta->poll_list); 975 976 addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4; 977 978 for (i = 0; i < 4; i++, addr += 8) { 979 u32 tx_last = msta->airtime_ac[i]; 980 u32 rx_last = msta->airtime_ac[i + 4]; 981 982 msta->airtime_ac[i] = mt76_rr(dev, addr); 983 msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4); 984 tx_time[i] = msta->airtime_ac[i] - tx_last; 985 rx_time[i] = msta->airtime_ac[i + 4] - rx_last; 986 987 if ((tx_last | rx_last) & BIT(30)) 988 clear = true; 989 } 990 991 if (clear) { 992 mt7615_mac_wtbl_update(dev, msta->wcid.idx, 993 MT_WTBL_UPDATE_ADM_COUNT_CLEAR); 994 memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac)); 995 } 996 997 if (!msta->wcid.sta) 998 continue; 999 1000 sta = container_of((void *)msta, struct ieee80211_sta, 1001 drv_priv); 1002 for (i = 0; i < 4; i++) { 1003 u32 tx_cur = tx_time[i]; 1004 u32 rx_cur = rx_time[hw_queue_map[i]]; 1005 u8 tid = ac_to_tid[i]; 1006 1007 if (!tx_cur && !rx_cur) 1008 continue; 1009 1010 ieee80211_sta_register_airtime(sta, tid, tx_cur, 1011 rx_cur); 1012 } 1013 } 1014 } 1015 EXPORT_SYMBOL_GPL(mt7615_mac_sta_poll); 1016 1017 static void 1018 mt7615_mac_update_rate_desc(struct mt7615_phy *phy, struct mt7615_sta *sta, 1019 struct ieee80211_tx_rate *probe_rate, 1020 struct ieee80211_tx_rate *rates, 1021 struct mt7615_rate_desc *rd) 1022 { 1023 struct mt7615_dev *dev = phy->dev; 1024 struct mt76_phy *mphy = phy->mt76; 1025 struct ieee80211_tx_rate *ref; 1026 bool rateset, stbc = false; 1027 int n_rates = sta->n_rates; 1028 u8 bw, bw_prev; 1029 int i, j; 1030 1031 for (i = n_rates; i < 4; i++) 1032 rates[i] = rates[n_rates - 1]; 1033 1034 rateset = !(sta->rate_set_tsf & BIT(0)); 1035 memcpy(sta->rateset[rateset].rates, rates, 1036 sizeof(sta->rateset[rateset].rates)); 1037 if (probe_rate) { 1038 sta->rateset[rateset].probe_rate = *probe_rate; 1039 ref = &sta->rateset[rateset].probe_rate; 1040 } else { 1041 sta->rateset[rateset].probe_rate.idx = -1; 1042 ref = &sta->rateset[rateset].rates[0]; 1043 } 1044 1045 rates = sta->rateset[rateset].rates; 1046 for (i = 0; i < ARRAY_SIZE(sta->rateset[rateset].rates); i++) { 1047 /* 1048 * We don't support switching between short and long GI 1049 * within the rate set. For accurate tx status reporting, we 1050 * need to make sure that flags match. 1051 * For improved performance, avoid duplicate entries by 1052 * decrementing the MCS index if necessary 1053 */ 1054 if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI) 1055 rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI; 1056 1057 for (j = 0; j < i; j++) { 1058 if (rates[i].idx != rates[j].idx) 1059 continue; 1060 if ((rates[i].flags ^ rates[j].flags) & 1061 (IEEE80211_TX_RC_40_MHZ_WIDTH | 1062 IEEE80211_TX_RC_80_MHZ_WIDTH | 1063 IEEE80211_TX_RC_160_MHZ_WIDTH)) 1064 continue; 1065 1066 if (!rates[i].idx) 1067 continue; 1068 1069 rates[i].idx--; 1070 } 1071 } 1072 1073 rd->val[0] = mt7615_mac_tx_rate_val(dev, mphy, &rates[0], stbc, &bw); 1074 bw_prev = bw; 1075 1076 if (probe_rate) { 1077 rd->probe_val = mt7615_mac_tx_rate_val(dev, mphy, probe_rate, 1078 stbc, &bw); 1079 if (bw) 1080 rd->bw_idx = 1; 1081 else 1082 bw_prev = 0; 1083 } else { 1084 rd->probe_val = rd->val[0]; 1085 } 1086 1087 rd->val[1] = mt7615_mac_tx_rate_val(dev, mphy, &rates[1], stbc, &bw); 1088 if (bw_prev) { 1089 rd->bw_idx = 3; 1090 bw_prev = bw; 1091 } 1092 1093 rd->val[2] = mt7615_mac_tx_rate_val(dev, mphy, &rates[2], stbc, &bw); 1094 if (bw_prev) { 1095 rd->bw_idx = 5; 1096 bw_prev = bw; 1097 } 1098 1099 rd->val[3] = mt7615_mac_tx_rate_val(dev, mphy, &rates[3], stbc, &bw); 1100 if (bw_prev) 1101 rd->bw_idx = 7; 1102 1103 rd->rateset = rateset; 1104 rd->bw = bw; 1105 } 1106 1107 static int 1108 mt7615_mac_queue_rate_update(struct mt7615_phy *phy, struct mt7615_sta *sta, 1109 struct ieee80211_tx_rate *probe_rate, 1110 struct ieee80211_tx_rate *rates) 1111 { 1112 struct mt7615_dev *dev = phy->dev; 1113 struct mt7615_wtbl_rate_desc *wrd; 1114 1115 if (work_pending(&dev->rate_work)) 1116 return -EBUSY; 1117 1118 wrd = kzalloc(sizeof(*wrd), GFP_ATOMIC); 1119 if (!wrd) 1120 return -ENOMEM; 1121 1122 wrd->sta = sta; 1123 mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates, 1124 &wrd->rate); 1125 list_add_tail(&wrd->node, &dev->wrd_head); 1126 queue_work(dev->mt76.wq, &dev->rate_work); 1127 1128 return 0; 1129 } 1130 1131 u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid) 1132 { 1133 u32 addr, val, val2; 1134 u8 offset; 1135 1136 addr = mt7615_mac_wtbl_addr(dev, wcid) + 11 * 4; 1137 1138 offset = tid * 12; 1139 addr += 4 * (offset / 32); 1140 offset %= 32; 1141 1142 val = mt76_rr(dev, addr); 1143 val >>= (tid % 32); 1144 1145 if (offset > 20) { 1146 addr += 4; 1147 val2 = mt76_rr(dev, addr); 1148 val |= val2 << (32 - offset); 1149 } 1150 1151 return val & GENMASK(11, 0); 1152 } 1153 1154 void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta, 1155 struct ieee80211_tx_rate *probe_rate, 1156 struct ieee80211_tx_rate *rates) 1157 { 1158 int wcid = sta->wcid.idx, n_rates = sta->n_rates; 1159 struct mt7615_dev *dev = phy->dev; 1160 struct mt7615_rate_desc rd; 1161 u32 w5, w27, addr; 1162 u16 idx = sta->vif->mt76.omac_idx; 1163 1164 if (!mt76_is_mmio(&dev->mt76)) { 1165 mt7615_mac_queue_rate_update(phy, sta, probe_rate, rates); 1166 return; 1167 } 1168 1169 if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000)) 1170 return; 1171 1172 memset(&rd, 0, sizeof(struct mt7615_rate_desc)); 1173 mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates, &rd); 1174 1175 addr = mt7615_mac_wtbl_addr(dev, wcid); 1176 w27 = mt76_rr(dev, addr + 27 * 4); 1177 w27 &= ~MT_WTBL_W27_CC_BW_SEL; 1178 w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, rd.bw); 1179 1180 w5 = mt76_rr(dev, addr + 5 * 4); 1181 w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE | 1182 MT_WTBL_W5_MPDU_OK_COUNT | 1183 MT_WTBL_W5_MPDU_FAIL_COUNT | 1184 MT_WTBL_W5_RATE_IDX); 1185 w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, rd.bw) | 1186 FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE, 1187 rd.bw_idx ? rd.bw_idx - 1 : 7); 1188 1189 mt76_wr(dev, MT_WTBL_RIUCR0, w5); 1190 1191 mt76_wr(dev, MT_WTBL_RIUCR1, 1192 FIELD_PREP(MT_WTBL_RIUCR1_RATE0, rd.probe_val) | 1193 FIELD_PREP(MT_WTBL_RIUCR1_RATE1, rd.val[0]) | 1194 FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, rd.val[1])); 1195 1196 mt76_wr(dev, MT_WTBL_RIUCR2, 1197 FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, rd.val[1] >> 8) | 1198 FIELD_PREP(MT_WTBL_RIUCR2_RATE3, rd.val[1]) | 1199 FIELD_PREP(MT_WTBL_RIUCR2_RATE4, rd.val[2]) | 1200 FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, rd.val[2])); 1201 1202 mt76_wr(dev, MT_WTBL_RIUCR3, 1203 FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, rd.val[2] >> 4) | 1204 FIELD_PREP(MT_WTBL_RIUCR3_RATE6, rd.val[3]) | 1205 FIELD_PREP(MT_WTBL_RIUCR3_RATE7, rd.val[3])); 1206 1207 mt76_wr(dev, MT_WTBL_UPDATE, 1208 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) | 1209 MT_WTBL_UPDATE_RATE_UPDATE | 1210 MT_WTBL_UPDATE_TX_COUNT_CLEAR); 1211 1212 mt76_wr(dev, addr + 27 * 4, w27); 1213 1214 idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx; 1215 addr = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx); 1216 1217 mt76_rmw(dev, addr, MT_LPON_TCR_MODE, MT_LPON_TCR_READ); /* TSF read */ 1218 sta->rate_set_tsf = mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0); 1219 sta->rate_set_tsf |= rd.rateset; 1220 1221 if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET)) 1222 mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000); 1223 1224 sta->rate_count = 2 * MT7615_RATE_RETRY * n_rates; 1225 sta->wcid.tx_info |= MT_WCID_TX_INFO_SET; 1226 sta->rate_probe = !!probe_rate; 1227 } 1228 EXPORT_SYMBOL_GPL(mt7615_mac_set_rates); 1229 1230 static int 1231 mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid, 1232 struct ieee80211_key_conf *key, 1233 enum mt76_cipher_type cipher, u16 cipher_mask, 1234 enum set_key_cmd cmd) 1235 { 1236 u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4; 1237 u8 data[32] = {}; 1238 1239 if (key->keylen > sizeof(data)) 1240 return -EINVAL; 1241 1242 mt76_rr_copy(dev, addr, data, sizeof(data)); 1243 if (cmd == SET_KEY) { 1244 if (cipher == MT_CIPHER_TKIP) { 1245 /* Rx/Tx MIC keys are swapped */ 1246 memcpy(data, key->key, 16); 1247 memcpy(data + 16, key->key + 24, 8); 1248 memcpy(data + 24, key->key + 16, 8); 1249 } else { 1250 if (cipher_mask == BIT(cipher)) 1251 memcpy(data, key->key, key->keylen); 1252 else if (cipher != MT_CIPHER_BIP_CMAC_128) 1253 memcpy(data, key->key, 16); 1254 if (cipher == MT_CIPHER_BIP_CMAC_128) 1255 memcpy(data + 16, key->key, 16); 1256 } 1257 } else { 1258 if (cipher == MT_CIPHER_BIP_CMAC_128) 1259 memset(data + 16, 0, 16); 1260 else if (cipher_mask) 1261 memset(data, 0, 16); 1262 if (!cipher_mask) 1263 memset(data, 0, sizeof(data)); 1264 } 1265 1266 mt76_wr_copy(dev, addr, data, sizeof(data)); 1267 1268 return 0; 1269 } 1270 1271 static int 1272 mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid, 1273 enum mt76_cipher_type cipher, u16 cipher_mask, 1274 int keyidx, enum set_key_cmd cmd) 1275 { 1276 u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1; 1277 1278 if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000)) 1279 return -ETIMEDOUT; 1280 1281 w0 = mt76_rr(dev, addr); 1282 w1 = mt76_rr(dev, addr + 4); 1283 1284 if (cipher_mask) 1285 w0 |= MT_WTBL_W0_RX_KEY_VALID; 1286 else 1287 w0 &= ~(MT_WTBL_W0_RX_KEY_VALID | MT_WTBL_W0_KEY_IDX); 1288 if (cipher_mask & BIT(MT_CIPHER_BIP_CMAC_128)) 1289 w0 |= MT_WTBL_W0_RX_IK_VALID; 1290 else 1291 w0 &= ~MT_WTBL_W0_RX_IK_VALID; 1292 1293 if (cmd == SET_KEY && 1294 (cipher != MT_CIPHER_BIP_CMAC_128 || 1295 cipher_mask == BIT(cipher))) { 1296 w0 &= ~MT_WTBL_W0_KEY_IDX; 1297 w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx); 1298 } 1299 1300 mt76_wr(dev, MT_WTBL_RICR0, w0); 1301 mt76_wr(dev, MT_WTBL_RICR1, w1); 1302 1303 if (!mt7615_mac_wtbl_update(dev, wcid->idx, 1304 MT_WTBL_UPDATE_RXINFO_UPDATE)) 1305 return -ETIMEDOUT; 1306 1307 return 0; 1308 } 1309 1310 static void 1311 mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid, 1312 enum mt76_cipher_type cipher, u16 cipher_mask, 1313 enum set_key_cmd cmd) 1314 { 1315 u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx); 1316 1317 if (!cipher_mask) { 1318 mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE); 1319 return; 1320 } 1321 1322 if (cmd != SET_KEY) 1323 return; 1324 1325 if (cipher == MT_CIPHER_BIP_CMAC_128 && 1326 cipher_mask & ~BIT(MT_CIPHER_BIP_CMAC_128)) 1327 return; 1328 1329 mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE, 1330 FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher)); 1331 } 1332 1333 int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, 1334 struct mt76_wcid *wcid, 1335 struct ieee80211_key_conf *key, 1336 enum set_key_cmd cmd) 1337 { 1338 enum mt76_cipher_type cipher; 1339 u16 cipher_mask = wcid->cipher; 1340 int err; 1341 1342 cipher = mt7615_mac_get_cipher(key->cipher); 1343 if (cipher == MT_CIPHER_NONE) 1344 return -EOPNOTSUPP; 1345 1346 if (cmd == SET_KEY) 1347 cipher_mask |= BIT(cipher); 1348 else 1349 cipher_mask &= ~BIT(cipher); 1350 1351 mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask, cmd); 1352 err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask, 1353 cmd); 1354 if (err < 0) 1355 return err; 1356 1357 err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, cipher_mask, 1358 key->keyidx, cmd); 1359 if (err < 0) 1360 return err; 1361 1362 wcid->cipher = cipher_mask; 1363 1364 return 0; 1365 } 1366 1367 int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, 1368 struct mt76_wcid *wcid, 1369 struct ieee80211_key_conf *key, 1370 enum set_key_cmd cmd) 1371 { 1372 int err; 1373 1374 spin_lock_bh(&dev->mt76.lock); 1375 err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd); 1376 spin_unlock_bh(&dev->mt76.lock); 1377 1378 return err; 1379 } 1380 1381 static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta, 1382 struct ieee80211_tx_info *info, __le32 *txs_data) 1383 { 1384 struct ieee80211_supported_band *sband; 1385 struct mt7615_rate_set *rs; 1386 struct mt76_phy *mphy; 1387 int first_idx = 0, last_idx; 1388 int i, idx, count; 1389 bool fixed_rate, ack_timeout; 1390 bool ampdu, cck = false; 1391 bool rs_idx; 1392 u32 rate_set_tsf; 1393 u32 final_rate, final_rate_flags, final_nss, txs; 1394 1395 txs = le32_to_cpu(txs_data[1]); 1396 ampdu = txs & MT_TXS1_AMPDU; 1397 1398 txs = le32_to_cpu(txs_data[3]); 1399 count = FIELD_GET(MT_TXS3_TX_COUNT, txs); 1400 last_idx = FIELD_GET(MT_TXS3_LAST_TX_RATE, txs); 1401 1402 txs = le32_to_cpu(txs_data[0]); 1403 fixed_rate = txs & MT_TXS0_FIXED_RATE; 1404 final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs); 1405 ack_timeout = txs & MT_TXS0_ACK_TIMEOUT; 1406 1407 if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT)) 1408 return false; 1409 1410 if (txs & MT_TXS0_QUEUE_TIMEOUT) 1411 return false; 1412 1413 if (!ack_timeout) 1414 info->flags |= IEEE80211_TX_STAT_ACK; 1415 1416 info->status.ampdu_len = 1; 1417 info->status.ampdu_ack_len = !!(info->flags & 1418 IEEE80211_TX_STAT_ACK); 1419 1420 if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU)) 1421 info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU; 1422 1423 first_idx = max_t(int, 0, last_idx - (count - 1) / MT7615_RATE_RETRY); 1424 1425 if (fixed_rate) { 1426 info->status.rates[0].count = count; 1427 i = 0; 1428 goto out; 1429 } 1430 1431 rate_set_tsf = READ_ONCE(sta->rate_set_tsf); 1432 rs_idx = !((u32)(FIELD_GET(MT_TXS4_F0_TIMESTAMP, le32_to_cpu(txs_data[4])) - 1433 rate_set_tsf) < 1000000); 1434 rs_idx ^= rate_set_tsf & BIT(0); 1435 rs = &sta->rateset[rs_idx]; 1436 1437 if (!first_idx && rs->probe_rate.idx >= 0) { 1438 info->status.rates[0] = rs->probe_rate; 1439 1440 spin_lock_bh(&dev->mt76.lock); 1441 if (sta->rate_probe) { 1442 struct mt7615_phy *phy = &dev->phy; 1443 1444 if (sta->wcid.ext_phy && dev->mt76.phy2) 1445 phy = dev->mt76.phy2->priv; 1446 1447 mt7615_mac_set_rates(phy, sta, NULL, sta->rates); 1448 } 1449 spin_unlock_bh(&dev->mt76.lock); 1450 } else { 1451 info->status.rates[0] = rs->rates[first_idx / 2]; 1452 } 1453 info->status.rates[0].count = 0; 1454 1455 for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) { 1456 struct ieee80211_tx_rate *cur_rate; 1457 int cur_count; 1458 1459 cur_rate = &rs->rates[idx / 2]; 1460 cur_count = min_t(int, MT7615_RATE_RETRY, count); 1461 count -= cur_count; 1462 1463 if (idx && (cur_rate->idx != info->status.rates[i].idx || 1464 cur_rate->flags != info->status.rates[i].flags)) { 1465 i++; 1466 if (i == ARRAY_SIZE(info->status.rates)) { 1467 i--; 1468 break; 1469 } 1470 1471 info->status.rates[i] = *cur_rate; 1472 info->status.rates[i].count = 0; 1473 } 1474 1475 info->status.rates[i].count += cur_count; 1476 } 1477 1478 out: 1479 final_rate_flags = info->status.rates[i].flags; 1480 1481 switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) { 1482 case MT_PHY_TYPE_CCK: 1483 cck = true; 1484 fallthrough; 1485 case MT_PHY_TYPE_OFDM: 1486 mphy = &dev->mphy; 1487 if (sta->wcid.ext_phy && dev->mt76.phy2) 1488 mphy = dev->mt76.phy2; 1489 1490 if (mphy->chandef.chan->band == NL80211_BAND_5GHZ) 1491 sband = &mphy->sband_5g.sband; 1492 else 1493 sband = &mphy->sband_2g.sband; 1494 final_rate &= MT_TX_RATE_IDX; 1495 final_rate = mt76_get_rate(&dev->mt76, sband, final_rate, 1496 cck); 1497 final_rate_flags = 0; 1498 break; 1499 case MT_PHY_TYPE_HT_GF: 1500 case MT_PHY_TYPE_HT: 1501 final_rate_flags |= IEEE80211_TX_RC_MCS; 1502 final_rate &= MT_TX_RATE_IDX; 1503 if (final_rate > 31) 1504 return false; 1505 break; 1506 case MT_PHY_TYPE_VHT: 1507 final_nss = FIELD_GET(MT_TX_RATE_NSS, final_rate); 1508 1509 if ((final_rate & MT_TX_RATE_STBC) && final_nss) 1510 final_nss--; 1511 1512 final_rate_flags |= IEEE80211_TX_RC_VHT_MCS; 1513 final_rate = (final_rate & MT_TX_RATE_IDX) | (final_nss << 4); 1514 break; 1515 default: 1516 return false; 1517 } 1518 1519 info->status.rates[i].idx = final_rate; 1520 info->status.rates[i].flags = final_rate_flags; 1521 1522 return true; 1523 } 1524 1525 static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev, 1526 struct mt7615_sta *sta, int pid, 1527 __le32 *txs_data) 1528 { 1529 struct mt76_dev *mdev = &dev->mt76; 1530 struct sk_buff_head list; 1531 struct sk_buff *skb; 1532 1533 if (pid < MT_PACKET_ID_FIRST) 1534 return false; 1535 1536 trace_mac_txdone(mdev, sta->wcid.idx, pid); 1537 1538 mt76_tx_status_lock(mdev, &list); 1539 skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list); 1540 if (skb) { 1541 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1542 1543 if (!mt7615_fill_txs(dev, sta, info, txs_data)) { 1544 info->status.rates[0].count = 0; 1545 info->status.rates[0].idx = -1; 1546 } 1547 1548 mt76_tx_status_skb_done(mdev, skb, &list); 1549 } 1550 mt76_tx_status_unlock(mdev, &list); 1551 1552 return !!skb; 1553 } 1554 1555 static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data) 1556 { 1557 struct ieee80211_tx_info info = {}; 1558 struct ieee80211_sta *sta = NULL; 1559 struct mt7615_sta *msta = NULL; 1560 struct mt76_wcid *wcid; 1561 struct mt76_phy *mphy = &dev->mt76.phy; 1562 __le32 *txs_data = data; 1563 u32 txs; 1564 u8 wcidx; 1565 u8 pid; 1566 1567 txs = le32_to_cpu(txs_data[0]); 1568 pid = FIELD_GET(MT_TXS0_PID, txs); 1569 txs = le32_to_cpu(txs_data[2]); 1570 wcidx = FIELD_GET(MT_TXS2_WCID, txs); 1571 1572 if (pid == MT_PACKET_ID_NO_ACK) 1573 return; 1574 1575 if (wcidx >= MT7615_WTBL_SIZE) 1576 return; 1577 1578 rcu_read_lock(); 1579 1580 wcid = rcu_dereference(dev->mt76.wcid[wcidx]); 1581 if (!wcid) 1582 goto out; 1583 1584 msta = container_of(wcid, struct mt7615_sta, wcid); 1585 sta = wcid_to_sta(wcid); 1586 1587 spin_lock_bh(&dev->sta_poll_lock); 1588 if (list_empty(&msta->poll_list)) 1589 list_add_tail(&msta->poll_list, &dev->sta_poll_list); 1590 spin_unlock_bh(&dev->sta_poll_lock); 1591 1592 if (mt7615_mac_add_txs_skb(dev, msta, pid, txs_data)) 1593 goto out; 1594 1595 if (wcidx >= MT7615_WTBL_STA || !sta) 1596 goto out; 1597 1598 if (wcid->ext_phy && dev->mt76.phy2) 1599 mphy = dev->mt76.phy2; 1600 1601 if (mt7615_fill_txs(dev, msta, &info, txs_data)) 1602 ieee80211_tx_status_noskb(mphy->hw, sta, &info); 1603 1604 out: 1605 rcu_read_unlock(); 1606 } 1607 1608 static void 1609 mt7615_txwi_free(struct mt7615_dev *dev, struct mt76_txwi_cache *txwi) 1610 { 1611 struct mt76_dev *mdev = &dev->mt76; 1612 __le32 *txwi_data; 1613 u32 val; 1614 u8 wcid; 1615 1616 mt7615_txp_skb_unmap(mdev, txwi); 1617 if (!txwi->skb) 1618 goto out; 1619 1620 txwi_data = (__le32 *)mt76_get_txwi_ptr(mdev, txwi); 1621 val = le32_to_cpu(txwi_data[1]); 1622 wcid = FIELD_GET(MT_TXD1_WLAN_IDX, val); 1623 mt76_tx_complete_skb(mdev, wcid, txwi->skb); 1624 1625 out: 1626 txwi->skb = NULL; 1627 mt76_put_txwi(mdev, txwi); 1628 } 1629 1630 static void 1631 mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token) 1632 { 1633 struct mt76_dev *mdev = &dev->mt76; 1634 struct mt76_txwi_cache *txwi; 1635 1636 trace_mac_tx_free(dev, token); 1637 txwi = mt76_token_put(mdev, token); 1638 if (!txwi) 1639 return; 1640 1641 mt7615_txwi_free(dev, txwi); 1642 } 1643 1644 static void mt7615_mac_tx_free(struct mt7615_dev *dev, void *data, int len) 1645 { 1646 struct mt7615_tx_free *free = (struct mt7615_tx_free *)data; 1647 void *end = data + len; 1648 u8 i, count; 1649 1650 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false); 1651 if (is_mt7615(&dev->mt76)) { 1652 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false); 1653 } else { 1654 for (i = 0; i < IEEE80211_NUM_ACS; i++) 1655 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false); 1656 } 1657 1658 count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl)); 1659 if (is_mt7615(&dev->mt76)) { 1660 __le16 *token = &free->token[0]; 1661 1662 if (WARN_ON_ONCE((void *)&token[count] > end)) 1663 return; 1664 1665 for (i = 0; i < count; i++) 1666 mt7615_mac_tx_free_token(dev, le16_to_cpu(token[i])); 1667 } else { 1668 __le32 *token = (__le32 *)&free->token[0]; 1669 1670 if (WARN_ON_ONCE((void *)&token[count] > end)) 1671 return; 1672 1673 for (i = 0; i < count; i++) 1674 mt7615_mac_tx_free_token(dev, le32_to_cpu(token[i])); 1675 } 1676 1677 rcu_read_lock(); 1678 mt7615_mac_sta_poll(dev); 1679 rcu_read_unlock(); 1680 1681 mt76_worker_schedule(&dev->mt76.tx_worker); 1682 } 1683 1684 bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len) 1685 { 1686 struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); 1687 __le32 *rxd = (__le32 *)data; 1688 __le32 *end = (__le32 *)&rxd[len / 4]; 1689 enum rx_pkt_type type; 1690 1691 type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0])); 1692 switch (type) { 1693 case PKT_TYPE_TXRX_NOTIFY: 1694 mt7615_mac_tx_free(dev, data, len); 1695 return false; 1696 case PKT_TYPE_TXS: 1697 for (rxd++; rxd + 7 <= end; rxd += 7) 1698 mt7615_mac_add_txs(dev, rxd); 1699 return false; 1700 default: 1701 return true; 1702 } 1703 } 1704 EXPORT_SYMBOL_GPL(mt7615_rx_check); 1705 1706 void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, 1707 struct sk_buff *skb) 1708 { 1709 struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); 1710 __le32 *rxd = (__le32 *)skb->data; 1711 __le32 *end = (__le32 *)&skb->data[skb->len]; 1712 enum rx_pkt_type type; 1713 u16 flag; 1714 1715 type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0])); 1716 flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0])); 1717 if (type == PKT_TYPE_RX_EVENT && flag == 0x1) 1718 type = PKT_TYPE_NORMAL_MCU; 1719 1720 switch (type) { 1721 case PKT_TYPE_TXS: 1722 for (rxd++; rxd + 7 <= end; rxd += 7) 1723 mt7615_mac_add_txs(dev, rxd); 1724 dev_kfree_skb(skb); 1725 break; 1726 case PKT_TYPE_TXRX_NOTIFY: 1727 mt7615_mac_tx_free(dev, skb->data, skb->len); 1728 dev_kfree_skb(skb); 1729 break; 1730 case PKT_TYPE_RX_EVENT: 1731 mt7615_mcu_rx_event(dev, skb); 1732 break; 1733 case PKT_TYPE_NORMAL_MCU: 1734 case PKT_TYPE_NORMAL: 1735 if (!mt7615_mac_fill_rx(dev, skb)) { 1736 mt76_rx(&dev->mt76, q, skb); 1737 return; 1738 } 1739 fallthrough; 1740 default: 1741 dev_kfree_skb(skb); 1742 break; 1743 } 1744 } 1745 EXPORT_SYMBOL_GPL(mt7615_queue_rx_skb); 1746 1747 static void 1748 mt7615_mac_set_sensitivity(struct mt7615_phy *phy, int val, bool ofdm) 1749 { 1750 struct mt7615_dev *dev = phy->dev; 1751 bool ext_phy = phy != &dev->phy; 1752 1753 if (is_mt7663(&dev->mt76)) { 1754 if (ofdm) 1755 mt76_rmw(dev, MT7663_WF_PHY_MIN_PRI_PWR(ext_phy), 1756 MT_WF_PHY_PD_OFDM_MASK(0), 1757 MT_WF_PHY_PD_OFDM(0, val)); 1758 else 1759 mt76_rmw(dev, MT7663_WF_PHY_RXTD_CCK_PD(ext_phy), 1760 MT_WF_PHY_PD_CCK_MASK(ext_phy), 1761 MT_WF_PHY_PD_CCK(ext_phy, val)); 1762 return; 1763 } 1764 1765 if (ofdm) 1766 mt76_rmw(dev, MT_WF_PHY_MIN_PRI_PWR(ext_phy), 1767 MT_WF_PHY_PD_OFDM_MASK(ext_phy), 1768 MT_WF_PHY_PD_OFDM(ext_phy, val)); 1769 else 1770 mt76_rmw(dev, MT_WF_PHY_RXTD_CCK_PD(ext_phy), 1771 MT_WF_PHY_PD_CCK_MASK(ext_phy), 1772 MT_WF_PHY_PD_CCK(ext_phy, val)); 1773 } 1774 1775 static void 1776 mt7615_mac_set_default_sensitivity(struct mt7615_phy *phy) 1777 { 1778 /* ofdm */ 1779 mt7615_mac_set_sensitivity(phy, 0x13c, true); 1780 /* cck */ 1781 mt7615_mac_set_sensitivity(phy, 0x92, false); 1782 1783 phy->ofdm_sensitivity = -98; 1784 phy->cck_sensitivity = -110; 1785 phy->last_cca_adj = jiffies; 1786 } 1787 1788 void mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable) 1789 { 1790 struct mt7615_dev *dev = phy->dev; 1791 bool ext_phy = phy != &dev->phy; 1792 u32 reg, mask; 1793 1794 mt7615_mutex_acquire(dev); 1795 1796 if (phy->scs_en == enable) 1797 goto out; 1798 1799 if (is_mt7663(&dev->mt76)) { 1800 reg = MT7663_WF_PHY_MIN_PRI_PWR(ext_phy); 1801 mask = MT_WF_PHY_PD_BLK(0); 1802 } else { 1803 reg = MT_WF_PHY_MIN_PRI_PWR(ext_phy); 1804 mask = MT_WF_PHY_PD_BLK(ext_phy); 1805 } 1806 1807 if (enable) { 1808 mt76_set(dev, reg, mask); 1809 if (is_mt7622(&dev->mt76)) { 1810 mt76_set(dev, MT_MIB_M0_MISC_CR(0), 0x7 << 8); 1811 mt76_set(dev, MT_MIB_M0_MISC_CR(0), 0x7); 1812 } 1813 } else { 1814 mt76_clear(dev, reg, mask); 1815 } 1816 1817 mt7615_mac_set_default_sensitivity(phy); 1818 phy->scs_en = enable; 1819 1820 out: 1821 mt7615_mutex_release(dev); 1822 } 1823 1824 void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy) 1825 { 1826 u32 rxtd, reg; 1827 1828 if (is_mt7663(&dev->mt76)) 1829 reg = MT7663_WF_PHY_R0_PHYMUX_5; 1830 else 1831 reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy); 1832 1833 if (ext_phy) 1834 rxtd = MT_WF_PHY_RXTD2(10); 1835 else 1836 rxtd = MT_WF_PHY_RXTD(12); 1837 1838 mt76_set(dev, rxtd, BIT(18) | BIT(29)); 1839 mt76_set(dev, reg, 0x5 << 12); 1840 } 1841 1842 void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy) 1843 { 1844 struct mt7615_dev *dev = phy->dev; 1845 bool ext_phy = phy != &dev->phy; 1846 u32 reg; 1847 1848 if (is_mt7663(&dev->mt76)) 1849 reg = MT7663_WF_PHY_R0_PHYMUX_5; 1850 else 1851 reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy); 1852 1853 /* reset PD and MDRDY counters */ 1854 mt76_clear(dev, reg, GENMASK(22, 20)); 1855 mt76_set(dev, reg, BIT(22) | BIT(20)); 1856 } 1857 1858 static void 1859 mt7615_mac_adjust_sensitivity(struct mt7615_phy *phy, 1860 u32 rts_err_rate, bool ofdm) 1861 { 1862 struct mt7615_dev *dev = phy->dev; 1863 int false_cca = ofdm ? phy->false_cca_ofdm : phy->false_cca_cck; 1864 bool ext_phy = phy != &dev->phy; 1865 u16 def_th = ofdm ? -98 : -110; 1866 bool update = false; 1867 s8 *sensitivity; 1868 int signal; 1869 1870 sensitivity = ofdm ? &phy->ofdm_sensitivity : &phy->cck_sensitivity; 1871 signal = mt76_get_min_avg_rssi(&dev->mt76, ext_phy); 1872 if (!signal) { 1873 mt7615_mac_set_default_sensitivity(phy); 1874 return; 1875 } 1876 1877 signal = min(signal, -72); 1878 if (false_cca > 500) { 1879 if (rts_err_rate > MT_FRAC(40, 100)) 1880 return; 1881 1882 /* decrease coverage */ 1883 if (*sensitivity == def_th && signal > -90) { 1884 *sensitivity = -90; 1885 update = true; 1886 } else if (*sensitivity + 2 < signal) { 1887 *sensitivity += 2; 1888 update = true; 1889 } 1890 } else if ((false_cca > 0 && false_cca < 50) || 1891 rts_err_rate > MT_FRAC(60, 100)) { 1892 /* increase coverage */ 1893 if (*sensitivity - 2 >= def_th) { 1894 *sensitivity -= 2; 1895 update = true; 1896 } 1897 } 1898 1899 if (*sensitivity > signal) { 1900 *sensitivity = signal; 1901 update = true; 1902 } 1903 1904 if (update) { 1905 u16 val = ofdm ? *sensitivity * 2 + 512 : *sensitivity + 256; 1906 1907 mt7615_mac_set_sensitivity(phy, val, ofdm); 1908 phy->last_cca_adj = jiffies; 1909 } 1910 } 1911 1912 static void 1913 mt7615_mac_scs_check(struct mt7615_phy *phy) 1914 { 1915 struct mt7615_dev *dev = phy->dev; 1916 struct mib_stats *mib = &phy->mib; 1917 u32 val, rts_err_rate = 0; 1918 u32 mdrdy_cck, mdrdy_ofdm, pd_cck, pd_ofdm; 1919 bool ext_phy = phy != &dev->phy; 1920 1921 if (!phy->scs_en) 1922 return; 1923 1924 if (is_mt7663(&dev->mt76)) 1925 val = mt76_rr(dev, MT7663_WF_PHY_R0_PHYCTRL_STS0(ext_phy)); 1926 else 1927 val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS0(ext_phy)); 1928 pd_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_CCK, val); 1929 pd_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_OFDM, val); 1930 1931 if (is_mt7663(&dev->mt76)) 1932 val = mt76_rr(dev, MT7663_WF_PHY_R0_PHYCTRL_STS5(ext_phy)); 1933 else 1934 val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS5(ext_phy)); 1935 mdrdy_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_CCK, val); 1936 mdrdy_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_OFDM, val); 1937 1938 phy->false_cca_ofdm = pd_ofdm - mdrdy_ofdm; 1939 phy->false_cca_cck = pd_cck - mdrdy_cck; 1940 mt7615_mac_cca_stats_reset(phy); 1941 1942 if (mib->rts_cnt + mib->rts_retries_cnt) 1943 rts_err_rate = MT_FRAC(mib->rts_retries_cnt, 1944 mib->rts_cnt + mib->rts_retries_cnt); 1945 1946 /* cck */ 1947 mt7615_mac_adjust_sensitivity(phy, rts_err_rate, false); 1948 /* ofdm */ 1949 mt7615_mac_adjust_sensitivity(phy, rts_err_rate, true); 1950 1951 if (time_after(jiffies, phy->last_cca_adj + 10 * HZ)) 1952 mt7615_mac_set_default_sensitivity(phy); 1953 } 1954 1955 static u8 1956 mt7615_phy_get_nf(struct mt7615_dev *dev, int idx) 1957 { 1958 static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 }; 1959 u32 reg, val, sum = 0, n = 0; 1960 int i; 1961 1962 if (is_mt7663(&dev->mt76)) 1963 reg = MT7663_WF_PHY_RXTD(20); 1964 else 1965 reg = idx ? MT_WF_PHY_RXTD2(17) : MT_WF_PHY_RXTD(20); 1966 1967 for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) { 1968 val = mt76_rr(dev, reg); 1969 sum += val * nf_power[i]; 1970 n += val; 1971 } 1972 1973 if (!n) 1974 return 0; 1975 1976 return sum / n; 1977 } 1978 1979 static void 1980 mt7615_phy_update_channel(struct mt76_phy *mphy, int idx) 1981 { 1982 struct mt7615_dev *dev = container_of(mphy->dev, struct mt7615_dev, mt76); 1983 struct mt7615_phy *phy = mphy->priv; 1984 struct mt76_channel_state *state; 1985 u64 busy_time, tx_time, rx_time, obss_time; 1986 u32 obss_reg = idx ? MT_WF_RMAC_MIB_TIME6 : MT_WF_RMAC_MIB_TIME5; 1987 int nf; 1988 1989 busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx), 1990 MT_MIB_SDR9_BUSY_MASK); 1991 tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx), 1992 MT_MIB_SDR36_TXTIME_MASK); 1993 rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx), 1994 MT_MIB_SDR37_RXTIME_MASK); 1995 obss_time = mt76_get_field(dev, obss_reg, MT_MIB_OBSSTIME_MASK); 1996 1997 nf = mt7615_phy_get_nf(dev, idx); 1998 if (!phy->noise) 1999 phy->noise = nf << 4; 2000 else if (nf) 2001 phy->noise += nf - (phy->noise >> 4); 2002 2003 state = mphy->chan_state; 2004 state->cc_busy += busy_time; 2005 state->cc_tx += tx_time; 2006 state->cc_rx += rx_time + obss_time; 2007 state->cc_bss_rx += rx_time; 2008 state->noise = -(phy->noise >> 4); 2009 } 2010 2011 static void mt7615_update_survey(struct mt7615_dev *dev) 2012 { 2013 struct mt76_dev *mdev = &dev->mt76; 2014 ktime_t cur_time; 2015 2016 /* MT7615 can only update both phys simultaneously 2017 * since some reisters are shared across bands. 2018 */ 2019 2020 mt7615_phy_update_channel(&mdev->phy, 0); 2021 if (mdev->phy2) 2022 mt7615_phy_update_channel(mdev->phy2, 1); 2023 2024 cur_time = ktime_get_boottime(); 2025 2026 mt76_update_survey_active_time(&mdev->phy, cur_time); 2027 if (mdev->phy2) 2028 mt76_update_survey_active_time(mdev->phy2, cur_time); 2029 2030 /* reset obss airtime */ 2031 mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR); 2032 } 2033 2034 void mt7615_update_channel(struct mt76_phy *mphy) 2035 { 2036 struct mt7615_dev *dev = container_of(mphy->dev, struct mt7615_dev, mt76); 2037 2038 if (mt76_connac_pm_wake(&dev->mphy, &dev->pm)) 2039 return; 2040 2041 mt7615_update_survey(dev); 2042 mt76_connac_power_save_sched(&dev->mphy, &dev->pm); 2043 } 2044 EXPORT_SYMBOL_GPL(mt7615_update_channel); 2045 2046 static void 2047 mt7615_mac_update_mib_stats(struct mt7615_phy *phy) 2048 { 2049 struct mt7615_dev *dev = phy->dev; 2050 struct mib_stats *mib = &phy->mib; 2051 bool ext_phy = phy != &dev->phy; 2052 int i, aggr; 2053 u32 val, val2; 2054 2055 mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy), 2056 MT_MIB_SDR3_FCS_ERR_MASK); 2057 2058 val = mt76_get_field(dev, MT_MIB_SDR14(ext_phy), 2059 MT_MIB_AMPDU_MPDU_COUNT); 2060 if (val) { 2061 val2 = mt76_get_field(dev, MT_MIB_SDR15(ext_phy), 2062 MT_MIB_AMPDU_ACK_COUNT); 2063 mib->aggr_per = 1000 * (val - val2) / val; 2064 } 2065 2066 aggr = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0; 2067 for (i = 0; i < 4; i++) { 2068 val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i)); 2069 mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val); 2070 mib->ack_fail_cnt += FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, 2071 val); 2072 2073 val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i)); 2074 mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val); 2075 mib->rts_retries_cnt += FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, 2076 val); 2077 2078 val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i)); 2079 dev->mt76.aggr_stats[aggr++] += val & 0xffff; 2080 dev->mt76.aggr_stats[aggr++] += val >> 16; 2081 } 2082 } 2083 2084 void mt7615_pm_wake_work(struct work_struct *work) 2085 { 2086 struct mt7615_dev *dev; 2087 struct mt76_phy *mphy; 2088 2089 dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev, 2090 pm.wake_work); 2091 mphy = dev->phy.mt76; 2092 2093 if (!mt7615_mcu_set_drv_ctrl(dev)) { 2094 struct mt76_dev *mdev = &dev->mt76; 2095 int i; 2096 2097 if (mt76_is_sdio(mdev)) { 2098 mt76_connac_pm_dequeue_skbs(mphy, &dev->pm); 2099 mt76_worker_schedule(&mdev->sdio.txrx_worker); 2100 } else { 2101 mt76_for_each_q_rx(mdev, i) 2102 napi_schedule(&mdev->napi[i]); 2103 mt76_connac_pm_dequeue_skbs(mphy, &dev->pm); 2104 mt76_queue_tx_cleanup(dev, mdev->q_mcu[MT_MCUQ_WM], 2105 false); 2106 } 2107 2108 if (test_bit(MT76_STATE_RUNNING, &mphy->state)) { 2109 unsigned long timeout; 2110 2111 timeout = mt7615_get_macwork_timeout(dev); 2112 ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, 2113 timeout); 2114 } 2115 } 2116 2117 ieee80211_wake_queues(mphy->hw); 2118 wake_up(&dev->pm.wait); 2119 } 2120 2121 void mt7615_pm_power_save_work(struct work_struct *work) 2122 { 2123 struct mt7615_dev *dev; 2124 unsigned long delta; 2125 2126 dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev, 2127 pm.ps_work.work); 2128 2129 delta = dev->pm.idle_timeout; 2130 if (test_bit(MT76_HW_SCANNING, &dev->mphy.state) || 2131 test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state)) 2132 goto out; 2133 2134 if (mutex_is_locked(&dev->mt76.mutex)) 2135 /* if mt76 mutex is held we should not put the device 2136 * to sleep since we are currently accessing device 2137 * register map. We need to wait for the next power_save 2138 * trigger. 2139 */ 2140 goto out; 2141 2142 if (time_is_after_jiffies(dev->pm.last_activity + delta)) { 2143 delta = dev->pm.last_activity + delta - jiffies; 2144 goto out; 2145 } 2146 2147 if (!mt7615_mcu_set_fw_ctrl(dev)) 2148 return; 2149 out: 2150 queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta); 2151 } 2152 2153 void mt7615_mac_work(struct work_struct *work) 2154 { 2155 struct mt7615_phy *phy; 2156 struct mt76_phy *mphy; 2157 unsigned long timeout; 2158 2159 mphy = (struct mt76_phy *)container_of(work, struct mt76_phy, 2160 mac_work.work); 2161 phy = mphy->priv; 2162 2163 mt7615_mutex_acquire(phy->dev); 2164 2165 mt7615_update_survey(phy->dev); 2166 if (++mphy->mac_work_count == 5) { 2167 mphy->mac_work_count = 0; 2168 2169 mt7615_mac_update_mib_stats(phy); 2170 mt7615_mac_scs_check(phy); 2171 } 2172 2173 mt7615_mutex_release(phy->dev); 2174 2175 mt76_tx_status_check(mphy->dev, false); 2176 2177 timeout = mt7615_get_macwork_timeout(phy->dev); 2178 ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, timeout); 2179 } 2180 2181 void mt7615_tx_token_put(struct mt7615_dev *dev) 2182 { 2183 struct mt76_txwi_cache *txwi; 2184 int id; 2185 2186 spin_lock_bh(&dev->mt76.token_lock); 2187 idr_for_each_entry(&dev->mt76.token, txwi, id) 2188 mt7615_txwi_free(dev, txwi); 2189 spin_unlock_bh(&dev->mt76.token_lock); 2190 idr_destroy(&dev->mt76.token); 2191 } 2192 EXPORT_SYMBOL_GPL(mt7615_tx_token_put); 2193 2194 static void mt7615_dfs_stop_radar_detector(struct mt7615_phy *phy) 2195 { 2196 struct mt7615_dev *dev = phy->dev; 2197 2198 if (phy->rdd_state & BIT(0)) 2199 mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0, 2200 MT_RX_SEL0, 0); 2201 if (phy->rdd_state & BIT(1)) 2202 mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1, 2203 MT_RX_SEL0, 0); 2204 } 2205 2206 static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain) 2207 { 2208 int err; 2209 2210 err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain, 2211 MT_RX_SEL0, 0); 2212 if (err < 0) 2213 return err; 2214 2215 return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain, 2216 MT_RX_SEL0, 1); 2217 } 2218 2219 static int mt7615_dfs_start_radar_detector(struct mt7615_phy *phy) 2220 { 2221 struct cfg80211_chan_def *chandef = &phy->mt76->chandef; 2222 struct mt7615_dev *dev = phy->dev; 2223 bool ext_phy = phy != &dev->phy; 2224 int err; 2225 2226 /* start CAC */ 2227 err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, ext_phy, 2228 MT_RX_SEL0, 0); 2229 if (err < 0) 2230 return err; 2231 2232 err = mt7615_dfs_start_rdd(dev, ext_phy); 2233 if (err < 0) 2234 return err; 2235 2236 phy->rdd_state |= BIT(ext_phy); 2237 2238 if (chandef->width == NL80211_CHAN_WIDTH_160 || 2239 chandef->width == NL80211_CHAN_WIDTH_80P80) { 2240 err = mt7615_dfs_start_rdd(dev, 1); 2241 if (err < 0) 2242 return err; 2243 2244 phy->rdd_state |= BIT(1); 2245 } 2246 2247 return 0; 2248 } 2249 2250 static int 2251 mt7615_dfs_init_radar_specs(struct mt7615_phy *phy) 2252 { 2253 const struct mt7615_dfs_radar_spec *radar_specs; 2254 struct mt7615_dev *dev = phy->dev; 2255 int err, i, lpn = 500; 2256 2257 switch (dev->mt76.region) { 2258 case NL80211_DFS_FCC: 2259 radar_specs = &fcc_radar_specs; 2260 lpn = 8; 2261 break; 2262 case NL80211_DFS_ETSI: 2263 radar_specs = &etsi_radar_specs; 2264 break; 2265 case NL80211_DFS_JP: 2266 radar_specs = &jp_radar_specs; 2267 break; 2268 default: 2269 return -EINVAL; 2270 } 2271 2272 /* avoid FCC radar detection in non-FCC region */ 2273 err = mt7615_mcu_set_fcc5_lpn(dev, lpn); 2274 if (err < 0) 2275 return err; 2276 2277 for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) { 2278 err = mt7615_mcu_set_radar_th(dev, i, 2279 &radar_specs->radar_pattern[i]); 2280 if (err < 0) 2281 return err; 2282 } 2283 2284 return mt7615_mcu_set_pulse_th(dev, &radar_specs->pulse_th); 2285 } 2286 2287 int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy) 2288 { 2289 struct mt7615_dev *dev = phy->dev; 2290 bool ext_phy = phy != &dev->phy; 2291 enum mt76_dfs_state dfs_state, prev_state; 2292 int err; 2293 2294 if (is_mt7663(&dev->mt76)) 2295 return 0; 2296 2297 prev_state = phy->mt76->dfs_state; 2298 dfs_state = mt76_phy_dfs_state(phy->mt76); 2299 2300 if (prev_state == dfs_state) 2301 return 0; 2302 2303 if (prev_state == MT_DFS_STATE_UNKNOWN) 2304 mt7615_dfs_stop_radar_detector(phy); 2305 2306 if (dfs_state == MT_DFS_STATE_DISABLED) 2307 goto stop; 2308 2309 if (prev_state <= MT_DFS_STATE_DISABLED) { 2310 err = mt7615_dfs_init_radar_specs(phy); 2311 if (err < 0) 2312 return err; 2313 2314 err = mt7615_dfs_start_radar_detector(phy); 2315 if (err < 0) 2316 return err; 2317 2318 phy->mt76->dfs_state = MT_DFS_STATE_CAC; 2319 } 2320 2321 if (dfs_state == MT_DFS_STATE_CAC) 2322 return 0; 2323 2324 err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END, 2325 ext_phy, MT_RX_SEL0, 0); 2326 if (err < 0) { 2327 phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN; 2328 return err; 2329 } 2330 2331 phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE; 2332 return 0; 2333 2334 stop: 2335 err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START, ext_phy, 2336 MT_RX_SEL0, 0); 2337 if (err < 0) 2338 return err; 2339 2340 mt7615_dfs_stop_radar_detector(phy); 2341 phy->mt76->dfs_state = MT_DFS_STATE_DISABLED; 2342 2343 return 0; 2344 } 2345 2346 int mt7615_mac_set_beacon_filter(struct mt7615_phy *phy, 2347 struct ieee80211_vif *vif, 2348 bool enable) 2349 { 2350 struct mt7615_dev *dev = phy->dev; 2351 bool ext_phy = phy != &dev->phy; 2352 int err; 2353 2354 if (!mt7615_firmware_offload(dev)) 2355 return -EOPNOTSUPP; 2356 2357 switch (vif->type) { 2358 case NL80211_IFTYPE_MONITOR: 2359 return 0; 2360 case NL80211_IFTYPE_MESH_POINT: 2361 case NL80211_IFTYPE_ADHOC: 2362 case NL80211_IFTYPE_AP: 2363 if (enable) 2364 phy->n_beacon_vif++; 2365 else 2366 phy->n_beacon_vif--; 2367 fallthrough; 2368 default: 2369 break; 2370 } 2371 2372 err = mt7615_mcu_set_bss_pm(dev, vif, !phy->n_beacon_vif); 2373 if (err) 2374 return err; 2375 2376 if (phy->n_beacon_vif) { 2377 vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER; 2378 mt76_clear(dev, MT_WF_RFCR(ext_phy), 2379 MT_WF_RFCR_DROP_OTHER_BEACON); 2380 } else { 2381 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; 2382 mt76_set(dev, MT_WF_RFCR(ext_phy), 2383 MT_WF_RFCR_DROP_OTHER_BEACON); 2384 } 2385 2386 return 0; 2387 } 2388 2389 void mt7615_coredump_work(struct work_struct *work) 2390 { 2391 struct mt7615_dev *dev; 2392 char *dump, *data; 2393 2394 dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev, 2395 coredump.work.work); 2396 2397 if (time_is_after_jiffies(dev->coredump.last_activity + 2398 4 * MT76_CONNAC_COREDUMP_TIMEOUT)) { 2399 queue_delayed_work(dev->mt76.wq, &dev->coredump.work, 2400 MT76_CONNAC_COREDUMP_TIMEOUT); 2401 return; 2402 } 2403 2404 dump = vzalloc(MT76_CONNAC_COREDUMP_SZ); 2405 data = dump; 2406 2407 while (true) { 2408 struct sk_buff *skb; 2409 2410 spin_lock_bh(&dev->mt76.lock); 2411 skb = __skb_dequeue(&dev->coredump.msg_list); 2412 spin_unlock_bh(&dev->mt76.lock); 2413 2414 if (!skb) 2415 break; 2416 2417 skb_pull(skb, sizeof(struct mt7615_mcu_rxd)); 2418 if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) { 2419 dev_kfree_skb(skb); 2420 continue; 2421 } 2422 2423 memcpy(data, skb->data, skb->len); 2424 data += skb->len; 2425 2426 dev_kfree_skb(skb); 2427 } 2428 dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ, 2429 GFP_KERNEL); 2430 } 2431