1 // SPDX-License-Identifier: ISC 2 /* Copyright (C) 2019 MediaTek Inc. 3 * 4 * Author: Ryder Lee <ryder.lee@mediatek.com> 5 * Roy Luo <royluo@google.com> 6 * Felix Fietkau <nbd@nbd.name> 7 * Lorenzo Bianconi <lorenzo@kernel.org> 8 */ 9 10 #include <linux/devcoredump.h> 11 #include <linux/etherdevice.h> 12 #include <linux/timekeeping.h> 13 #include "mt7615.h" 14 #include "../trace.h" 15 #include "../dma.h" 16 #include "mt7615_trace.h" 17 #include "mac.h" 18 #include "mcu.h" 19 20 #define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2) 21 22 static const struct mt7615_dfs_radar_spec etsi_radar_specs = { 23 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, 24 .radar_pattern = { 25 [5] = { 1, 0, 6, 32, 28, 0, 17, 990, 5010, 1, 1 }, 26 [6] = { 1, 0, 9, 32, 28, 0, 27, 615, 5010, 1, 1 }, 27 [7] = { 1, 0, 15, 32, 28, 0, 27, 240, 445, 1, 1 }, 28 [8] = { 1, 0, 12, 32, 28, 0, 42, 240, 510, 1, 1 }, 29 [9] = { 1, 1, 0, 0, 0, 0, 14, 2490, 3343, 0, 0, 12, 32, 28 }, 30 [10] = { 1, 1, 0, 0, 0, 0, 14, 2490, 3343, 0, 0, 15, 32, 24 }, 31 [11] = { 1, 1, 0, 0, 0, 0, 14, 823, 2510, 0, 0, 18, 32, 28 }, 32 [12] = { 1, 1, 0, 0, 0, 0, 14, 823, 2510, 0, 0, 27, 32, 24 }, 33 }, 34 }; 35 36 static const struct mt7615_dfs_radar_spec fcc_radar_specs = { 37 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, 38 .radar_pattern = { 39 [0] = { 1, 0, 9, 32, 28, 0, 13, 508, 3076, 1, 1 }, 40 [1] = { 1, 0, 12, 32, 28, 0, 17, 140, 240, 1, 1 }, 41 [2] = { 1, 0, 8, 32, 28, 0, 22, 190, 510, 1, 1 }, 42 [3] = { 1, 0, 6, 32, 28, 0, 32, 190, 510, 1, 1 }, 43 [4] = { 1, 0, 9, 255, 28, 0, 13, 323, 343, 1, 32 }, 44 }, 45 }; 46 47 static const struct mt7615_dfs_radar_spec jp_radar_specs = { 48 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, 49 .radar_pattern = { 50 [0] = { 1, 0, 8, 32, 28, 0, 13, 508, 3076, 1, 1 }, 51 [1] = { 1, 0, 12, 32, 28, 0, 17, 140, 240, 1, 1 }, 52 [2] = { 1, 0, 8, 32, 28, 0, 22, 190, 510, 1, 1 }, 53 [3] = { 1, 0, 6, 32, 28, 0, 32, 190, 510, 1, 1 }, 54 [4] = { 1, 0, 9, 32, 28, 0, 13, 323, 343, 1, 32 }, 55 [13] = { 1, 0, 8, 32, 28, 0, 14, 3836, 3856, 1, 1 }, 56 [14] = { 1, 0, 8, 32, 28, 0, 14, 3990, 4010, 1, 1 }, 57 }, 58 }; 59 60 static enum mt76_cipher_type 61 mt7615_mac_get_cipher(int cipher) 62 { 63 switch (cipher) { 64 case WLAN_CIPHER_SUITE_WEP40: 65 return MT_CIPHER_WEP40; 66 case WLAN_CIPHER_SUITE_WEP104: 67 return MT_CIPHER_WEP104; 68 case WLAN_CIPHER_SUITE_TKIP: 69 return MT_CIPHER_TKIP; 70 case WLAN_CIPHER_SUITE_AES_CMAC: 71 return MT_CIPHER_BIP_CMAC_128; 72 case WLAN_CIPHER_SUITE_CCMP: 73 return MT_CIPHER_AES_CCMP; 74 case WLAN_CIPHER_SUITE_CCMP_256: 75 return MT_CIPHER_CCMP_256; 76 case WLAN_CIPHER_SUITE_GCMP: 77 return MT_CIPHER_GCMP; 78 case WLAN_CIPHER_SUITE_GCMP_256: 79 return MT_CIPHER_GCMP_256; 80 case WLAN_CIPHER_SUITE_SMS4: 81 return MT_CIPHER_WAPI; 82 default: 83 return MT_CIPHER_NONE; 84 } 85 } 86 87 static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev, 88 u8 idx, bool unicast) 89 { 90 struct mt7615_sta *sta; 91 struct mt76_wcid *wcid; 92 93 wcid = mt76_wcid_ptr(dev, idx); 94 if (unicast || !wcid) 95 return wcid; 96 97 if (!wcid->sta) 98 return NULL; 99 100 sta = container_of(wcid, struct mt7615_sta, wcid); 101 if (!sta->vif) 102 return NULL; 103 104 return &sta->vif->sta.wcid; 105 } 106 107 void mt7615_mac_reset_counters(struct mt7615_phy *phy) 108 { 109 struct mt7615_dev *dev = phy->dev; 110 int i; 111 112 for (i = 0; i < 4; i++) { 113 mt76_rr(dev, MT_TX_AGG_CNT(0, i)); 114 mt76_rr(dev, MT_TX_AGG_CNT(1, i)); 115 } 116 117 memset(phy->mt76->aggr_stats, 0, 
sizeof(phy->mt76->aggr_stats)); 118 phy->mt76->survey_time = ktime_get_boottime(); 119 120 /* reset airtime counters */ 121 mt76_rr(dev, MT_MIB_SDR9(0)); 122 mt76_rr(dev, MT_MIB_SDR9(1)); 123 124 mt76_rr(dev, MT_MIB_SDR36(0)); 125 mt76_rr(dev, MT_MIB_SDR36(1)); 126 127 mt76_rr(dev, MT_MIB_SDR37(0)); 128 mt76_rr(dev, MT_MIB_SDR37(1)); 129 130 mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR); 131 mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_CLR); 132 } 133 134 void mt7615_mac_set_timing(struct mt7615_phy *phy) 135 { 136 s16 coverage_class = phy->coverage_class; 137 struct mt7615_dev *dev = phy->dev; 138 bool ext_phy = phy != &dev->phy; 139 u32 val, reg_offset; 140 u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) | 141 FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48); 142 u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) | 143 FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28); 144 int sifs, offset; 145 bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ; 146 147 if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state)) 148 return; 149 150 if (is_5ghz) 151 sifs = 16; 152 else 153 sifs = 10; 154 155 if (ext_phy) { 156 coverage_class = max_t(s16, dev->phy.coverage_class, 157 coverage_class); 158 mt76_set(dev, MT_ARB_SCR, 159 MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE); 160 } else { 161 struct mt7615_phy *phy_ext = mt7615_ext_phy(dev); 162 163 if (phy_ext) 164 coverage_class = max_t(s16, phy_ext->coverage_class, 165 coverage_class); 166 mt76_set(dev, MT_ARB_SCR, 167 MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE); 168 } 169 udelay(1); 170 171 offset = 3 * coverage_class; 172 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) | 173 FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset); 174 mt76_wr(dev, MT_TMAC_CDTR, cck + reg_offset); 175 mt76_wr(dev, MT_TMAC_ODTR, ofdm + reg_offset); 176 177 mt76_wr(dev, MT_TMAC_ICR(ext_phy), 178 FIELD_PREP(MT_IFS_EIFS, 360) | 179 FIELD_PREP(MT_IFS_RIFS, 2) | 180 FIELD_PREP(MT_IFS_SIFS, sifs) | 181 FIELD_PREP(MT_IFS_SLOT, phy->slottime)); 182 183 if (phy->slottime < 20 || is_5ghz) 184 val = MT7615_CFEND_RATE_DEFAULT; 185 else 186 val = MT7615_CFEND_RATE_11B; 187 188 mt76_rmw_field(dev, MT_AGG_ACR(ext_phy), MT_AGG_ACR_CFEND_RATE, val); 189 if (ext_phy) 190 mt76_clear(dev, MT_ARB_SCR, 191 MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE); 192 else 193 mt76_clear(dev, MT_ARB_SCR, 194 MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE); 195 196 } 197 198 static void 199 mt7615_get_status_freq_info(struct mt7615_dev *dev, struct mt76_phy *mphy, 200 struct mt76_rx_status *status, u8 chfreq) 201 { 202 if (!test_bit(MT76_HW_SCANNING, &mphy->state) && 203 !test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) && 204 !test_bit(MT76_STATE_ROC, &mphy->state)) { 205 status->freq = mphy->chandef.chan->center_freq; 206 status->band = mphy->chandef.chan->band; 207 return; 208 } 209 210 status->band = chfreq <= 14 ? 
NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; 211 status->freq = ieee80211_channel_to_frequency(chfreq, status->band); 212 } 213 214 static void mt7615_mac_fill_tm_rx(struct mt7615_phy *phy, __le32 *rxv) 215 { 216 #ifdef CONFIG_NL80211_TESTMODE 217 u32 rxv1 = le32_to_cpu(rxv[0]); 218 u32 rxv3 = le32_to_cpu(rxv[2]); 219 u32 rxv4 = le32_to_cpu(rxv[3]); 220 u32 rxv5 = le32_to_cpu(rxv[4]); 221 u8 cbw = FIELD_GET(MT_RXV1_FRAME_MODE, rxv1); 222 u8 mode = FIELD_GET(MT_RXV1_TX_MODE, rxv1); 223 s16 foe = FIELD_GET(MT_RXV5_FOE, rxv5); 224 u32 foe_const = (BIT(cbw + 1) & 0xf) * 10000; 225 226 if (!mode) { 227 /* CCK */ 228 foe &= ~BIT(11); 229 foe *= 1000; 230 foe >>= 11; 231 } else { 232 if (foe > 2048) 233 foe -= 4096; 234 235 foe = (foe * foe_const) >> 15; 236 } 237 238 phy->test.last_freq_offset = foe; 239 phy->test.last_rcpi[0] = FIELD_GET(MT_RXV4_RCPI0, rxv4); 240 phy->test.last_rcpi[1] = FIELD_GET(MT_RXV4_RCPI1, rxv4); 241 phy->test.last_rcpi[2] = FIELD_GET(MT_RXV4_RCPI2, rxv4); 242 phy->test.last_rcpi[3] = FIELD_GET(MT_RXV4_RCPI3, rxv4); 243 phy->test.last_ib_rssi[0] = FIELD_GET(MT_RXV3_IB_RSSI, rxv3); 244 phy->test.last_wb_rssi[0] = FIELD_GET(MT_RXV3_WB_RSSI, rxv3); 245 #endif 246 } 247 248 /* The HW does not translate the mac header to 802.3 for mesh point */ 249 static int mt7615_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap) 250 { 251 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; 252 struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap); 253 struct mt7615_sta *msta = (struct mt7615_sta *)status->wcid; 254 __le32 *rxd = (__le32 *)skb->data; 255 struct ieee80211_sta *sta; 256 struct ieee80211_vif *vif; 257 struct ieee80211_hdr hdr; 258 u16 frame_control; 259 260 if (le32_get_bits(rxd[1], MT_RXD1_NORMAL_ADDR_TYPE) != 261 MT_RXD1_NORMAL_U2M) 262 return -EINVAL; 263 264 if (!(le32_to_cpu(rxd[0]) & MT_RXD0_NORMAL_GROUP_4)) 265 return -EINVAL; 266 267 if (!msta || !msta->vif) 268 return -EINVAL; 269 270 sta = container_of((void *)msta, struct ieee80211_sta, drv_priv); 271 vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv); 272 273 /* store the info from RXD and ethhdr to avoid being overridden */ 274 frame_control = le32_get_bits(rxd[4], MT_RXD4_FRAME_CONTROL); 275 hdr.frame_control = cpu_to_le16(frame_control); 276 hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[6], MT_RXD6_SEQ_CTRL)); 277 hdr.duration_id = 0; 278 279 ether_addr_copy(hdr.addr1, vif->addr); 280 ether_addr_copy(hdr.addr2, sta->addr); 281 switch (frame_control & (IEEE80211_FCTL_TODS | 282 IEEE80211_FCTL_FROMDS)) { 283 case 0: 284 ether_addr_copy(hdr.addr3, vif->bss_conf.bssid); 285 break; 286 case IEEE80211_FCTL_FROMDS: 287 ether_addr_copy(hdr.addr3, eth_hdr->h_source); 288 break; 289 case IEEE80211_FCTL_TODS: 290 ether_addr_copy(hdr.addr3, eth_hdr->h_dest); 291 break; 292 case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS: 293 ether_addr_copy(hdr.addr3, eth_hdr->h_dest); 294 ether_addr_copy(hdr.addr4, eth_hdr->h_source); 295 break; 296 default: 297 break; 298 } 299 300 skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2); 301 if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) || 302 eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX)) 303 ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header); 304 else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN) 305 ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header); 306 else 307 skb_pull(skb, 2); 308 309 if (ieee80211_has_order(hdr.frame_control)) 310 memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[7], 311 IEEE80211_HT_CTL_LEN); 312 313 
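/* rebuild the QoS control field from RXD word 6 */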
if (ieee80211_is_data_qos(hdr.frame_control)) { 314 __le16 qos_ctrl; 315 316 qos_ctrl = cpu_to_le16(le32_get_bits(rxd[6], MT_RXD6_QOS_CTL)); 317 memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl, 318 IEEE80211_QOS_CTL_LEN); 319 } 320 321 if (ieee80211_has_a4(hdr.frame_control)) 322 memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr)); 323 else 324 memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6); 325 326 status->flag &= ~(RX_FLAG_RADIOTAP_HE | RX_FLAG_RADIOTAP_HE_MU); 327 return 0; 328 } 329 330 static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) 331 { 332 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; 333 struct mt76_phy *mphy = &dev->mt76.phy; 334 struct mt7615_phy *phy = &dev->phy; 335 struct ieee80211_supported_band *sband; 336 struct ieee80211_hdr *hdr; 337 struct mt7615_phy *phy2; 338 __le32 *rxd = (__le32 *)skb->data; 339 u32 rxd0 = le32_to_cpu(rxd[0]); 340 u32 rxd1 = le32_to_cpu(rxd[1]); 341 u32 rxd2 = le32_to_cpu(rxd[2]); 342 u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM; 343 u32 csum_status = *(u32 *)skb->cb; 344 bool unicast, hdr_trans, remove_pad, insert_ccmp_hdr = false; 345 u16 hdr_gap; 346 int phy_idx; 347 int i, idx; 348 u8 chfreq, amsdu_info, qos_ctl = 0; 349 u16 seq_ctrl = 0; 350 __le16 fc = 0; 351 352 memset(status, 0, sizeof(*status)); 353 354 chfreq = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1); 355 356 phy2 = dev->mt76.phys[MT_BAND1] ? dev->mt76.phys[MT_BAND1]->priv : NULL; 357 if (!phy2) 358 phy_idx = 0; 359 else if (phy2->chfreq == phy->chfreq) 360 phy_idx = -1; 361 else if (phy->chfreq == chfreq) 362 phy_idx = 0; 363 else if (phy2->chfreq == chfreq) 364 phy_idx = 1; 365 else 366 phy_idx = -1; 367 368 if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR) 369 return -EINVAL; 370 371 hdr_trans = rxd1 & MT_RXD1_NORMAL_HDR_TRANS; 372 if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_CM)) 373 return -EINVAL; 374 375 /* ICV error or CCMP/BIP/WPI MIC error */ 376 if (rxd2 & MT_RXD2_NORMAL_ICV_ERR) 377 status->flag |= RX_FLAG_ONLY_MONITOR; 378 379 unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M; 380 idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2); 381 status->wcid = mt7615_rx_get_wcid(dev, idx, unicast); 382 383 if (status->wcid) { 384 struct mt7615_sta *msta; 385 386 msta = container_of(status->wcid, struct mt7615_sta, wcid); 387 mt76_wcid_add_poll(&dev->mt76, &msta->wcid); 388 } 389 390 if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask && 391 !(csum_status & (BIT(0) | BIT(2) | BIT(3)))) 392 skb->ip_summed = CHECKSUM_UNNECESSARY; 393 394 if (rxd2 & MT_RXD2_NORMAL_FCS_ERR) 395 status->flag |= RX_FLAG_FAILED_FCS_CRC; 396 397 if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR) 398 status->flag |= RX_FLAG_MMIC_ERROR; 399 400 if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 && 401 !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) { 402 status->flag |= RX_FLAG_DECRYPTED; 403 status->flag |= RX_FLAG_IV_STRIPPED; 404 status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED; 405 } 406 407 remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET; 408 409 if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR) 410 return -EINVAL; 411 412 rxd += 4; 413 if (rxd0 & MT_RXD0_NORMAL_GROUP_4) { 414 u32 v0 = le32_to_cpu(rxd[0]); 415 u32 v2 = le32_to_cpu(rxd[2]); 416 417 fc = cpu_to_le16(FIELD_GET(MT_RXD4_FRAME_CONTROL, v0)); 418 qos_ctl = FIELD_GET(MT_RXD6_QOS_CTL, v2); 419 seq_ctrl = FIELD_GET(MT_RXD6_SEQ_CTRL, v2); 420 421 rxd += 4; 422 if ((u8 *)rxd - skb->data >= skb->len) 423 return -EINVAL; 424 } 425 426 if (rxd0 & 
MT_RXD0_NORMAL_GROUP_1) { 427 u8 *data = (u8 *)rxd; 428 429 if (status->flag & RX_FLAG_DECRYPTED) { 430 switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) { 431 case MT_CIPHER_AES_CCMP: 432 case MT_CIPHER_CCMP_CCX: 433 case MT_CIPHER_CCMP_256: 434 insert_ccmp_hdr = 435 FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2); 436 fallthrough; 437 case MT_CIPHER_TKIP: 438 case MT_CIPHER_TKIP_NO_MIC: 439 case MT_CIPHER_GCMP: 440 case MT_CIPHER_GCMP_256: 441 status->iv[0] = data[5]; 442 status->iv[1] = data[4]; 443 status->iv[2] = data[3]; 444 status->iv[3] = data[2]; 445 status->iv[4] = data[1]; 446 status->iv[5] = data[0]; 447 break; 448 default: 449 break; 450 } 451 } 452 rxd += 4; 453 if ((u8 *)rxd - skb->data >= skb->len) 454 return -EINVAL; 455 } 456 457 if (rxd0 & MT_RXD0_NORMAL_GROUP_2) { 458 status->timestamp = le32_to_cpu(rxd[0]); 459 status->flag |= RX_FLAG_MACTIME_START; 460 461 if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB | 462 MT_RXD2_NORMAL_NON_AMPDU))) { 463 status->flag |= RX_FLAG_AMPDU_DETAILS; 464 465 /* all subframes of an A-MPDU have the same timestamp */ 466 if (phy->rx_ampdu_ts != status->timestamp) { 467 if (!++phy->ampdu_ref) 468 phy->ampdu_ref++; 469 } 470 phy->rx_ampdu_ts = status->timestamp; 471 472 status->ampdu_ref = phy->ampdu_ref; 473 } 474 475 rxd += 2; 476 if ((u8 *)rxd - skb->data >= skb->len) 477 return -EINVAL; 478 } 479 480 if (rxd0 & MT_RXD0_NORMAL_GROUP_3) { 481 u32 rxdg5 = le32_to_cpu(rxd[5]); 482 483 /* 484 * If both PHYs are on the same channel and we don't have a WCID, 485 * we need to figure out which PHY this packet was received on. 486 * On the primary PHY, the noise value for the chains belonging to the 487 * second PHY will be set to the noise value of the last packet from 488 * that PHY. 489 */ 490 if (phy_idx < 0) { 491 int first_chain = ffs(phy2->mt76->chainmask) - 1; 492 493 phy_idx = ((rxdg5 >> (first_chain * 8)) & 0xff) == 0; 494 } 495 } 496 497 if (phy_idx == 1 && phy2) { 498 mphy = dev->mt76.phys[MT_BAND1]; 499 phy = phy2; 500 status->phy_idx = phy_idx; 501 } 502 503 if (!mt7615_firmware_offload(dev) && chfreq != phy->chfreq) 504 return -EINVAL; 505 506 mt7615_get_status_freq_info(dev, mphy, status, chfreq); 507 if (status->band == NL80211_BAND_5GHZ) 508 sband = &mphy->sband_5g.sband; 509 else 510 sband = &mphy->sband_2g.sband; 511 512 if (!test_bit(MT76_STATE_RUNNING, &mphy->state)) 513 return -EINVAL; 514 515 if (!sband->channels) 516 return -EINVAL; 517 518 if (rxd0 & MT_RXD0_NORMAL_GROUP_3) { 519 u32 rxdg0 = le32_to_cpu(rxd[0]); 520 u32 rxdg1 = le32_to_cpu(rxd[1]); 521 u32 rxdg3 = le32_to_cpu(rxd[3]); 522 u8 stbc = FIELD_GET(MT_RXV1_HT_STBC, rxdg0); 523 bool cck = false; 524 525 i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0); 526 switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) { 527 case MT_PHY_TYPE_CCK: 528 cck = true; 529 fallthrough; 530 case MT_PHY_TYPE_OFDM: 531 i = mt76_get_rate(&dev->mt76, sband, i, cck); 532 break; 533 case MT_PHY_TYPE_HT_GF: 534 case MT_PHY_TYPE_HT: 535 status->encoding = RX_ENC_HT; 536 if (i > 31) 537 return -EINVAL; 538 break; 539 case MT_PHY_TYPE_VHT: 540 status->nss = FIELD_GET(MT_RXV2_NSTS, rxdg1) + 1; 541 status->encoding = RX_ENC_VHT; 542 break; 543 default: 544 return -EINVAL; 545 } 546 status->rate_idx = i; 547 548 switch (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0)) { 549 case MT_PHY_BW_20: 550 break; 551 case MT_PHY_BW_40: 552 status->bw = RATE_INFO_BW_40; 553 break; 554 case MT_PHY_BW_80: 555 status->bw = RATE_INFO_BW_80; 556 break; 557 case MT_PHY_BW_160: 558 status->bw = RATE_INFO_BW_160; 559 break; 560 default: 561 return -EINVAL; 562 
} 563 564 if (rxdg0 & MT_RXV1_HT_SHORT_GI) 565 status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 566 if (rxdg0 & MT_RXV1_HT_AD_CODE) 567 status->enc_flags |= RX_ENC_FLAG_LDPC; 568 569 status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc; 570 571 status->chains = mphy->antenna_mask; 572 status->chain_signal[0] = to_rssi(MT_RXV4_RCPI0, rxdg3); 573 status->chain_signal[1] = to_rssi(MT_RXV4_RCPI1, rxdg3); 574 status->chain_signal[2] = to_rssi(MT_RXV4_RCPI2, rxdg3); 575 status->chain_signal[3] = to_rssi(MT_RXV4_RCPI3, rxdg3); 576 577 mt7615_mac_fill_tm_rx(mphy->priv, rxd); 578 579 rxd += 6; 580 if ((u8 *)rxd - skb->data >= skb->len) 581 return -EINVAL; 582 } 583 584 amsdu_info = FIELD_GET(MT_RXD1_NORMAL_PAYLOAD_FORMAT, rxd1); 585 status->amsdu = !!amsdu_info; 586 if (status->amsdu) { 587 status->first_amsdu = amsdu_info == MT_RXD1_FIRST_AMSDU_FRAME; 588 status->last_amsdu = amsdu_info == MT_RXD1_LAST_AMSDU_FRAME; 589 } 590 591 hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad; 592 if (hdr_trans && ieee80211_has_morefrags(fc)) { 593 if (mt7615_reverse_frag0_hdr_trans(skb, hdr_gap)) 594 return -EINVAL; 595 hdr_trans = false; 596 } else { 597 int pad_start = 0; 598 599 skb_pull(skb, hdr_gap); 600 if (!hdr_trans && status->amsdu) { 601 pad_start = ieee80211_get_hdrlen_from_skb(skb); 602 } else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) { 603 /* 604 * When header translation failure is indicated, 605 * the hardware will insert an extra 2-byte field 606 * containing the data length after the protocol 607 * type field. This happens either when the LLC-SNAP 608 * pattern did not match, or if a VLAN header was 609 * detected. 610 */ 611 pad_start = 12; 612 if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q) 613 pad_start += 4; 614 else 615 pad_start = 0; 616 } 617 618 if (pad_start) { 619 memmove(skb->data + 2, skb->data, pad_start); 620 skb_pull(skb, 2); 621 } 622 } 623 624 if (insert_ccmp_hdr && !hdr_trans) { 625 u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1); 626 627 mt76_insert_ccmp_hdr(skb, key_id); 628 } 629 630 if (!hdr_trans) { 631 hdr = (struct ieee80211_hdr *)skb->data; 632 fc = hdr->frame_control; 633 if (ieee80211_is_data_qos(fc)) { 634 seq_ctrl = le16_to_cpu(hdr->seq_ctrl); 635 qos_ctl = *ieee80211_get_qos_ctl(hdr); 636 } 637 } else { 638 status->flag |= RX_FLAG_8023; 639 } 640 641 if (!status->wcid || !ieee80211_is_data_qos(fc)) 642 return 0; 643 644 status->aggr = unicast && 645 !ieee80211_is_qos_nullfunc(fc); 646 status->qos_ctl = qos_ctl; 647 status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl); 648 649 return 0; 650 } 651 652 static u16 653 mt7615_mac_tx_rate_val(struct mt7615_dev *dev, 654 struct mt76_phy *mphy, 655 const struct ieee80211_tx_rate *rate, 656 bool stbc, u8 *bw) 657 { 658 u8 phy, nss, rate_idx; 659 u16 rateval = 0; 660 661 *bw = 0; 662 663 if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { 664 rate_idx = ieee80211_rate_get_vht_mcs(rate); 665 nss = ieee80211_rate_get_vht_nss(rate); 666 phy = MT_PHY_TYPE_VHT; 667 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) 668 *bw = 1; 669 else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) 670 *bw = 2; 671 else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH) 672 *bw = 3; 673 } else if (rate->flags & IEEE80211_TX_RC_MCS) { 674 rate_idx = rate->idx; 675 nss = 1 + (rate->idx >> 3); 676 phy = MT_PHY_TYPE_HT; 677 if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD) 678 phy = MT_PHY_TYPE_HT_GF; 679 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) 680 *bw = 1; 681 } else { 682 const struct ieee80211_rate *r; 683 int band = 
mphy->chandef.chan->band; 684 u16 val; 685 686 nss = 1; 687 r = &mphy->hw->wiphy->bands[band]->bitrates[rate->idx]; 688 if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 689 val = r->hw_value_short; 690 else 691 val = r->hw_value; 692 693 phy = val >> 8; 694 rate_idx = val & 0xff; 695 } 696 697 if (stbc && nss == 1) { 698 nss++; 699 rateval |= MT_TX_RATE_STBC; 700 } 701 702 rateval |= (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) | 703 FIELD_PREP(MT_TX_RATE_MODE, phy) | 704 FIELD_PREP(MT_TX_RATE_NSS, nss - 1)); 705 706 return rateval; 707 } 708 709 int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, 710 struct sk_buff *skb, struct mt76_wcid *wcid, 711 struct ieee80211_sta *sta, int pid, 712 struct ieee80211_key_conf *key, 713 enum mt76_txq_id qid, bool beacon) 714 { 715 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 716 u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0; 717 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 718 struct ieee80211_tx_rate *rate = &info->control.rates[0]; 719 u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2; 720 bool multicast = is_multicast_ether_addr(hdr->addr1); 721 struct ieee80211_vif *vif = info->control.vif; 722 bool is_mmio = mt76_is_mmio(&dev->mt76); 723 u32 val, sz_txd = is_mmio ? MT_TXD_SIZE : MT_USB_TXD_SIZE; 724 struct mt76_phy *mphy = &dev->mphy; 725 __le16 fc = hdr->frame_control; 726 int tx_count = 8; 727 u16 seqno = 0; 728 729 if (vif) { 730 struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv; 731 732 omac_idx = mvif->omac_idx; 733 wmm_idx = mvif->wmm_idx; 734 } 735 736 if (sta) { 737 struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv; 738 739 tx_count = msta->rate_count; 740 } 741 742 if (phy_idx && dev->mt76.phys[MT_BAND1]) 743 mphy = dev->mt76.phys[MT_BAND1]; 744 745 fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2; 746 fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4; 747 748 if (beacon) { 749 p_fmt = MT_TX_TYPE_FW; 750 q_idx = phy_idx ? MT_LMAC_BCN1 : MT_LMAC_BCN0; 751 } else if (qid >= MT_TXQ_PSD) { 752 p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF; 753 q_idx = phy_idx ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0; 754 } else { 755 p_fmt = is_mmio ? 
MT_TX_TYPE_CT : MT_TX_TYPE_SF; 756 q_idx = wmm_idx * MT7615_MAX_WMM_SETS + 757 mt7615_lmac_mapping(dev, skb_get_queue_mapping(skb)); 758 } 759 760 val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) | 761 FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_LMAC) | 762 FIELD_PREP(MT_TXD0_Q_IDX, q_idx); 763 txwi[0] = cpu_to_le32(val); 764 765 val = MT_TXD1_LONG_FORMAT | 766 FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) | 767 FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) | 768 FIELD_PREP(MT_TXD1_HDR_INFO, 769 ieee80211_get_hdrlen_from_skb(skb) / 2) | 770 FIELD_PREP(MT_TXD1_TID, 771 skb->priority & IEEE80211_QOS_CTL_TID_MASK) | 772 FIELD_PREP(MT_TXD1_PKT_FMT, p_fmt) | 773 FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx); 774 txwi[1] = cpu_to_le32(val); 775 776 val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) | 777 FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) | 778 FIELD_PREP(MT_TXD2_MULTICAST, multicast); 779 if (key) { 780 if (multicast && ieee80211_is_robust_mgmt_frame(skb) && 781 key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) { 782 val |= MT_TXD2_BIP; 783 txwi[3] = 0; 784 } else { 785 txwi[3] = cpu_to_le32(MT_TXD3_PROTECT_FRAME); 786 } 787 } else { 788 txwi[3] = 0; 789 } 790 txwi[2] = cpu_to_le32(val); 791 792 if (!(info->flags & IEEE80211_TX_CTL_AMPDU)) 793 txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE); 794 795 txwi[4] = 0; 796 txwi[6] = 0; 797 798 if (rate->idx >= 0 && rate->count && 799 !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) { 800 bool stbc = info->flags & IEEE80211_TX_CTL_STBC; 801 u8 bw; 802 u16 rateval = mt7615_mac_tx_rate_val(dev, mphy, rate, stbc, 803 &bw); 804 805 txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE); 806 807 val = MT_TXD6_FIXED_BW | 808 FIELD_PREP(MT_TXD6_BW, bw) | 809 FIELD_PREP(MT_TXD6_TX_RATE, rateval); 810 txwi[6] |= cpu_to_le32(val); 811 812 if (rate->flags & IEEE80211_TX_RC_SHORT_GI) 813 txwi[6] |= cpu_to_le32(MT_TXD6_SGI); 814 815 if (info->flags & IEEE80211_TX_CTL_LDPC) 816 txwi[6] |= cpu_to_le32(MT_TXD6_LDPC); 817 818 if (!(rate->flags & (IEEE80211_TX_RC_MCS | 819 IEEE80211_TX_RC_VHT_MCS))) 820 txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE); 821 822 tx_count = rate->count; 823 } 824 825 if (!ieee80211_is_beacon(fc)) { 826 struct ieee80211_hw *hw = mt76_hw(dev); 827 828 val = MT_TXD5_TX_STATUS_HOST | FIELD_PREP(MT_TXD5_PID, pid); 829 if (!ieee80211_hw_check(hw, SUPPORTS_PS)) 830 val |= MT_TXD5_SW_POWER_MGMT; 831 txwi[5] = cpu_to_le32(val); 832 } else { 833 txwi[5] = 0; 834 /* use maximum tx count for beacons */ 835 tx_count = 0x1f; 836 } 837 838 val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count); 839 if (info->flags & IEEE80211_TX_CTL_INJECTED) { 840 seqno = le16_to_cpu(hdr->seq_ctrl); 841 842 if (ieee80211_is_back_req(hdr->frame_control)) { 843 struct ieee80211_bar *bar; 844 845 bar = (struct ieee80211_bar *)skb->data; 846 seqno = le16_to_cpu(bar->start_seq_num); 847 } 848 849 val |= MT_TXD3_SN_VALID | 850 FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno)); 851 } 852 853 txwi[3] |= cpu_to_le32(val); 854 855 if (info->flags & IEEE80211_TX_CTL_NO_ACK) 856 txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK); 857 858 val = FIELD_PREP(MT_TXD7_TYPE, fc_type) | 859 FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype) | 860 FIELD_PREP(MT_TXD7_SPE_IDX, 0x18); 861 txwi[7] = cpu_to_le32(val); 862 if (!is_mmio) { 863 val = FIELD_PREP(MT_TXD8_L_TYPE, fc_type) | 864 FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype); 865 txwi[8] = cpu_to_le32(val); 866 } 867 868 return 0; 869 } 870 EXPORT_SYMBOL_GPL(mt7615_mac_write_txwi); 871 872 bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask) 873 { 874 mt76_rmw(dev, MT_WTBL_UPDATE, 
MT_WTBL_UPDATE_WLAN_IDX, 875 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask); 876 877 return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 878 0, 5000); 879 } 880 881 void mt7615_mac_sta_poll(struct mt7615_dev *dev) 882 { 883 static const u8 ac_to_tid[4] = { 884 [IEEE80211_AC_BE] = 0, 885 [IEEE80211_AC_BK] = 1, 886 [IEEE80211_AC_VI] = 4, 887 [IEEE80211_AC_VO] = 6 888 }; 889 static const u8 hw_queue_map[] = { 890 [IEEE80211_AC_BK] = 0, 891 [IEEE80211_AC_BE] = 1, 892 [IEEE80211_AC_VI] = 2, 893 [IEEE80211_AC_VO] = 3, 894 }; 895 struct ieee80211_sta *sta; 896 struct mt7615_sta *msta; 897 u32 addr, tx_time[4], rx_time[4]; 898 struct list_head sta_poll_list; 899 int i; 900 901 INIT_LIST_HEAD(&sta_poll_list); 902 spin_lock_bh(&dev->mt76.sta_poll_lock); 903 list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list); 904 spin_unlock_bh(&dev->mt76.sta_poll_lock); 905 906 while (!list_empty(&sta_poll_list)) { 907 bool clear = false; 908 909 msta = list_first_entry(&sta_poll_list, struct mt7615_sta, 910 wcid.poll_list); 911 912 spin_lock_bh(&dev->mt76.sta_poll_lock); 913 list_del_init(&msta->wcid.poll_list); 914 spin_unlock_bh(&dev->mt76.sta_poll_lock); 915 916 addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4; 917 918 for (i = 0; i < 4; i++, addr += 8) { 919 u32 tx_last = msta->airtime_ac[i]; 920 u32 rx_last = msta->airtime_ac[i + 4]; 921 922 msta->airtime_ac[i] = mt76_rr(dev, addr); 923 msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4); 924 tx_time[i] = msta->airtime_ac[i] - tx_last; 925 rx_time[i] = msta->airtime_ac[i + 4] - rx_last; 926 927 if ((tx_last | rx_last) & BIT(30)) 928 clear = true; 929 } 930 931 if (clear) { 932 mt7615_mac_wtbl_update(dev, msta->wcid.idx, 933 MT_WTBL_UPDATE_ADM_COUNT_CLEAR); 934 memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac)); 935 } 936 937 if (!msta->wcid.sta) 938 continue; 939 940 sta = container_of((void *)msta, struct ieee80211_sta, 941 drv_priv); 942 for (i = 0; i < 4; i++) { 943 u32 tx_cur = tx_time[i]; 944 u32 rx_cur = rx_time[hw_queue_map[i]]; 945 u8 tid = ac_to_tid[i]; 946 947 if (!tx_cur && !rx_cur) 948 continue; 949 950 ieee80211_sta_register_airtime(sta, tid, tx_cur, 951 rx_cur); 952 } 953 } 954 } 955 EXPORT_SYMBOL_GPL(mt7615_mac_sta_poll); 956 957 static void 958 mt7615_mac_update_rate_desc(struct mt7615_phy *phy, struct mt7615_sta *sta, 959 struct ieee80211_tx_rate *probe_rate, 960 struct ieee80211_tx_rate *rates, 961 struct mt7615_rate_desc *rd) 962 { 963 struct mt7615_dev *dev = phy->dev; 964 struct mt76_phy *mphy = phy->mt76; 965 struct ieee80211_tx_rate *ref; 966 bool rateset, stbc = false; 967 int n_rates = sta->n_rates; 968 u8 bw, bw_prev; 969 int i, j; 970 971 for (i = n_rates; i < 4; i++) 972 rates[i] = rates[n_rates - 1]; 973 974 rateset = !(sta->rate_set_tsf & BIT(0)); 975 memcpy(sta->rateset[rateset].rates, rates, 976 sizeof(sta->rateset[rateset].rates)); 977 if (probe_rate) { 978 sta->rateset[rateset].probe_rate = *probe_rate; 979 ref = &sta->rateset[rateset].probe_rate; 980 } else { 981 sta->rateset[rateset].probe_rate.idx = -1; 982 ref = &sta->rateset[rateset].rates[0]; 983 } 984 985 rates = sta->rateset[rateset].rates; 986 for (i = 0; i < ARRAY_SIZE(sta->rateset[rateset].rates); i++) { 987 /* 988 * We don't support switching between short and long GI 989 * within the rate set. For accurate tx status reporting, we 990 * need to make sure that flags match. 
991 * For improved performance, avoid duplicate entries by 992 * decrementing the MCS index if necessary 993 */ 994 if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI) 995 rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI; 996 997 for (j = 0; j < i; j++) { 998 if (rates[i].idx != rates[j].idx) 999 continue; 1000 if ((rates[i].flags ^ rates[j].flags) & 1001 (IEEE80211_TX_RC_40_MHZ_WIDTH | 1002 IEEE80211_TX_RC_80_MHZ_WIDTH | 1003 IEEE80211_TX_RC_160_MHZ_WIDTH)) 1004 continue; 1005 1006 if (!rates[i].idx) 1007 continue; 1008 1009 rates[i].idx--; 1010 } 1011 } 1012 1013 rd->val[0] = mt7615_mac_tx_rate_val(dev, mphy, &rates[0], stbc, &bw); 1014 bw_prev = bw; 1015 1016 if (probe_rate) { 1017 rd->probe_val = mt7615_mac_tx_rate_val(dev, mphy, probe_rate, 1018 stbc, &bw); 1019 if (bw) 1020 rd->bw_idx = 1; 1021 else 1022 bw_prev = 0; 1023 } else { 1024 rd->probe_val = rd->val[0]; 1025 } 1026 1027 rd->val[1] = mt7615_mac_tx_rate_val(dev, mphy, &rates[1], stbc, &bw); 1028 if (bw_prev) { 1029 rd->bw_idx = 3; 1030 bw_prev = bw; 1031 } 1032 1033 rd->val[2] = mt7615_mac_tx_rate_val(dev, mphy, &rates[2], stbc, &bw); 1034 if (bw_prev) { 1035 rd->bw_idx = 5; 1036 bw_prev = bw; 1037 } 1038 1039 rd->val[3] = mt7615_mac_tx_rate_val(dev, mphy, &rates[3], stbc, &bw); 1040 if (bw_prev) 1041 rd->bw_idx = 7; 1042 1043 rd->rateset = rateset; 1044 rd->bw = bw; 1045 } 1046 1047 static int 1048 mt7615_mac_queue_rate_update(struct mt7615_phy *phy, struct mt7615_sta *sta, 1049 struct ieee80211_tx_rate *probe_rate, 1050 struct ieee80211_tx_rate *rates) 1051 { 1052 struct mt7615_dev *dev = phy->dev; 1053 struct mt7615_wtbl_rate_desc *wrd; 1054 1055 if (work_pending(&dev->rate_work)) 1056 return -EBUSY; 1057 1058 wrd = kzalloc(sizeof(*wrd), GFP_ATOMIC); 1059 if (!wrd) 1060 return -ENOMEM; 1061 1062 wrd->sta = sta; 1063 mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates, 1064 &wrd->rate); 1065 list_add_tail(&wrd->node, &dev->wrd_head); 1066 queue_work(dev->mt76.wq, &dev->rate_work); 1067 1068 return 0; 1069 } 1070 1071 u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid) 1072 { 1073 u32 addr, val, val2; 1074 u8 offset; 1075 1076 addr = mt7615_mac_wtbl_addr(dev, wcid) + 11 * 4; 1077 1078 offset = tid * 12; 1079 addr += 4 * (offset / 32); 1080 offset %= 32; 1081 1082 val = mt76_rr(dev, addr); 1083 val >>= offset; 1084 1085 if (offset > 20) { 1086 addr += 4; 1087 val2 = mt76_rr(dev, addr); 1088 val |= val2 << (32 - offset); 1089 } 1090 1091 return val & GENMASK(11, 0); 1092 } 1093 1094 void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta, 1095 struct ieee80211_tx_rate *probe_rate, 1096 struct ieee80211_tx_rate *rates) 1097 { 1098 int wcid = sta->wcid.idx, n_rates = sta->n_rates; 1099 struct mt7615_dev *dev = phy->dev; 1100 struct mt7615_rate_desc rd; 1101 u32 w5, w27, addr; 1102 u16 idx = sta->vif->mt76.omac_idx; 1103 1104 if (!mt76_is_mmio(&dev->mt76)) { 1105 mt7615_mac_queue_rate_update(phy, sta, probe_rate, rates); 1106 return; 1107 } 1108 1109 if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000)) 1110 return; 1111 1112 memset(&rd, 0, sizeof(struct mt7615_rate_desc)); 1113 mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates, &rd); 1114 1115 addr = mt7615_mac_wtbl_addr(dev, wcid); 1116 w27 = mt76_rr(dev, addr + 27 * 4); 1117 w27 &= ~MT_WTBL_W27_CC_BW_SEL; 1118 w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, rd.bw); 1119 1120 w5 = mt76_rr(dev, addr + 5 * 4); 1121 w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE | 1122 MT_WTBL_W5_MPDU_OK_COUNT | 1123 
MT_WTBL_W5_MPDU_FAIL_COUNT | 1124 MT_WTBL_W5_RATE_IDX); 1125 w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, rd.bw) | 1126 FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE, 1127 rd.bw_idx ? rd.bw_idx - 1 : 7); 1128 1129 mt76_wr(dev, MT_WTBL_RIUCR0, w5); 1130 1131 mt76_wr(dev, MT_WTBL_RIUCR1, 1132 FIELD_PREP(MT_WTBL_RIUCR1_RATE0, rd.probe_val) | 1133 FIELD_PREP(MT_WTBL_RIUCR1_RATE1, rd.val[0]) | 1134 FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, rd.val[1])); 1135 1136 mt76_wr(dev, MT_WTBL_RIUCR2, 1137 FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, rd.val[1] >> 8) | 1138 FIELD_PREP(MT_WTBL_RIUCR2_RATE3, rd.val[1]) | 1139 FIELD_PREP(MT_WTBL_RIUCR2_RATE4, rd.val[2]) | 1140 FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, rd.val[2])); 1141 1142 mt76_wr(dev, MT_WTBL_RIUCR3, 1143 FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, rd.val[2] >> 4) | 1144 FIELD_PREP(MT_WTBL_RIUCR3_RATE6, rd.val[3]) | 1145 FIELD_PREP(MT_WTBL_RIUCR3_RATE7, rd.val[3])); 1146 1147 mt76_wr(dev, MT_WTBL_UPDATE, 1148 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) | 1149 MT_WTBL_UPDATE_RATE_UPDATE | 1150 MT_WTBL_UPDATE_TX_COUNT_CLEAR); 1151 1152 mt76_wr(dev, addr + 27 * 4, w27); 1153 1154 idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx; 1155 addr = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx); 1156 1157 mt76_rmw(dev, addr, MT_LPON_TCR_MODE, MT_LPON_TCR_READ); /* TSF read */ 1158 sta->rate_set_tsf = mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0); 1159 sta->rate_set_tsf |= rd.rateset; 1160 1161 if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET)) 1162 mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000); 1163 1164 sta->rate_count = 2 * MT7615_RATE_RETRY * n_rates; 1165 sta->wcid.tx_info |= MT_WCID_TX_INFO_SET; 1166 sta->rate_probe = !!probe_rate; 1167 } 1168 EXPORT_SYMBOL_GPL(mt7615_mac_set_rates); 1169 1170 void mt7615_mac_enable_rtscts(struct mt7615_dev *dev, 1171 struct ieee80211_vif *vif, bool enable) 1172 { 1173 struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; 1174 u32 addr; 1175 1176 addr = mt7615_mac_wtbl_addr(dev, mvif->sta.wcid.idx) + 3 * 4; 1177 1178 if (enable) 1179 mt76_set(dev, addr, MT_WTBL_W3_RTS); 1180 else 1181 mt76_clear(dev, addr, MT_WTBL_W3_RTS); 1182 } 1183 EXPORT_SYMBOL_GPL(mt7615_mac_enable_rtscts); 1184 1185 static int 1186 mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid, 1187 struct ieee80211_key_conf *key, 1188 enum mt76_cipher_type cipher, u16 cipher_mask) 1189 { 1190 u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4; 1191 u8 data[32] = {}; 1192 1193 if (key->keylen > sizeof(data)) 1194 return -EINVAL; 1195 1196 mt76_rr_copy(dev, addr, data, sizeof(data)); 1197 if (cipher == MT_CIPHER_TKIP) { 1198 /* Rx/Tx MIC keys are swapped */ 1199 memcpy(data, key->key, 16); 1200 memcpy(data + 16, key->key + 24, 8); 1201 memcpy(data + 24, key->key + 16, 8); 1202 } else { 1203 if (cipher_mask == BIT(cipher)) 1204 memcpy(data, key->key, key->keylen); 1205 else if (cipher != MT_CIPHER_BIP_CMAC_128) 1206 memcpy(data, key->key, 16); 1207 if (cipher == MT_CIPHER_BIP_CMAC_128) 1208 memcpy(data + 16, key->key, 16); 1209 } 1210 1211 mt76_wr_copy(dev, addr, data, sizeof(data)); 1212 1213 return 0; 1214 } 1215 1216 static int 1217 mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid, 1218 enum mt76_cipher_type cipher, u16 cipher_mask, 1219 int keyidx) 1220 { 1221 u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1; 1222 1223 if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000)) 1224 return -ETIMEDOUT; 1225 1226 w0 = mt76_rr(dev, addr); 1227 w1 = mt76_rr(dev, addr + 4); 1228 1229 if (cipher_mask) 1230 w0 |= 
MT_WTBL_W0_RX_KEY_VALID; 1231 else 1232 w0 &= ~(MT_WTBL_W0_RX_KEY_VALID | MT_WTBL_W0_KEY_IDX); 1233 if (cipher_mask & BIT(MT_CIPHER_BIP_CMAC_128)) 1234 w0 |= MT_WTBL_W0_RX_IK_VALID; 1235 else 1236 w0 &= ~MT_WTBL_W0_RX_IK_VALID; 1237 1238 if (cipher != MT_CIPHER_BIP_CMAC_128 || cipher_mask == BIT(cipher)) { 1239 w0 &= ~MT_WTBL_W0_KEY_IDX; 1240 w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx); 1241 } 1242 1243 mt76_wr(dev, MT_WTBL_RICR0, w0); 1244 mt76_wr(dev, MT_WTBL_RICR1, w1); 1245 1246 if (!mt7615_mac_wtbl_update(dev, wcid->idx, 1247 MT_WTBL_UPDATE_RXINFO_UPDATE)) 1248 return -ETIMEDOUT; 1249 1250 return 0; 1251 } 1252 1253 static void 1254 mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid, 1255 enum mt76_cipher_type cipher, u16 cipher_mask) 1256 { 1257 u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx); 1258 1259 if (cipher == MT_CIPHER_BIP_CMAC_128 && 1260 cipher_mask & ~BIT(MT_CIPHER_BIP_CMAC_128)) 1261 return; 1262 1263 mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE, 1264 FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher)); 1265 } 1266 1267 int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, 1268 struct mt76_wcid *wcid, 1269 struct ieee80211_key_conf *key) 1270 { 1271 enum mt76_cipher_type cipher; 1272 u16 cipher_mask = wcid->cipher; 1273 int err; 1274 1275 cipher = mt7615_mac_get_cipher(key->cipher); 1276 if (cipher == MT_CIPHER_NONE) 1277 return -EOPNOTSUPP; 1278 1279 cipher_mask |= BIT(cipher); 1280 mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask); 1281 err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask); 1282 if (err < 0) 1283 return err; 1284 1285 err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, cipher_mask, 1286 key->keyidx); 1287 if (err < 0) 1288 return err; 1289 1290 wcid->cipher = cipher_mask; 1291 1292 return 0; 1293 } 1294 1295 int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, 1296 struct mt76_wcid *wcid, 1297 struct ieee80211_key_conf *key) 1298 { 1299 int err; 1300 1301 spin_lock_bh(&dev->mt76.lock); 1302 err = __mt7615_mac_wtbl_set_key(dev, wcid, key); 1303 spin_unlock_bh(&dev->mt76.lock); 1304 1305 return err; 1306 } 1307 1308 static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta, 1309 struct ieee80211_tx_info *info, __le32 *txs_data) 1310 { 1311 struct ieee80211_supported_band *sband; 1312 struct mt7615_rate_set *rs; 1313 struct mt76_phy *mphy; 1314 int first_idx = 0, last_idx; 1315 int i, idx, count; 1316 bool fixed_rate, ack_timeout; 1317 bool ampdu, cck = false; 1318 bool rs_idx; 1319 u32 rate_set_tsf; 1320 u32 final_rate, final_rate_flags, final_nss, txs; 1321 1322 txs = le32_to_cpu(txs_data[1]); 1323 ampdu = txs & MT_TXS1_AMPDU; 1324 1325 txs = le32_to_cpu(txs_data[3]); 1326 count = FIELD_GET(MT_TXS3_TX_COUNT, txs); 1327 last_idx = FIELD_GET(MT_TXS3_LAST_TX_RATE, txs); 1328 1329 txs = le32_to_cpu(txs_data[0]); 1330 fixed_rate = txs & MT_TXS0_FIXED_RATE; 1331 final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs); 1332 ack_timeout = txs & MT_TXS0_ACK_TIMEOUT; 1333 1334 if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT)) 1335 return false; 1336 1337 if (txs & MT_TXS0_QUEUE_TIMEOUT) 1338 return false; 1339 1340 if (!ack_timeout) 1341 info->flags |= IEEE80211_TX_STAT_ACK; 1342 1343 info->status.ampdu_len = 1; 1344 info->status.ampdu_ack_len = !!(info->flags & 1345 IEEE80211_TX_STAT_ACK); 1346 1347 if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU)) 1348 info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU; 1349 1350 first_idx = max_t(int, 0, last_idx - (count - 1) / MT7615_RATE_RETRY); 1351 1352 if 
(fixed_rate) { 1353 info->status.rates[0].count = count; 1354 i = 0; 1355 goto out; 1356 } 1357 1358 rate_set_tsf = READ_ONCE(sta->rate_set_tsf); 1359 rs_idx = !((u32)(le32_get_bits(txs_data[4], MT_TXS4_F0_TIMESTAMP) - 1360 rate_set_tsf) < 1000000); 1361 rs_idx ^= rate_set_tsf & BIT(0); 1362 rs = &sta->rateset[rs_idx]; 1363 1364 if (!first_idx && rs->probe_rate.idx >= 0) { 1365 info->status.rates[0] = rs->probe_rate; 1366 1367 spin_lock_bh(&dev->mt76.lock); 1368 if (sta->rate_probe) { 1369 struct mt7615_phy *phy = &dev->phy; 1370 1371 if (sta->wcid.phy_idx && dev->mt76.phys[MT_BAND1]) 1372 phy = dev->mt76.phys[MT_BAND1]->priv; 1373 1374 mt7615_mac_set_rates(phy, sta, NULL, sta->rates); 1375 } 1376 spin_unlock_bh(&dev->mt76.lock); 1377 } else { 1378 info->status.rates[0] = rs->rates[first_idx / 2]; 1379 } 1380 info->status.rates[0].count = 0; 1381 1382 for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) { 1383 struct ieee80211_tx_rate *cur_rate; 1384 int cur_count; 1385 1386 cur_rate = &rs->rates[idx / 2]; 1387 cur_count = min_t(int, MT7615_RATE_RETRY, count); 1388 count -= cur_count; 1389 1390 if (idx && (cur_rate->idx != info->status.rates[i].idx || 1391 cur_rate->flags != info->status.rates[i].flags)) { 1392 i++; 1393 if (i == ARRAY_SIZE(info->status.rates)) { 1394 i--; 1395 break; 1396 } 1397 1398 info->status.rates[i] = *cur_rate; 1399 info->status.rates[i].count = 0; 1400 } 1401 1402 info->status.rates[i].count += cur_count; 1403 } 1404 1405 out: 1406 final_rate_flags = info->status.rates[i].flags; 1407 1408 switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) { 1409 case MT_PHY_TYPE_CCK: 1410 cck = true; 1411 fallthrough; 1412 case MT_PHY_TYPE_OFDM: 1413 mphy = &dev->mphy; 1414 if (sta->wcid.phy_idx && dev->mt76.phys[MT_BAND1]) 1415 mphy = dev->mt76.phys[MT_BAND1]; 1416 1417 if (mphy->chandef.chan->band == NL80211_BAND_5GHZ) 1418 sband = &mphy->sband_5g.sband; 1419 else 1420 sband = &mphy->sband_2g.sband; 1421 final_rate &= MT_TX_RATE_IDX; 1422 final_rate = mt76_get_rate(&dev->mt76, sband, final_rate, 1423 cck); 1424 final_rate_flags = 0; 1425 break; 1426 case MT_PHY_TYPE_HT_GF: 1427 case MT_PHY_TYPE_HT: 1428 final_rate_flags |= IEEE80211_TX_RC_MCS; 1429 final_rate &= MT_TX_RATE_IDX; 1430 if (final_rate > 31) 1431 return false; 1432 break; 1433 case MT_PHY_TYPE_VHT: 1434 final_nss = FIELD_GET(MT_TX_RATE_NSS, final_rate); 1435 1436 if ((final_rate & MT_TX_RATE_STBC) && final_nss) 1437 final_nss--; 1438 1439 final_rate_flags |= IEEE80211_TX_RC_VHT_MCS; 1440 final_rate = (final_rate & MT_TX_RATE_IDX) | (final_nss << 4); 1441 break; 1442 default: 1443 return false; 1444 } 1445 1446 info->status.rates[i].idx = final_rate; 1447 info->status.rates[i].flags = final_rate_flags; 1448 1449 return true; 1450 } 1451 1452 static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev, 1453 struct mt7615_sta *sta, int pid, 1454 __le32 *txs_data) 1455 { 1456 struct mt76_dev *mdev = &dev->mt76; 1457 struct sk_buff_head list; 1458 struct sk_buff *skb; 1459 1460 if (pid < MT_PACKET_ID_FIRST) 1461 return false; 1462 1463 trace_mac_txdone(mdev, sta->wcid.idx, pid); 1464 1465 mt76_tx_status_lock(mdev, &list); 1466 skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list); 1467 if (skb) { 1468 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1469 1470 if (!mt7615_fill_txs(dev, sta, info, txs_data)) { 1471 info->status.rates[0].count = 0; 1472 info->status.rates[0].idx = -1; 1473 } 1474 1475 mt76_tx_status_skb_done(mdev, skb, &list); 1476 } 1477 mt76_tx_status_unlock(mdev, &list); 1478 1479 return 
!!skb; 1480 } 1481 1482 static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data) 1483 { 1484 struct ieee80211_tx_info info = {}; 1485 struct ieee80211_sta *sta = NULL; 1486 struct mt7615_sta *msta = NULL; 1487 struct mt76_wcid *wcid; 1488 struct mt76_phy *mphy = &dev->mt76.phy; 1489 __le32 *txs_data = data; 1490 u8 wcidx; 1491 u8 pid; 1492 1493 pid = le32_get_bits(txs_data[0], MT_TXS0_PID); 1494 wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID); 1495 1496 if (pid == MT_PACKET_ID_NO_ACK) 1497 return; 1498 1499 if (wcidx >= MT7615_WTBL_SIZE) 1500 return; 1501 1502 rcu_read_lock(); 1503 1504 wcid = mt76_wcid_ptr(dev, wcidx); 1505 if (!wcid) 1506 goto out; 1507 1508 msta = container_of(wcid, struct mt7615_sta, wcid); 1509 sta = wcid_to_sta(wcid); 1510 mt76_wcid_add_poll(&dev->mt76, &msta->wcid); 1511 1512 if (mt7615_mac_add_txs_skb(dev, msta, pid, txs_data)) 1513 goto out; 1514 1515 if (wcidx >= MT7615_WTBL_STA || !sta) 1516 goto out; 1517 1518 if (wcid->phy_idx && dev->mt76.phys[MT_BAND1]) 1519 mphy = dev->mt76.phys[MT_BAND1]; 1520 1521 if (mt7615_fill_txs(dev, msta, &info, txs_data)) { 1522 spin_lock_bh(&dev->mt76.rx_lock); 1523 ieee80211_tx_status_noskb(mphy->hw, sta, &info); 1524 spin_unlock_bh(&dev->mt76.rx_lock); 1525 } 1526 1527 out: 1528 rcu_read_unlock(); 1529 } 1530 1531 static void 1532 mt7615_txwi_free(struct mt7615_dev *dev, struct mt76_txwi_cache *txwi) 1533 { 1534 struct mt76_dev *mdev = &dev->mt76; 1535 __le32 *txwi_data; 1536 u32 val; 1537 u8 wcid; 1538 1539 mt76_connac_txp_skb_unmap(mdev, txwi); 1540 if (!txwi->skb) 1541 goto out; 1542 1543 txwi_data = (__le32 *)mt76_get_txwi_ptr(mdev, txwi); 1544 val = le32_to_cpu(txwi_data[1]); 1545 wcid = FIELD_GET(MT_TXD1_WLAN_IDX, val); 1546 mt76_tx_complete_skb(mdev, wcid, txwi->skb); 1547 1548 out: 1549 txwi->skb = NULL; 1550 mt76_put_txwi(mdev, txwi); 1551 } 1552 1553 static void 1554 mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token) 1555 { 1556 struct mt76_dev *mdev = &dev->mt76; 1557 struct mt76_txwi_cache *txwi; 1558 1559 trace_mac_tx_free(dev, token); 1560 txwi = mt76_token_put(mdev, token); 1561 if (!txwi) 1562 return; 1563 1564 mt7615_txwi_free(dev, txwi); 1565 } 1566 1567 static void mt7615_mac_tx_free(struct mt7615_dev *dev, void *data, int len) 1568 { 1569 struct mt76_connac_tx_free *free = data; 1570 void *tx_token = data + sizeof(*free); 1571 void *end = data + len; 1572 u8 i, count; 1573 1574 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false); 1575 if (is_mt7615(&dev->mt76)) { 1576 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false); 1577 } else { 1578 for (i = 0; i < IEEE80211_NUM_ACS; i++) 1579 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false); 1580 } 1581 1582 count = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_ID_CNT); 1583 if (is_mt7615(&dev->mt76)) { 1584 __le16 *token = tx_token; 1585 1586 if (WARN_ON_ONCE((void *)&token[count] > end)) 1587 return; 1588 1589 for (i = 0; i < count; i++) 1590 mt7615_mac_tx_free_token(dev, le16_to_cpu(token[i])); 1591 } else { 1592 __le32 *token = tx_token; 1593 1594 if (WARN_ON_ONCE((void *)&token[count] > end)) 1595 return; 1596 1597 for (i = 0; i < count; i++) 1598 mt7615_mac_tx_free_token(dev, le32_to_cpu(token[i])); 1599 } 1600 1601 rcu_read_lock(); 1602 mt7615_mac_sta_poll(dev); 1603 rcu_read_unlock(); 1604 1605 mt76_worker_schedule(&dev->mt76.tx_worker); 1606 } 1607 1608 bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len) 1609 { 1610 struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); 1611 __le32 *rxd = 
(__le32 *)data; 1612 __le32 *end = (__le32 *)&rxd[len / 4]; 1613 enum rx_pkt_type type; 1614 1615 type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE); 1616 1617 switch (type) { 1618 case PKT_TYPE_TXRX_NOTIFY: 1619 mt7615_mac_tx_free(dev, data, len); 1620 return false; 1621 case PKT_TYPE_TXS: 1622 for (rxd++; rxd + 7 <= end; rxd += 7) 1623 mt7615_mac_add_txs(dev, rxd); 1624 return false; 1625 default: 1626 return true; 1627 } 1628 } 1629 EXPORT_SYMBOL_GPL(mt7615_rx_check); 1630 1631 void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, 1632 struct sk_buff *skb, u32 *info) 1633 { 1634 struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); 1635 __le32 *rxd = (__le32 *)skb->data; 1636 __le32 *end = (__le32 *)&skb->data[skb->len]; 1637 enum rx_pkt_type type; 1638 u16 flag; 1639 1640 type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE); 1641 flag = le32_get_bits(rxd[0], MT_RXD0_PKT_FLAG); 1642 if (type == PKT_TYPE_RX_EVENT && flag == 0x1) 1643 type = PKT_TYPE_NORMAL_MCU; 1644 1645 switch (type) { 1646 case PKT_TYPE_TXS: 1647 for (rxd++; rxd + 7 <= end; rxd += 7) 1648 mt7615_mac_add_txs(dev, rxd); 1649 dev_kfree_skb(skb); 1650 break; 1651 case PKT_TYPE_TXRX_NOTIFY: 1652 mt7615_mac_tx_free(dev, skb->data, skb->len); 1653 dev_kfree_skb(skb); 1654 break; 1655 case PKT_TYPE_RX_EVENT: 1656 mt7615_mcu_rx_event(dev, skb); 1657 break; 1658 case PKT_TYPE_NORMAL_MCU: 1659 case PKT_TYPE_NORMAL: 1660 if (!mt7615_mac_fill_rx(dev, skb)) { 1661 mt76_rx(&dev->mt76, q, skb); 1662 return; 1663 } 1664 fallthrough; 1665 default: 1666 dev_kfree_skb(skb); 1667 break; 1668 } 1669 } 1670 EXPORT_SYMBOL_GPL(mt7615_queue_rx_skb); 1671 1672 static void 1673 mt7615_mac_set_sensitivity(struct mt7615_phy *phy, int val, bool ofdm) 1674 { 1675 struct mt7615_dev *dev = phy->dev; 1676 bool ext_phy = phy != &dev->phy; 1677 1678 if (is_mt7663(&dev->mt76)) { 1679 if (ofdm) 1680 mt76_rmw(dev, MT7663_WF_PHY_MIN_PRI_PWR(ext_phy), 1681 MT_WF_PHY_PD_OFDM_MASK(0), 1682 MT_WF_PHY_PD_OFDM(0, val)); 1683 else 1684 mt76_rmw(dev, MT7663_WF_PHY_RXTD_CCK_PD(ext_phy), 1685 MT_WF_PHY_PD_CCK_MASK(ext_phy), 1686 MT_WF_PHY_PD_CCK(ext_phy, val)); 1687 return; 1688 } 1689 1690 if (ofdm) 1691 mt76_rmw(dev, MT_WF_PHY_MIN_PRI_PWR(ext_phy), 1692 MT_WF_PHY_PD_OFDM_MASK(ext_phy), 1693 MT_WF_PHY_PD_OFDM(ext_phy, val)); 1694 else 1695 mt76_rmw(dev, MT_WF_PHY_RXTD_CCK_PD(ext_phy), 1696 MT_WF_PHY_PD_CCK_MASK(ext_phy), 1697 MT_WF_PHY_PD_CCK(ext_phy, val)); 1698 } 1699 1700 static void 1701 mt7615_mac_set_default_sensitivity(struct mt7615_phy *phy) 1702 { 1703 /* ofdm */ 1704 mt7615_mac_set_sensitivity(phy, 0x13c, true); 1705 /* cck */ 1706 mt7615_mac_set_sensitivity(phy, 0x92, false); 1707 1708 phy->ofdm_sensitivity = -98; 1709 phy->cck_sensitivity = -110; 1710 phy->last_cca_adj = jiffies; 1711 } 1712 1713 void mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable) 1714 { 1715 struct mt7615_dev *dev = phy->dev; 1716 bool ext_phy = phy != &dev->phy; 1717 u32 reg, mask; 1718 1719 mt7615_mutex_acquire(dev); 1720 1721 if (phy->scs_en == enable) 1722 goto out; 1723 1724 if (is_mt7663(&dev->mt76)) { 1725 reg = MT7663_WF_PHY_MIN_PRI_PWR(ext_phy); 1726 mask = MT_WF_PHY_PD_BLK(0); 1727 } else { 1728 reg = MT_WF_PHY_MIN_PRI_PWR(ext_phy); 1729 mask = MT_WF_PHY_PD_BLK(ext_phy); 1730 } 1731 1732 if (enable) { 1733 mt76_set(dev, reg, mask); 1734 if (is_mt7622(&dev->mt76)) { 1735 mt76_set(dev, MT_MIB_M0_MISC_CR(0), 0x7 << 8); 1736 mt76_set(dev, MT_MIB_M0_MISC_CR(0), 0x7); 1737 } 1738 } else { 1739 mt76_clear(dev, reg, mask); 1740 } 1741 1742 
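/* restart from the default thresholds; mt7615_mac_scs_check() re-tunes
 * them while SCS stays enabled
 */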
mt7615_mac_set_default_sensitivity(phy); 1743 phy->scs_en = enable; 1744 1745 out: 1746 mt7615_mutex_release(dev); 1747 } 1748 1749 void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy) 1750 { 1751 u32 rxtd, reg; 1752 1753 if (is_mt7663(&dev->mt76)) 1754 reg = MT7663_WF_PHY_R0_PHYMUX_5; 1755 else 1756 reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy); 1757 1758 if (ext_phy) 1759 rxtd = MT_WF_PHY_RXTD2(10); 1760 else 1761 rxtd = MT_WF_PHY_RXTD(12); 1762 1763 mt76_set(dev, rxtd, BIT(18) | BIT(29)); 1764 mt76_set(dev, reg, 0x5 << 12); 1765 } 1766 1767 void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy) 1768 { 1769 struct mt7615_dev *dev = phy->dev; 1770 bool ext_phy = phy != &dev->phy; 1771 u32 reg; 1772 1773 if (is_mt7663(&dev->mt76)) 1774 reg = MT7663_WF_PHY_R0_PHYMUX_5; 1775 else 1776 reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy); 1777 1778 /* reset PD and MDRDY counters */ 1779 mt76_clear(dev, reg, GENMASK(22, 20)); 1780 mt76_set(dev, reg, BIT(22) | BIT(20)); 1781 } 1782 1783 static void 1784 mt7615_mac_adjust_sensitivity(struct mt7615_phy *phy, 1785 u32 rts_err_rate, bool ofdm) 1786 { 1787 struct mt7615_dev *dev = phy->dev; 1788 int false_cca = ofdm ? phy->false_cca_ofdm : phy->false_cca_cck; 1789 bool ext_phy = phy != &dev->phy; 1790 s16 def_th = ofdm ? -98 : -110; 1791 bool update = false; 1792 s8 *sensitivity; 1793 int signal; 1794 1795 sensitivity = ofdm ? &phy->ofdm_sensitivity : &phy->cck_sensitivity; 1796 signal = mt76_get_min_avg_rssi(&dev->mt76, ext_phy); 1797 if (!signal) { 1798 mt7615_mac_set_default_sensitivity(phy); 1799 return; 1800 } 1801 1802 signal = min(signal, -72); 1803 if (false_cca > 500) { 1804 if (rts_err_rate > MT_FRAC(40, 100)) 1805 return; 1806 1807 /* decrease coverage */ 1808 if (*sensitivity == def_th && signal > -90) { 1809 *sensitivity = -90; 1810 update = true; 1811 } else if (*sensitivity + 2 < signal) { 1812 *sensitivity += 2; 1813 update = true; 1814 } 1815 } else if ((false_cca > 0 && false_cca < 50) || 1816 rts_err_rate > MT_FRAC(60, 100)) { 1817 /* increase coverage */ 1818 if (*sensitivity - 2 >= def_th) { 1819 *sensitivity -= 2; 1820 update = true; 1821 } 1822 } 1823 1824 if (*sensitivity > signal) { 1825 *sensitivity = signal; 1826 update = true; 1827 } 1828 1829 if (update) { 1830 u16 val = ofdm ? 
*sensitivity * 2 + 512 : *sensitivity + 256; 1831 1832 mt7615_mac_set_sensitivity(phy, val, ofdm); 1833 phy->last_cca_adj = jiffies; 1834 } 1835 } 1836 1837 static void 1838 mt7615_mac_scs_check(struct mt7615_phy *phy) 1839 { 1840 struct mt7615_dev *dev = phy->dev; 1841 struct mib_stats *mib = &phy->mib; 1842 u32 val, rts_err_rate = 0; 1843 u32 mdrdy_cck, mdrdy_ofdm, pd_cck, pd_ofdm; 1844 bool ext_phy = phy != &dev->phy; 1845 1846 if (!phy->scs_en) 1847 return; 1848 1849 if (is_mt7663(&dev->mt76)) 1850 val = mt76_rr(dev, MT7663_WF_PHY_R0_PHYCTRL_STS0(ext_phy)); 1851 else 1852 val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS0(ext_phy)); 1853 pd_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_CCK, val); 1854 pd_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_OFDM, val); 1855 1856 if (is_mt7663(&dev->mt76)) 1857 val = mt76_rr(dev, MT7663_WF_PHY_R0_PHYCTRL_STS5(ext_phy)); 1858 else 1859 val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS5(ext_phy)); 1860 mdrdy_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_CCK, val); 1861 mdrdy_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_OFDM, val); 1862 1863 phy->false_cca_ofdm = pd_ofdm - mdrdy_ofdm; 1864 phy->false_cca_cck = pd_cck - mdrdy_cck; 1865 mt7615_mac_cca_stats_reset(phy); 1866 1867 if (mib->rts_cnt + mib->rts_retries_cnt) 1868 rts_err_rate = MT_FRAC(mib->rts_retries_cnt, 1869 mib->rts_cnt + mib->rts_retries_cnt); 1870 1871 /* cck */ 1872 mt7615_mac_adjust_sensitivity(phy, rts_err_rate, false); 1873 /* ofdm */ 1874 mt7615_mac_adjust_sensitivity(phy, rts_err_rate, true); 1875 1876 if (time_after(jiffies, phy->last_cca_adj + 10 * HZ)) 1877 mt7615_mac_set_default_sensitivity(phy); 1878 } 1879 1880 static u8 1881 mt7615_phy_get_nf(struct mt7615_dev *dev, int idx) 1882 { 1883 static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 }; 1884 u32 reg, val, sum = 0, n = 0; 1885 int i; 1886 1887 if (is_mt7663(&dev->mt76)) 1888 reg = MT7663_WF_PHY_RXTD(20); 1889 else 1890 reg = idx ? MT_WF_PHY_RXTD2(17) : MT_WF_PHY_RXTD(20); 1891 1892 for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) { 1893 val = mt76_rr(dev, reg); 1894 sum += val * nf_power[i]; 1895 n += val; 1896 } 1897 1898 if (!n) 1899 return 0; 1900 1901 return sum / n; 1902 } 1903 1904 static void 1905 mt7615_phy_update_channel(struct mt76_phy *mphy, int idx) 1906 { 1907 struct mt7615_dev *dev = container_of(mphy->dev, struct mt7615_dev, mt76); 1908 struct mt7615_phy *phy = mphy->priv; 1909 struct mt76_channel_state *state; 1910 u64 busy_time, tx_time, rx_time, obss_time; 1911 u32 obss_reg = idx ? 
MT_WF_RMAC_MIB_TIME6 : MT_WF_RMAC_MIB_TIME5; 1912 int nf; 1913 1914 busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx), 1915 MT_MIB_SDR9_BUSY_MASK); 1916 tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx), 1917 MT_MIB_SDR36_TXTIME_MASK); 1918 rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx), 1919 MT_MIB_SDR37_RXTIME_MASK); 1920 obss_time = mt76_get_field(dev, obss_reg, MT_MIB_OBSSTIME_MASK); 1921 1922 nf = mt7615_phy_get_nf(dev, idx); 1923 if (!phy->noise) 1924 phy->noise = nf << 4; 1925 else if (nf) 1926 phy->noise += nf - (phy->noise >> 4); 1927 1928 state = mphy->chan_state; 1929 state->cc_busy += busy_time; 1930 state->cc_tx += tx_time; 1931 state->cc_rx += rx_time + obss_time; 1932 state->cc_bss_rx += rx_time; 1933 state->noise = -(phy->noise >> 4); 1934 } 1935 1936 static void mt7615_update_survey(struct mt7615_dev *dev) 1937 { 1938 struct mt76_dev *mdev = &dev->mt76; 1939 struct mt76_phy *mphy_ext = mdev->phys[MT_BAND1]; 1940 ktime_t cur_time; 1941 1942 /* MT7615 can only update both phys simultaneously 1943 * since some registers are shared across bands. 1944 */ 1945 1946 mt7615_phy_update_channel(&mdev->phy, 0); 1947 if (mphy_ext) 1948 mt7615_phy_update_channel(mphy_ext, 1); 1949 1950 cur_time = ktime_get_boottime(); 1951 1952 mt76_update_survey_active_time(&mdev->phy, cur_time); 1953 if (mphy_ext) 1954 mt76_update_survey_active_time(mphy_ext, cur_time); 1955 1956 /* reset obss airtime */ 1957 mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR); 1958 } 1959 1960 void mt7615_update_channel(struct mt76_phy *mphy) 1961 { 1962 struct mt7615_dev *dev = container_of(mphy->dev, struct mt7615_dev, mt76); 1963 1964 if (mt76_connac_pm_wake(&dev->mphy, &dev->pm)) 1965 return; 1966 1967 mt7615_update_survey(dev); 1968 mt76_connac_power_save_sched(&dev->mphy, &dev->pm); 1969 } 1970 EXPORT_SYMBOL_GPL(mt7615_update_channel); 1971 1972 static void 1973 mt7615_mac_update_mib_stats(struct mt7615_phy *phy) 1974 { 1975 struct mt7615_dev *dev = phy->dev; 1976 struct mib_stats *mib = &phy->mib; 1977 bool ext_phy = phy != &dev->phy; 1978 int i, aggr = 0; 1979 u32 val, val2; 1980 1981 mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy), 1982 MT_MIB_SDR3_FCS_ERR_MASK); 1983 1984 val = mt76_get_field(dev, MT_MIB_SDR14(ext_phy), 1985 MT_MIB_AMPDU_MPDU_COUNT); 1986 if (val) { 1987 val2 = mt76_get_field(dev, MT_MIB_SDR15(ext_phy), 1988 MT_MIB_AMPDU_ACK_COUNT); 1989 mib->aggr_per = 1000 * (val - val2) / val; 1990 } 1991 1992 for (i = 0; i < 4; i++) { 1993 val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i)); 1994 mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val); 1995 mib->ack_fail_cnt += FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, 1996 val); 1997 1998 val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i)); 1999 mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val); 2000 mib->rts_retries_cnt += FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, 2001 val); 2002 2003 val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i)); 2004 phy->mt76->aggr_stats[aggr++] += val & 0xffff; 2005 phy->mt76->aggr_stats[aggr++] += val >> 16; 2006 } 2007 } 2008 2009 void mt7615_pm_wake_work(struct work_struct *work) 2010 { 2011 struct mt7615_dev *dev; 2012 struct mt76_phy *mphy; 2013 2014 dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev, 2015 pm.wake_work); 2016 mphy = dev->phy.mt76; 2017 2018 if (!mt7615_mcu_set_drv_ctrl(dev)) { 2019 struct mt76_dev *mdev = &dev->mt76; 2020 int i; 2021 2022 if (mt76_is_sdio(mdev)) { 2023 mt76_connac_pm_dequeue_skbs(mphy, &dev->pm); 2024 mt76_worker_schedule(&mdev->sdio.txrx_worker); 2025 } else {
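/* non-SDIO (MMIO/USB): kick the rx NAPI contexts and reap completed
 * MCU commands
 */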
void mt7615_pm_wake_work(struct work_struct *work)
{
	struct mt7615_dev *dev;
	struct mt76_phy *mphy;

	dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
						pm.wake_work);
	mphy = dev->phy.mt76;

	if (!mt7615_mcu_set_drv_ctrl(dev)) {
		struct mt76_dev *mdev = &dev->mt76;
		int i;

		if (mt76_is_sdio(mdev)) {
			mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
			mt76_worker_schedule(&mdev->sdio.txrx_worker);
		} else {
			local_bh_disable();
			mt76_for_each_q_rx(mdev, i)
				napi_schedule(&mdev->napi[i]);
			local_bh_enable();
			mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
			mt76_queue_tx_cleanup(dev, mdev->q_mcu[MT_MCUQ_WM],
					      false);
		}

		if (test_bit(MT76_STATE_RUNNING, &mphy->state)) {
			unsigned long timeout;

			timeout = mt7615_get_macwork_timeout(dev);
			ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
						     timeout);
		}
	}

	ieee80211_wake_queues(mphy->hw);
	wake_up(&dev->pm.wait);
}

void mt7615_pm_power_save_work(struct work_struct *work)
{
	struct mt7615_dev *dev;
	unsigned long delta;

	dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
						pm.ps_work.work);

	delta = dev->pm.idle_timeout;
	if (test_bit(MT76_HW_SCANNING, &dev->mphy.state) ||
	    test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state))
		goto out;

	if (mutex_is_locked(&dev->mt76.mutex))
		/* if mt76 mutex is held we should not put the device
		 * to sleep since we are currently accessing device
		 * register map. We need to wait for the next power_save
		 * trigger.
		 */
		goto out;

	if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
		delta = dev->pm.last_activity + delta - jiffies;
		goto out;
	}

	if (!mt7615_mcu_set_fw_ctrl(dev))
		return;
out:
	queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
}

void mt7615_mac_work(struct work_struct *work)
{
	struct mt7615_phy *phy;
	struct mt76_phy *mphy;
	unsigned long timeout;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mt7615_mutex_acquire(phy->dev);

	mt7615_update_survey(phy->dev);
	if (++mphy->mac_work_count == 5) {
		mphy->mac_work_count = 0;

		mt7615_mac_update_mib_stats(phy);
		mt7615_mac_scs_check(phy);
	}

	mt7615_mutex_release(phy->dev);

	mt76_tx_status_check(mphy->dev, false);

	timeout = mt7615_get_macwork_timeout(phy->dev);
	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, timeout);
}

void mt7615_tx_token_put(struct mt7615_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id)
		mt7615_txwi_free(dev, txwi);
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}
EXPORT_SYMBOL_GPL(mt7615_tx_token_put);

static void mt7615_dfs_stop_radar_detector(struct mt7615_phy *phy)
{
	struct mt7615_dev *dev = phy->dev;

	if (phy->rdd_state & BIT(0))
		mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0,
					MT_RX_SEL0, 0);
	if (phy->rdd_state & BIT(1))
		mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1,
					MT_RX_SEL0, 0);
}

static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain)
{
	int err;

	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain,
				      MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain,
				       MT_RX_SEL0, 1);
}
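/* Start CAC on the phy's own chain, then bring up the RDD radar
 * detector.  For 160 and 80+80 MHz channels the second chain is
 * started as well (presumably to cover the secondary segment);
 * phy->rdd_state records which chains are running so
 * mt7615_dfs_stop_radar_detector() can tear them down later.
 */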
static int mt7615_dfs_start_radar_detector(struct mt7615_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	int err;

	/* start CAC */
	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, ext_phy,
				      MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	err = mt7615_dfs_start_rdd(dev, ext_phy);
	if (err < 0)
		return err;

	phy->rdd_state |= BIT(ext_phy);

	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
		err = mt7615_dfs_start_rdd(dev, 1);
		if (err < 0)
			return err;

		phy->rdd_state |= BIT(1);
	}

	return 0;
}

static int
mt7615_dfs_init_radar_specs(struct mt7615_phy *phy)
{
	const struct mt7615_dfs_radar_spec *radar_specs;
	struct mt7615_dev *dev = phy->dev;
	int err, i, lpn = 500;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		lpn = 8;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	/* avoid FCC radar detection in non-FCC region */
	err = mt7615_mcu_set_fcc5_lpn(dev, lpn);
	if (err < 0)
		return err;

	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7615_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7615_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}

int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	enum mt76_dfs_state dfs_state, prev_state;
	int err;

	if (is_mt7663(&dev->mt76))
		return 0;

	prev_state = phy->mt76->dfs_state;
	dfs_state = mt76_phy_dfs_state(phy->mt76);
	if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
	    dfs_state < MT_DFS_STATE_CAC)
		dfs_state = MT_DFS_STATE_ACTIVE;

	if (prev_state == dfs_state)
		return 0;

	if (dfs_state == MT_DFS_STATE_DISABLED)
		goto stop;

	if (prev_state <= MT_DFS_STATE_DISABLED) {
		err = mt7615_dfs_init_radar_specs(phy);
		if (err < 0)
			return err;

		err = mt7615_dfs_start_radar_detector(phy);
		if (err < 0)
			return err;

		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
	}

	if (dfs_state == MT_DFS_STATE_CAC)
		return 0;

	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
				      ext_phy, MT_RX_SEL0, 0);
	if (err < 0) {
		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
		return err;
	}

	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
	return 0;

stop:
	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START, ext_phy,
				      MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	mt7615_dfs_stop_radar_detector(phy);
	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;

	return 0;
}
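/* phy->n_beacon_vif counts beacon-transmitting interfaces (AP, mesh,
 * IBSS).  While any of them exists, other-BSS beacons must still be
 * received, so the hardware filter stays off; once the count drops to
 * zero, the RFCR filter drops foreign beacons and mac80211 is told via
 * IEEE80211_VIF_BEACON_FILTER that beacon monitoring is handled by the
 * driver/firmware.  Requires firmware offload support.
 */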
		fallthrough;
	default:
		break;
	}

	err = mt7615_mcu_set_bss_pm(dev, vif, !phy->n_beacon_vif);
	if (err)
		return err;

	if (phy->n_beacon_vif) {
		vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
		mt76_clear(dev, MT_WF_RFCR(ext_phy),
			   MT_WF_RFCR_DROP_OTHER_BEACON);
	} else {
		vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
		mt76_set(dev, MT_WF_RFCR(ext_phy),
			 MT_WF_RFCR_DROP_OTHER_BEACON);
	}

	return 0;
}

void mt7615_coredump_work(struct work_struct *work)
{
	struct mt7615_dev *dev;
	char *dump, *data;

	dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
						coredump.work.work);

	if (time_is_after_jiffies(dev->coredump.last_activity +
				  4 * MT76_CONNAC_COREDUMP_TIMEOUT)) {
		queue_delayed_work(dev->mt76.wq, &dev->coredump.work,
				   MT76_CONNAC_COREDUMP_TIMEOUT);
		return;
	}

	dump = vzalloc(MT76_CONNAC_COREDUMP_SZ);
	data = dump;

	/* drain the message list even if the allocation failed, so the
	 * queued skbs are always freed
	 */
	while (true) {
		struct sk_buff *skb;

		spin_lock_bh(&dev->mt76.lock);
		skb = __skb_dequeue(&dev->coredump.msg_list);
		spin_unlock_bh(&dev->mt76.lock);

		if (!skb)
			break;

		skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
		if (!dump || data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
			dev_kfree_skb(skb);
			continue;
		}

		memcpy(data, skb->data, skb->len);
		data += skb->len;

		dev_kfree_skb(skb);
	}

	if (dump)
		dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
			      GFP_KERNEL);
}
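/* Note: the buffer assembled above is handed to the devcoredump
 * framework; assuming a standard kernel configuration it appears in
 * userspace as /sys/class/devcoredump/devcd<N>/data, where it can be
 * read out and then discarded by writing to the same file.
 */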